diff options
author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000 |
commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) | |
tree | 848558de17fb3008cdf4d861b01ac7781903ce39 /drivers/net/ethernet/netronome/nfp/nfpcore | |
parent | Initial commit. (diff) | |
download | linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip |
Adding upstream version 6.1.76.upstream/6.1.76upstream
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/net/ethernet/netronome/nfp/nfpcore')
24 files changed, 9186 insertions, 0 deletions
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h new file mode 100644 index 000000000..6ad43c7ce --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/crc32.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +#ifndef NFP_CRC32_H +#define NFP_CRC32_H + +#include <linux/crc32.h> + +/** + * crc32_posix_end() - Finalize POSIX CRC32 working state + * @crc: Current CRC32 working state + * @total_len: Total length of data that was CRC32'd + * + * Return: Final POSIX CRC32 value + */ +static inline u32 crc32_posix_end(u32 crc, size_t total_len) +{ + /* Extend with the length of the string. */ + while (total_len != 0) { + u8 c = total_len & 0xff; + + crc = crc32_be(crc, &c, 1); + total_len >>= 8; + } + + return ~crc; +} + +static inline u32 crc32_posix(const void *buff, size_t len) +{ + return crc32_posix_end(crc32_be(0, buff, len), len); +} + +#endif /* NFP_CRC32_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h new file mode 100644 index 000000000..db94b0bdd --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp.h @@ -0,0 +1,84 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp.h + * Interface for NFP device access and query functions. 
+ */ + +#ifndef __NFP_H__ +#define __NFP_H__ + +#include <linux/device.h> +#include <linux/types.h> + +#include "nfp_cpp.h" + +/* Implemented in nfp_hwinfo.c */ + +struct nfp_hwinfo; +struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp); +const char *nfp_hwinfo_lookup(struct nfp_hwinfo *hwinfo, const char *lookup); +char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo); +u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo); + +/* Implemented in nfp_nsp.c, low level functions */ + +struct nfp_nsp; + +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state); +bool nfp_nsp_config_modified(struct nfp_nsp *state); +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified); +void *nfp_nsp_config_entries(struct nfp_nsp *state); +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state); +void nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, + unsigned int idx); +void nfp_nsp_config_clear_state(struct nfp_nsp *state); +int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_write_eth_table(struct nfp_nsp *state, + const void *buf, unsigned int size); +int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size); +int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask, + void *buf, unsigned int size); + +/* Implemented in nfp_resource.c */ + +/* All keys are CRC32-POSIX of the 8-byte identification string */ + +/* ARM/PCI vNIC Interfaces 0..3 */ +#define NFP_RESOURCE_VNIC_PCI_0 "vnic.p0" +#define NFP_RESOURCE_VNIC_PCI_1 "vnic.p1" +#define NFP_RESOURCE_VNIC_PCI_2 "vnic.p2" +#define NFP_RESOURCE_VNIC_PCI_3 "vnic.p3" + +/* NFP Hardware Info Database */ +#define NFP_RESOURCE_NFP_HWINFO "nfp.info" + +/* Service Processor */ +#define NFP_RESOURCE_NSP "nfp.sp" +#define NFP_RESOURCE_NSP_DIAG "arm.diag" + +/* Netronone Flow Firmware Table */ +#define NFP_RESOURCE_NFP_NFFW "nfp.nffw" + +/* MAC Statistics Accumulator */ +#define NFP_RESOURCE_MAC_STATISTICS "mac.stat" + +int 
nfp_resource_table_init(struct nfp_cpp *cpp); + +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name); + +void nfp_resource_release(struct nfp_resource *res); + +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs); + +u32 nfp_resource_cpp_id(struct nfp_resource *res); + +const char *nfp_resource_name(struct nfp_resource *res); + +u64 nfp_resource_address(struct nfp_resource *res); + +u64 nfp_resource_size(struct nfp_resource *res); + +#endif /* !__NFP_H__ */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h new file mode 100644 index 000000000..4a1213385 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp6000.h @@ -0,0 +1,58 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +#ifndef NFP6000_NFP6000_H +#define NFP6000_NFP6000_H + +#include <linux/errno.h> +#include <linux/types.h> + +/* CPP Target IDs */ +#define NFP_CPP_TARGET_INVALID 0 +#define NFP_CPP_TARGET_NBI 1 +#define NFP_CPP_TARGET_QDR 2 +#define NFP_CPP_TARGET_ILA 6 +#define NFP_CPP_TARGET_MU 7 +#define NFP_CPP_TARGET_PCIE 9 +#define NFP_CPP_TARGET_ARM 10 +#define NFP_CPP_TARGET_CRYPTO 12 +#define NFP_CPP_TARGET_ISLAND_XPB 14 /* Shared with CAP */ +#define NFP_CPP_TARGET_ISLAND_CAP 14 /* Shared with XPB */ +#define NFP_CPP_TARGET_CT_XPB 14 +#define NFP_CPP_TARGET_LOCAL_SCRATCH 15 +#define NFP_CPP_TARGET_CLS NFP_CPP_TARGET_LOCAL_SCRATCH + +#define NFP_ISL_EMEM0 24 + +#define NFP_MU_ADDR_ACCESS_TYPE_MASK 3ULL +#define NFP_MU_ADDR_ACCESS_TYPE_DIRECT 2ULL + +#define PUSHPULL(_pull, _push) ((_pull) << 4 | (_push) << 0) +#define PUSH_WIDTH(_pushpull) pushpull_width((_pushpull) >> 0) +#define PULL_WIDTH(_pushpull) pushpull_width((_pushpull) >> 4) + +static inline int pushpull_width(int pp) +{ + pp &= 0xf; + + if (pp == 0) + return -EINVAL; + return 2 << pp; +} + +static inline 
int nfp_cppat_mu_locality_lsb(int mode, bool addr40) +{ + switch (mode) { + case 0 ... 3: + return addr40 ? 38 : 30; + default: + return -EINVAL; + } +} + +int nfp_target_pushpull(u32 cpp_id, u64 address); +int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, + u32 *cpp_target_id, u64 *cpp_target_address, + const u32 *imb_table); + +#endif /* NFP6000_NFP6000_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h new file mode 100644 index 000000000..9a86ec11c --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000/nfp_xpb.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +/* + * nfp_xpb.h + * Author: Jason McMullan <jason.mcmullan@netronome.com> + */ + +#ifndef NFP6000_XPB_H +#define NFP6000_XPB_H + +/* For use with NFP6000 Databook "XPB Addressing" section + */ +#define NFP_XPB_OVERLAY(island) (((island) & 0x3f) << 24) + +#define NFP_XPB_ISLAND(island) (NFP_XPB_OVERLAY(island) + 0x60000) + +#define NFP_XPB_ISLAND_of(offset) (((offset) >> 24) & 0x3F) + +/* For use with NFP6000 Databook "XPB Island and Device IDs" chapter + */ +#define NFP_XPB_DEVICE(island, slave, device) \ + (NFP_XPB_OVERLAY(island) | \ + (((slave) & 3) << 22) | \ + (((device) & 0x3f) << 16)) + +#endif /* NFP6000_XPB_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c new file mode 100644 index 000000000..33b4c2856 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.c @@ -0,0 +1,1366 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ + +/* + * nfp6000_pcie.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + * + * Multiplexes the NFP BARs between NFP internal resources and + * implements the PCIe specific interface for generic CPP bus access. + * + * The BARs are managed with refcounts and are allocated/acquired + * using target, token and offset/size matching. The generic CPP bus + * abstraction builds upon this BAR interface. + */ + +#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/kref.h> +#include <linux/io.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/sort.h> +#include <linux/sched.h> +#include <linux/types.h> +#include <linux/pci.h> + +#include "nfp_cpp.h" +#include "nfp_dev.h" + +#include "nfp6000/nfp6000.h" + +#include "nfp6000_pcie.h" + +#define NFP_PCIE_BAR(_pf) (0x30000 + ((_pf) & 7) * 0xc0) +#define NFP_PCIE_BAR_EXPLICIT_BAR0(_x, _y) \ + (0x00000080 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(_x) (((_x) & 0x3) << 30) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType_of(_x) (((_x) >> 30) & 0x3) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token(_x) (((_x) & 0x3) << 28) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Token_of(_x) (((_x) >> 28) & 0x3) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address(_x) (((_x) & 0xffffff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR0_Address_of(_x) (((_x) >> 0) & 0xffffff) +#define NFP_PCIE_BAR_EXPLICIT_BAR1(_x, _y) \ + (0x00000084 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(_x) (((_x) & 0x7f) << 24) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef_of(_x) (((_x) >> 24) & 0x7f) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(_x) (((_x) & 0x3ff) << 14) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster_of(_x) (((_x) >> 14) & 0x3ff) +#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(_x) (((_x) & 0x3fff) << 0) 
+#define NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef_of(_x) (((_x) >> 0) & 0x3fff) +#define NFP_PCIE_BAR_EXPLICIT_BAR2(_x, _y) \ + (0x00000088 + (0x40 * ((_x) & 0x3)) + (0x10 * ((_y) & 0x3))) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target(_x) (((_x) & 0xf) << 28) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Target_of(_x) (((_x) >> 28) & 0xf) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action(_x) (((_x) & 0x1f) << 23) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Action_of(_x) (((_x) >> 23) & 0x1f) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length(_x) (((_x) & 0x1f) << 18) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_Length_of(_x) (((_x) >> 18) & 0x1f) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(_x) (((_x) & 0xff) << 10) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask_of(_x) (((_x) >> 10) & 0xff) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(_x) (((_x) & 0x3ff) << 0) +#define NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster_of(_x) (((_x) >> 0) & 0x3ff) + +#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(_x) (((_x) & 0x1f) << 16) +#define NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(_x) (((_x) >> 16) & 0x1f) +#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress(_x) (((_x) & 0xffff) << 0) +#define NFP_PCIE_BAR_PCIE2CPP_BaseAddress_of(_x) (((_x) >> 0) & 0xffff) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect(_x) (((_x) & 0x3) << 27) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(_x) (((_x) >> 27) & 0x3) +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT 0 +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT 1 +#define NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE 3 +#define NFP_PCIE_BAR_PCIE2CPP_MapType(_x) (((_x) & 0x7) << 29) +#define NFP_PCIE_BAR_PCIE2CPP_MapType_of(_x) (((_x) >> 29) & 0x7) +#define NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED 0 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_BULK 1 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET 2 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL 3 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0 4 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1 5 +#define NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2 6 +#define 
NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3 7 +#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(_x) (((_x) & 0xf) << 23) +#define NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(_x) (((_x) >> 23) & 0xf) +#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(_x) (((_x) & 0x3) << 21) +#define NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(_x) (((_x) >> 21) & 0x3) +#define NFP_PCIE_EM 0x020000 +#define NFP_PCIE_SRAM 0x000000 + +/* Minimal size of the PCIe cfg memory we depend on being mapped, + * queue controller and DMA controller don't have to be covered. + */ +#define NFP_PCI_MIN_MAP_SIZE 0x080000 + +#define NFP_PCIE_P2C_FIXED_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_BULK_SIZE(bar) (1 << (bar)->bitsize) +#define NFP_PCIE_P2C_GENERAL_TARGET_OFFSET(bar, x) ((x) << ((bar)->bitsize - 2)) +#define NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET(bar, x) ((x) << ((bar)->bitsize - 4)) +#define NFP_PCIE_P2C_GENERAL_SIZE(bar) (1 << ((bar)->bitsize - 4)) + +#define NFP_PCIE_P2C_EXPBAR_OFFSET(bar_index) ((bar_index) * 4) + +/* The number of explicit BARs to reserve. + * Minimum is 0, maximum is 4 on the NFP6000. + * The NFP3800 can have only one per PF. 
+ */ +#define NFP_PCIE_EXPLICIT_BARS 2 + +struct nfp6000_pcie; +struct nfp6000_area_priv; + +/** + * struct nfp_bar - describes BAR configuration and usage + * @nfp: backlink to owner + * @barcfg: cached contents of BAR config CSR + * @base: the BAR's base CPP offset + * @mask: mask for the BAR aperture (read only) + * @bitsize: bitsize of BAR aperture (read only) + * @index: index of the BAR + * @refcnt: number of current users + * @iomem: mapped IO memory + * @resource: iomem resource window + */ +struct nfp_bar { + struct nfp6000_pcie *nfp; + u32 barcfg; + u64 base; /* CPP address base */ + u64 mask; /* Bit mask of the bar */ + u32 bitsize; /* Bit size of the bar */ + int index; + atomic_t refcnt; + + void __iomem *iomem; + struct resource *resource; +}; + +#define NFP_PCI_BAR_MAX (PCI_64BIT_BAR_COUNT * 8) + +struct nfp6000_pcie { + struct pci_dev *pdev; + struct device *dev; + const struct nfp_dev_info *dev_info; + + /* PCI BAR management */ + spinlock_t bar_lock; /* Protect the PCI2CPP BAR cache */ + int bars; + struct nfp_bar bar[NFP_PCI_BAR_MAX]; + wait_queue_head_t bar_waiters; + + /* Reserved BAR access */ + struct { + void __iomem *csr; + void __iomem *em; + void __iomem *expl[4]; + } iomem; + + /* Explicit IO access */ + struct { + struct mutex mutex; /* Lock access to this explicit group */ + u8 master_id; + u8 signal_ref; + void __iomem *data; + struct { + void __iomem *addr; + int bitsize; + int free[4]; + } group[4]; + } expl; +}; + +static u32 nfp_bar_maptype(struct nfp_bar *bar) +{ + return NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg); +} + +static resource_size_t nfp_bar_resource_len(struct nfp_bar *bar) +{ + return pci_resource_len(bar->nfp->pdev, (bar->index / 8) * 2) / 8; +} + +static resource_size_t nfp_bar_resource_start(struct nfp_bar *bar) +{ + return pci_resource_start(bar->nfp->pdev, (bar->index / 8) * 2) + + nfp_bar_resource_len(bar) * (bar->index & 7); +} + +#define TARGET_WIDTH_32 4 +#define TARGET_WIDTH_64 8 + +static int 
+compute_bar(const struct nfp6000_pcie *nfp, const struct nfp_bar *bar, + u32 *bar_config, u64 *bar_base, + int tgt, int act, int tok, u64 offset, size_t size, int width) +{ + int bitsize; + u32 newcfg; + + if (tgt >= NFP_CPP_NUM_TARGETS) + return -EINVAL; + + switch (width) { + case 8: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT); + break; + case 4: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT); + break; + case 0: + newcfg = NFP_PCIE_BAR_PCIE2CPP_LengthSelect( + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE); + break; + default: + return -EINVAL; + } + + if (act != NFP_CPP_ACTION_RW && act != 0) { + /* Fixed CPP mapping with specific action */ + u64 mask = ~(NFP_PCIE_P2C_FIXED_SIZE(bar) - 1); + + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress(act); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + offset &= mask; + + bitsize = 40 - 16; + } else { + u64 mask = ~(NFP_PCIE_P2C_BULK_SIZE(bar) - 1); + + /* Bulk mapping */ + newcfg |= NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress(tgt); + newcfg |= NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress(tok); + + if ((offset & mask) != ((offset + size - 1) & mask)) + return -EINVAL; + + offset &= mask; + + bitsize = 40 - 21; + } + + if (bar->bitsize < bitsize) + return -EINVAL; + + newcfg |= offset >> bitsize; + + if (bar_base) + *bar_base = offset; + + if (bar_config) + *bar_config = newcfg; + + return 0; +} + +static int +nfp6000_bar_write(struct nfp6000_pcie *nfp, struct nfp_bar *bar, u32 newcfg) +{ + unsigned int xbar; + + xbar = NFP_PCIE_P2C_EXPBAR_OFFSET(bar->index); + + if (nfp->iomem.csr) { + writel(newcfg, nfp->iomem.csr + xbar); + /* 
Readback to ensure BAR is flushed */ + readl(nfp->iomem.csr + xbar); + } else { + xbar += nfp->dev_info->pcie_cfg_expbar_offset; + pci_write_config_dword(nfp->pdev, xbar, newcfg); + } + + bar->barcfg = newcfg; + + return 0; +} + +static int +reconfigure_bar(struct nfp6000_pcie *nfp, struct nfp_bar *bar, + int tgt, int act, int tok, u64 offset, size_t size, int width) +{ + u64 newbase; + u32 newcfg; + int err; + + err = compute_bar(nfp, bar, &newcfg, &newbase, + tgt, act, tok, offset, size, width); + if (err) + return err; + + bar->base = newbase; + + return nfp6000_bar_write(nfp, bar, newcfg); +} + +/* Check if BAR can be used with the given parameters. */ +static int matching_bar(struct nfp_bar *bar, u32 tgt, u32 act, u32 tok, + u64 offset, size_t size, int width) +{ + int bartgt, baract, bartok; + int barwidth; + u32 maptype; + + maptype = NFP_PCIE_BAR_PCIE2CPP_MapType_of(bar->barcfg); + bartgt = NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress_of(bar->barcfg); + bartok = NFP_PCIE_BAR_PCIE2CPP_Token_BaseAddress_of(bar->barcfg); + baract = NFP_PCIE_BAR_PCIE2CPP_Action_BaseAddress_of(bar->barcfg); + + barwidth = NFP_PCIE_BAR_PCIE2CPP_LengthSelect_of(bar->barcfg); + switch (barwidth) { + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT: + barwidth = 4; + break; + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_64BIT: + barwidth = 8; + break; + case NFP_PCIE_BAR_PCIE2CPP_LengthSelect_0BYTE: + barwidth = 0; + break; + default: + barwidth = -1; + break; + } + + switch (maptype) { + case NFP_PCIE_BAR_PCIE2CPP_MapType_TARGET: + bartok = -1; + fallthrough; + case NFP_PCIE_BAR_PCIE2CPP_MapType_BULK: + baract = NFP_CPP_ACTION_RW; + if (act == 0) + act = NFP_CPP_ACTION_RW; + fallthrough; + case NFP_PCIE_BAR_PCIE2CPP_MapType_FIXED: + break; + default: + /* We don't match explicit bars through the area interface */ + return 0; + } + + /* Make sure to match up the width */ + if (barwidth != width) + return 0; + + if ((bartgt < 0 || bartgt == tgt) && + (bartok < 0 || bartok == tok) && + (baract == 
act) && + bar->base <= offset && + (bar->base + (1 << bar->bitsize)) >= (offset + size)) + return 1; + + /* No match */ + return 0; +} + +static int +find_matching_bar(struct nfp6000_pcie *nfp, + u32 tgt, u32 act, u32 tok, u64 offset, size_t size, int width) +{ + int n; + + for (n = 0; n < nfp->bars; n++) { + struct nfp_bar *bar = &nfp->bar[n]; + + if (matching_bar(bar, tgt, act, tok, offset, size, width)) + return n; + } + + return -1; +} + +/* Return EAGAIN if no resource is available */ +static int +find_unused_bar_noblock(const struct nfp6000_pcie *nfp, + int tgt, int act, int tok, + u64 offset, size_t size, int width) +{ + int n, busy = 0; + + for (n = 0; n < nfp->bars; n++) { + const struct nfp_bar *bar = &nfp->bar[n]; + int err; + + if (!bar->bitsize) + continue; + + /* Just check to see if we can make it fit... */ + err = compute_bar(nfp, bar, NULL, NULL, + tgt, act, tok, offset, size, width); + if (err) + continue; + + if (!atomic_read(&bar->refcnt)) + return n; + + busy++; + } + + if (WARN(!busy, "No suitable BAR found for request tgt:0x%x act:0x%x tok:0x%x off:0x%llx size:%zd width:%d\n", + tgt, act, tok, offset, size, width)) + return -EINVAL; + + return -EAGAIN; +} + +static int +find_unused_bar_and_lock(struct nfp6000_pcie *nfp, + int tgt, int act, int tok, + u64 offset, size_t size, int width) +{ + unsigned long flags; + int n; + + spin_lock_irqsave(&nfp->bar_lock, flags); + + n = find_unused_bar_noblock(nfp, tgt, act, tok, offset, size, width); + if (n < 0) + spin_unlock_irqrestore(&nfp->bar_lock, flags); + else + __release(&nfp->bar_lock); + + return n; +} + +static void nfp_bar_get(struct nfp6000_pcie *nfp, struct nfp_bar *bar) +{ + atomic_inc(&bar->refcnt); +} + +static void nfp_bar_put(struct nfp6000_pcie *nfp, struct nfp_bar *bar) +{ + if (atomic_dec_and_test(&bar->refcnt)) + wake_up_interruptible(&nfp->bar_waiters); +} + +static int +nfp_wait_for_bar(struct nfp6000_pcie *nfp, int *barnum, + u32 tgt, u32 act, u32 tok, u64 offset, size_t size, 
int width) +{ + return wait_event_interruptible(nfp->bar_waiters, + (*barnum = find_unused_bar_and_lock(nfp, tgt, act, tok, + offset, size, width)) + != -EAGAIN); +} + +static int +nfp_alloc_bar(struct nfp6000_pcie *nfp, + u32 tgt, u32 act, u32 tok, + u64 offset, size_t size, int width, int nonblocking) +{ + unsigned long irqflags; + int barnum, retval; + + if (size > (1 << 24)) + return -EINVAL; + + spin_lock_irqsave(&nfp->bar_lock, irqflags); + barnum = find_matching_bar(nfp, tgt, act, tok, offset, size, width); + if (barnum >= 0) { + /* Found a perfect match. */ + nfp_bar_get(nfp, &nfp->bar[barnum]); + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + return barnum; + } + + barnum = find_unused_bar_noblock(nfp, tgt, act, tok, + offset, size, width); + if (barnum < 0) { + if (nonblocking) + goto err_nobar; + + /* Wait until a BAR becomes available. The + * find_unused_bar function will reclaim the bar_lock + * if a free BAR is found. + */ + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + retval = nfp_wait_for_bar(nfp, &barnum, tgt, act, tok, + offset, size, width); + if (retval) + return retval; + __acquire(&nfp->bar_lock); + } + + nfp_bar_get(nfp, &nfp->bar[barnum]); + retval = reconfigure_bar(nfp, &nfp->bar[barnum], + tgt, act, tok, offset, size, width); + if (retval < 0) { + nfp_bar_put(nfp, &nfp->bar[barnum]); + barnum = retval; + } + +err_nobar: + spin_unlock_irqrestore(&nfp->bar_lock, irqflags); + return barnum; +} + +static void disable_bars(struct nfp6000_pcie *nfp); + +static int bar_cmp(const void *aptr, const void *bptr) +{ + const struct nfp_bar *a = aptr, *b = bptr; + + if (a->bitsize == b->bitsize) + return a->index - b->index; + else + return a->bitsize - b->bitsize; +} + +/* Map all PCI bars and fetch the actual BAR configurations from the + * board. We assume that the BAR with the PCIe config block is + * already mapped. 
+ * + * BAR0.0: Reserved for General Mapping (for MSI-X access to PCIe SRAM) + * BAR0.1: Reserved for XPB access (for MSI-X access to PCIe PBA) + * BAR0.2: -- + * BAR0.3: -- + * BAR0.4: Reserved for Explicit 0.0-0.3 access + * BAR0.5: Reserved for Explicit 1.0-1.3 access + * BAR0.6: Reserved for Explicit 2.0-2.3 access + * BAR0.7: Reserved for Explicit 3.0-3.3 access + * + * BAR1.0-BAR1.7: -- + * BAR2.0-BAR2.7: -- + */ +static int enable_bars(struct nfp6000_pcie *nfp, u16 interface) +{ + const u32 barcfg_msix_general = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT; + const u32 barcfg_msix_xpb = + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_BULK) | + NFP_PCIE_BAR_PCIE2CPP_LengthSelect_32BIT | + NFP_PCIE_BAR_PCIE2CPP_Target_BaseAddress( + NFP_CPP_TARGET_ISLAND_XPB); + const u32 barcfg_explicit[4] = { + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT0), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT1), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT2), + NFP_PCIE_BAR_PCIE2CPP_MapType( + NFP_PCIE_BAR_PCIE2CPP_MapType_EXPLICIT3), + }; + char status_msg[196] = {}; + int i, err, bars_free; + struct nfp_bar *bar; + int expl_groups; + char *msg, *end; + + msg = status_msg + + snprintf(status_msg, sizeof(status_msg) - 1, "RESERVED BARs: "); + end = status_msg + sizeof(status_msg) - 1; + + bar = &nfp->bar[0]; + for (i = 0; i < ARRAY_SIZE(nfp->bar); i++, bar++) { + struct resource *res; + + res = &nfp->pdev->resource[(i >> 3) * 2]; + + /* Skip over BARs that are not IORESOURCE_MEM */ + if (!(resource_type(res) & IORESOURCE_MEM)) { + bar--; + continue; + } + + bar->resource = res; + bar->barcfg = 0; + + bar->nfp = nfp; + bar->index = i; + bar->mask = nfp_bar_resource_len(bar) - 1; + bar->bitsize = fls(bar->mask); + bar->base = 0; + bar->iomem = NULL; + } + + nfp->bars = bar - &nfp->bar[0]; + if (nfp->bars < 8) { 
+ dev_err(nfp->dev, "No usable BARs found!\n"); + return -EINVAL; + } + + bars_free = nfp->bars; + + /* Convert unit ID (0..3) to signal master/data master ID (0x40..0x70) + */ + mutex_init(&nfp->expl.mutex); + + nfp->expl.master_id = ((NFP_CPP_INTERFACE_UNIT_of(interface) & 3) + 4) + << 4; + nfp->expl.signal_ref = 0x10; + + /* Configure, and lock, BAR0.0 for General Target use (MSI-X SRAM) */ + bar = &nfp->bar[0]; + if (nfp_bar_resource_len(bar) >= NFP_PCI_MIN_MAP_SIZE) + bar->iomem = ioremap(nfp_bar_resource_start(bar), + nfp_bar_resource_len(bar)); + if (bar->iomem) { + int pf; + + msg += scnprintf(msg, end - msg, "0.0: General/MSI-X SRAM, "); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp6000_bar_write(nfp, bar, barcfg_msix_general); + + nfp->expl.data = bar->iomem + NFP_PCIE_SRAM + + nfp->dev_info->pcie_expl_offset; + + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NFP3800: + pf = nfp->pdev->devfn & 7; + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(pf); + break; + case PCI_DEVICE_ID_NFP4000: + case PCI_DEVICE_ID_NFP5000: + case PCI_DEVICE_ID_NFP6000: + nfp->iomem.csr = bar->iomem + NFP_PCIE_BAR(0); + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; + } + nfp->iomem.em = bar->iomem + NFP_PCIE_EM; + } + + switch (nfp->pdev->device) { + case PCI_DEVICE_ID_NFP3800: + expl_groups = 1; + break; + case PCI_DEVICE_ID_NFP4000: + case PCI_DEVICE_ID_NFP5000: + case PCI_DEVICE_ID_NFP6000: + expl_groups = 4; + break; + default: + dev_err(nfp->dev, "Unsupported device ID: %04hx!\n", + nfp->pdev->device); + err = -EINVAL; + goto err_unmap_bar0; + } + + /* Configure, and lock, BAR0.1 for PCIe XPB (MSI-X PBA) */ + bar = &nfp->bar[1]; + msg += scnprintf(msg, end - msg, "0.1: PCIe XPB/MSI-X PBA, "); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp6000_bar_write(nfp, bar, barcfg_msix_xpb); + + /* Use BAR0.4..BAR0.7 for EXPL IO */ + for (i = 0; i < 4; i++) { + int j; + + if (i >= 
NFP_PCIE_EXPLICIT_BARS || i >= expl_groups) { + nfp->expl.group[i].bitsize = 0; + continue; + } + + bar = &nfp->bar[4 + i]; + bar->iomem = ioremap(nfp_bar_resource_start(bar), + nfp_bar_resource_len(bar)); + if (bar->iomem) { + msg += scnprintf(msg, end - msg, + "0.%d: Explicit%d, ", 4 + i, i); + atomic_inc(&bar->refcnt); + bars_free--; + + nfp->expl.group[i].bitsize = bar->bitsize; + nfp->expl.group[i].addr = bar->iomem; + nfp6000_bar_write(nfp, bar, barcfg_explicit[i]); + + for (j = 0; j < 4; j++) + nfp->expl.group[i].free[j] = true; + } + nfp->iomem.expl[i] = bar->iomem; + } + + /* Sort bars by bit size - use the smallest possible first. */ + sort(&nfp->bar[0], nfp->bars, sizeof(nfp->bar[0]), + bar_cmp, NULL); + + dev_info(nfp->dev, "%sfree: %d/%d\n", status_msg, bars_free, nfp->bars); + + return 0; + +err_unmap_bar0: + if (nfp->bar[0].iomem) + iounmap(nfp->bar[0].iomem); + return err; +} + +static void disable_bars(struct nfp6000_pcie *nfp) +{ + struct nfp_bar *bar = &nfp->bar[0]; + int n; + + for (n = 0; n < nfp->bars; n++, bar++) { + if (bar->iomem) { + iounmap(bar->iomem); + bar->iomem = NULL; + } + } +} + +/* + * Generic CPP bus access interface. 
+ */ + +struct nfp6000_area_priv { + atomic_t refcnt; + + struct nfp_bar *bar; + u32 bar_offset; + + u32 target; + u32 action; + u32 token; + u64 offset; + struct { + int read; + int write; + int bar; + } width; + size_t size; + + void __iomem *iomem; + phys_addr_t phys; + struct resource resource; +}; + +static int nfp6000_area_init(struct nfp_cpp_area *area, u32 dest, + unsigned long long address, unsigned long size) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + u32 target = NFP_CPP_ID_TARGET_of(dest); + u32 action = NFP_CPP_ID_ACTION_of(dest); + u32 token = NFP_CPP_ID_TOKEN_of(dest); + int pp; + + pp = nfp_target_pushpull(NFP_CPP_ID(target, action, token), address); + if (pp < 0) + return pp; + + priv->width.read = PUSH_WIDTH(pp); + priv->width.write = PULL_WIDTH(pp); + if (priv->width.read > 0 && + priv->width.write > 0 && + priv->width.read != priv->width.write) { + return -EINVAL; + } + + if (priv->width.read > 0) + priv->width.bar = priv->width.read; + else + priv->width.bar = priv->width.write; + + atomic_set(&priv->refcnt, 0); + priv->bar = NULL; + + priv->target = target; + priv->action = action; + priv->token = token; + priv->offset = address; + priv->size = size; + memset(&priv->resource, 0, sizeof(priv->resource)); + + return 0; +} + +static void nfp6000_area_cleanup(struct nfp_cpp_area *area) +{ +} + +static void priv_area_get(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + atomic_inc(&priv->refcnt); +} + +static int priv_area_put(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + if (WARN_ON(!atomic_read(&priv->refcnt))) + return 0; + + return atomic_dec_and_test(&priv->refcnt); +} + +static int nfp6000_area_acquire(struct nfp_cpp_area *area) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + int barnum, err; + + if (priv->bar) { + /* Already allocated. 
*/ + priv_area_get(area); + return 0; + } + + barnum = nfp_alloc_bar(nfp, priv->target, priv->action, priv->token, + priv->offset, priv->size, priv->width.bar, 1); + + if (barnum < 0) { + err = barnum; + goto err_alloc_bar; + } + priv->bar = &nfp->bar[barnum]; + + /* Calculate offset into BAR. */ + if (nfp_bar_maptype(priv->bar) == + NFP_PCIE_BAR_PCIE2CPP_MapType_GENERAL) { + priv->bar_offset = priv->offset & + (NFP_PCIE_P2C_GENERAL_SIZE(priv->bar) - 1); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TARGET_OFFSET( + priv->bar, priv->target); + priv->bar_offset += NFP_PCIE_P2C_GENERAL_TOKEN_OFFSET( + priv->bar, priv->token); + } else { + priv->bar_offset = priv->offset & priv->bar->mask; + } + + /* We don't actually try to acquire the resource area using + * request_resource. This would prevent sharing the mapped + * BAR between multiple CPP areas and prevent us from + * effectively utilizing the limited amount of BAR resources. + */ + priv->phys = nfp_bar_resource_start(priv->bar) + priv->bar_offset; + priv->resource.name = nfp_cpp_area_name(area); + priv->resource.start = priv->phys; + priv->resource.end = priv->resource.start + priv->size - 1; + priv->resource.flags = IORESOURCE_MEM; + + /* If the bar is already mapped in, use its mapping */ + if (priv->bar->iomem) + priv->iomem = priv->bar->iomem + priv->bar_offset; + else + /* Must have been too big. Sub-allocate. */ + priv->iomem = ioremap(priv->phys, priv->size); + + if (IS_ERR_OR_NULL(priv->iomem)) { + dev_err(nfp->dev, "Can't ioremap() a %d byte region of BAR %d\n", + (int)priv->size, priv->bar->index); + err = !priv->iomem ? 
-ENOMEM : PTR_ERR(priv->iomem); + priv->iomem = NULL; + goto err_iomem_remap; + } + + priv_area_get(area); + return 0; + +err_iomem_remap: + nfp_bar_put(nfp, priv->bar); + priv->bar = NULL; +err_alloc_bar: + return err; +} + +static void nfp6000_area_release(struct nfp_cpp_area *area) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_area_cpp(area)); + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + if (!priv_area_put(area)) + return; + + if (!priv->bar->iomem) + iounmap(priv->iomem); + + nfp_bar_put(nfp, priv->bar); + + priv->bar = NULL; + priv->iomem = NULL; +} + +static phys_addr_t nfp6000_area_phys(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->phys; +} + +static void __iomem *nfp6000_area_iomem(struct nfp_cpp_area *area) +{ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->iomem; +} + +static struct resource *nfp6000_area_resource(struct nfp_cpp_area *area) +{ + /* Use the BAR resource as the resource for the CPP area. + * This enables us to share the BAR among multiple CPP areas + * without resource conflicts. 
+ */ + struct nfp6000_area_priv *priv = nfp_cpp_area_priv(area); + + return priv->bar->resource; +} + +static int nfp6000_area_read(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + u64 __maybe_unused *wrptr64 = kernel_vaddr; + const u64 __iomem __maybe_unused *rdptr64; + struct nfp6000_area_priv *priv; + u32 *wrptr32 = kernel_vaddr; + const u32 __iomem *rdptr32; + int n, width; + + priv = nfp_cpp_area_priv(area); + rdptr64 = priv->iomem + offset; + rdptr32 = priv->iomem + offset; + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.read; + if (width <= 0) + return -EINVAL; + + /* MU reads via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_MU & NFP_CPP_TARGET_ID_MASK) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4)) + width = TARGET_WIDTH_32; + + /* Unaligned? Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return nfp_cpp_explicit_read(nfp_cpp_area_cpp(area), + NFP_CPP_ID(priv->target, + priv->action, + priv->token), + priv->offset + offset, + kernel_vaddr, length, width); + + if (WARN_ON(!priv->bar)) + return -EFAULT; + + switch (width) { + case TARGET_WIDTH_32: + if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(u32)) + *wrptr32++ = __raw_readl(rdptr32++); + return n; +#ifdef __raw_readq + case TARGET_WIDTH_64: + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(u64)) + *wrptr64++ = __raw_readq(rdptr64++); + return n; +#endif + default: + return -EINVAL; + } +} + +static int +nfp6000_area_write(struct nfp_cpp_area *area, + const void *kernel_vaddr, + unsigned long offset, unsigned int length) +{ + const u64 __maybe_unused *rdptr64 = kernel_vaddr; + u64 __iomem __maybe_unused *wrptr64; + const u32 *rdptr32 = kernel_vaddr; + struct 
nfp6000_area_priv *priv; + u32 __iomem *wrptr32; + int n, width; + + priv = nfp_cpp_area_priv(area); + wrptr64 = priv->iomem + offset; + wrptr32 = priv->iomem + offset; + + if (offset + length > priv->size) + return -EFAULT; + + width = priv->width.write; + if (width <= 0) + return -EINVAL; + + /* MU writes via a PCIe2CPP BAR support 32bit (and other) lengths */ + if (priv->target == (NFP_CPP_TARGET_ID_MASK & NFP_CPP_TARGET_MU) && + priv->action == NFP_CPP_ACTION_RW && + (offset % sizeof(u64) == 4 || length % sizeof(u64) == 4)) + width = TARGET_WIDTH_32; + + /* Unaligned? Translate to an explicit access */ + if ((priv->offset + offset) & (width - 1)) + return nfp_cpp_explicit_write(nfp_cpp_area_cpp(area), + NFP_CPP_ID(priv->target, + priv->action, + priv->token), + priv->offset + offset, + kernel_vaddr, length, width); + + if (WARN_ON(!priv->bar)) + return -EFAULT; + + switch (width) { + case TARGET_WIDTH_32: + if (offset % sizeof(u32) != 0 || length % sizeof(u32) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(u32)) { + __raw_writel(*rdptr32++, wrptr32++); + wmb(); + } + return n; +#ifdef __raw_writeq + case TARGET_WIDTH_64: + if (offset % sizeof(u64) != 0 || length % sizeof(u64) != 0) + return -EINVAL; + + for (n = 0; n < length; n += sizeof(u64)) { + __raw_writeq(*rdptr64++, wrptr64++); + wmb(); + } + return n; +#endif + default: + return -EINVAL; + } +} + +struct nfp6000_explicit_priv { + struct nfp6000_pcie *nfp; + struct { + int group; + int area; + } bar; + int bitsize; + void __iomem *data; + void __iomem *addr; +}; + +static int nfp6000_explicit_acquire(struct nfp_cpp_explicit *expl) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(nfp_cpp_explicit_cpp(expl)); + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + int i, j; + + mutex_lock(&nfp->expl.mutex); + for (i = 0; i < ARRAY_SIZE(nfp->expl.group); i++) { + if (!nfp->expl.group[i].bitsize) + continue; + + for (j = 0; j < ARRAY_SIZE(nfp->expl.group[i].free); j++) { + u16 
data_offset; + + if (!nfp->expl.group[i].free[j]) + continue; + + priv->nfp = nfp; + priv->bar.group = i; + priv->bar.area = j; + priv->bitsize = nfp->expl.group[i].bitsize - 2; + + data_offset = (priv->bar.group << 9) + + (priv->bar.area << 7); + priv->data = nfp->expl.data + data_offset; + priv->addr = nfp->expl.group[i].addr + + (priv->bar.area << priv->bitsize); + nfp->expl.group[i].free[j] = false; + + mutex_unlock(&nfp->expl.mutex); + return 0; + } + } + mutex_unlock(&nfp->expl.mutex); + + return -EAGAIN; +} + +static void nfp6000_explicit_release(struct nfp_cpp_explicit *expl) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + struct nfp6000_pcie *nfp = priv->nfp; + + mutex_lock(&nfp->expl.mutex); + nfp->expl.group[priv->bar.group].free[priv->bar.area] = true; + mutex_unlock(&nfp->expl.mutex); +} + +static int nfp6000_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + const u32 *src = buff; + size_t i; + + for (i = 0; i < len; i += sizeof(u32)) + writel(*(src++), priv->data + i); + + return i; +} + +static int +nfp6000_explicit_do(struct nfp_cpp_explicit *expl, + const struct nfp_cpp_explicit_command *cmd, u64 address) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + u8 signal_master, signal_ref, data_master; + struct nfp6000_pcie *nfp = priv->nfp; + int sigmask = 0; + u16 data_ref; + u32 csr[3]; + + if (cmd->siga_mode) + sigmask |= 1 << cmd->siga; + if (cmd->sigb_mode) + sigmask |= 1 << cmd->sigb; + + signal_master = cmd->signal_master; + if (!signal_master) + signal_master = nfp->expl.master_id; + + signal_ref = cmd->signal_ref; + if (signal_master == nfp->expl.master_id) + signal_ref = nfp->expl.signal_ref + + ((priv->bar.group * 4 + priv->bar.area) << 1); + + data_master = cmd->data_master; + if (!data_master) + data_master = nfp->expl.master_id; + + data_ref = cmd->data_ref; + if (data_master == 
nfp->expl.master_id) + data_ref = 0x1000 + + (priv->bar.group << 9) + (priv->bar.area << 7); + + csr[0] = NFP_PCIE_BAR_EXPLICIT_BAR0_SignalType(sigmask) | + NFP_PCIE_BAR_EXPLICIT_BAR0_Token( + NFP_CPP_ID_TOKEN_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR0_Address(address >> 16); + + csr[1] = NFP_PCIE_BAR_EXPLICIT_BAR1_SignalRef(signal_ref) | + NFP_PCIE_BAR_EXPLICIT_BAR1_DataMaster(data_master) | + NFP_PCIE_BAR_EXPLICIT_BAR1_DataRef(data_ref); + + csr[2] = NFP_PCIE_BAR_EXPLICIT_BAR2_Target( + NFP_CPP_ID_TARGET_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR2_Action( + NFP_CPP_ID_ACTION_of(cmd->cpp_id)) | + NFP_PCIE_BAR_EXPLICIT_BAR2_Length(cmd->len) | + NFP_PCIE_BAR_EXPLICIT_BAR2_ByteMask(cmd->byte_mask) | + NFP_PCIE_BAR_EXPLICIT_BAR2_SignalMaster(signal_master); + + if (nfp->iomem.csr) { + writel(csr[0], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group, + priv->bar.area)); + writel(csr[1], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group, + priv->bar.area)); + writel(csr[2], nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group, + priv->bar.area)); + /* Readback to ensure BAR is flushed */ + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR0(priv->bar.group, + priv->bar.area)); + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR1(priv->bar.group, + priv->bar.area)); + readl(nfp->iomem.csr + + NFP_PCIE_BAR_EXPLICIT_BAR2(priv->bar.group, + priv->bar.area)); + } else { + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR0( + priv->bar.group, priv->bar.area), + csr[0]); + + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR1( + priv->bar.group, priv->bar.area), + csr[1]); + + pci_write_config_dword(nfp->pdev, 0x400 + + NFP_PCIE_BAR_EXPLICIT_BAR2( + priv->bar.group, priv->bar.area), + csr[2]); + } + + /* Issue the 'kickoff' transaction */ + readb(priv->addr + (address & ((1 << priv->bitsize) - 1))); + + return sigmask; +} + +static int nfp6000_explicit_get(struct nfp_cpp_explicit *expl, + void 
*buff, size_t len) +{ + struct nfp6000_explicit_priv *priv = nfp_cpp_explicit_priv(expl); + u32 *dst = buff; + size_t i; + + for (i = 0; i < len; i += sizeof(u32)) + *(dst++) = readl(priv->data + i); + + return i; +} + +static int nfp6000_init(struct nfp_cpp *cpp) +{ + nfp_cpp_area_cache_add(cpp, SZ_64K); + nfp_cpp_area_cache_add(cpp, SZ_64K); + nfp_cpp_area_cache_add(cpp, SZ_256K); + + return 0; +} + +static void nfp6000_free(struct nfp_cpp *cpp) +{ + struct nfp6000_pcie *nfp = nfp_cpp_priv(cpp); + + disable_bars(nfp); + kfree(nfp); +} + +static int nfp6000_read_serial(struct device *dev, u8 *serial) +{ + struct pci_dev *pdev = to_pci_dev(dev); + u64 dsn; + + dsn = pci_get_dsn(pdev); + if (!dsn) { + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; + } + + put_unaligned_be32((u32)(dsn >> 32), serial); + put_unaligned_be16((u16)(dsn >> 16), serial + 4); + + return 0; +} + +static int nfp6000_get_interface(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + u64 dsn; + + dsn = pci_get_dsn(pdev); + if (!dsn) { + dev_err(dev, "can't find PCIe Serial Number Capability\n"); + return -EINVAL; + } + + return dsn & 0xffff; +} + +static const struct nfp_cpp_operations nfp6000_pcie_ops = { + .owner = THIS_MODULE, + + .init = nfp6000_init, + .free = nfp6000_free, + + .read_serial = nfp6000_read_serial, + .get_interface = nfp6000_get_interface, + + .area_priv_size = sizeof(struct nfp6000_area_priv), + .area_init = nfp6000_area_init, + .area_cleanup = nfp6000_area_cleanup, + .area_acquire = nfp6000_area_acquire, + .area_release = nfp6000_area_release, + .area_phys = nfp6000_area_phys, + .area_iomem = nfp6000_area_iomem, + .area_resource = nfp6000_area_resource, + .area_read = nfp6000_area_read, + .area_write = nfp6000_area_write, + + .explicit_priv_size = sizeof(struct nfp6000_explicit_priv), + .explicit_acquire = nfp6000_explicit_acquire, + .explicit_release = nfp6000_explicit_release, + .explicit_put = nfp6000_explicit_put, + 
.explicit_do = nfp6000_explicit_do, + .explicit_get = nfp6000_explicit_get, +}; + +/** + * nfp_cpp_from_nfp6000_pcie() - Build a NFP CPP bus from a NFP6000 PCI device + * @pdev: NFP6000 PCI device + * @dev_info: NFP ASIC params + * + * Return: NFP CPP handle + */ +struct nfp_cpp * +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info) +{ + struct nfp6000_pcie *nfp; + u16 interface; + int err; + + /* Finished with card initialization. */ + dev_info(&pdev->dev, "Network Flow Processor %s PCIe Card Probe\n", + dev_info->chip_names); + pcie_print_link_status(pdev); + + nfp = kzalloc(sizeof(*nfp), GFP_KERNEL); + if (!nfp) { + err = -ENOMEM; + goto err_ret; + } + + nfp->dev = &pdev->dev; + nfp->pdev = pdev; + nfp->dev_info = dev_info; + init_waitqueue_head(&nfp->bar_waiters); + spin_lock_init(&nfp->bar_lock); + + interface = nfp6000_get_interface(&pdev->dev); + + if (NFP_CPP_INTERFACE_TYPE_of(interface) != + NFP_CPP_INTERFACE_TYPE_PCI) { + dev_err(&pdev->dev, + "Interface type %d is not the expected %d\n", + NFP_CPP_INTERFACE_TYPE_of(interface), + NFP_CPP_INTERFACE_TYPE_PCI); + err = -ENODEV; + goto err_free_nfp; + } + + if (NFP_CPP_INTERFACE_CHANNEL_of(interface) != + NFP_CPP_INTERFACE_CHANNEL_PEROPENER) { + dev_err(&pdev->dev, "Interface channel %d is not the expected %d\n", + NFP_CPP_INTERFACE_CHANNEL_of(interface), + NFP_CPP_INTERFACE_CHANNEL_PEROPENER); + err = -ENODEV; + goto err_free_nfp; + } + + err = enable_bars(nfp, interface); + if (err) + goto err_free_nfp; + + /* Probe for all the common NFP devices */ + return nfp_cpp_from_operations(&nfp6000_pcie_ops, &pdev->dev, nfp); + +err_free_nfp: + kfree(nfp); +err_ret: + dev_err(&pdev->dev, "NFP6000 PCI setup failed\n"); + return ERR_PTR(err); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h new file mode 100644 index 000000000..097660b67 --- /dev/null +++ 
b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp6000_pcie.h @@ -0,0 +1,17 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +/* + * nfp6000_pcie.h + * Author: Jason McMullan <jason.mcmullan@netronome.com> + */ + +#ifndef NFP6000_PCIE_H +#define NFP6000_PCIE_H + +#include "nfp_cpp.h" + +struct nfp_cpp * +nfp_cpp_from_nfp6000_pcie(struct pci_dev *pdev, const struct nfp_dev_info *dev_info); + +#endif /* NFP6000_PCIE_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h new file mode 100644 index 000000000..3d172e255 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_arm.h @@ -0,0 +1,216 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +/* + * nfp_arm.h + * Definitions for ARM-based registers and memory spaces + */ + +#ifndef NFP_ARM_H +#define NFP_ARM_H + +#define NFP_ARM_QUEUE(_q) (0x100000 + (0x800 * ((_q) & 0xff))) +#define NFP_ARM_IM 0x200000 +#define NFP_ARM_EM 0x300000 +#define NFP_ARM_GCSR 0x400000 +#define NFP_ARM_MPCORE 0x800000 +#define NFP_ARM_PL310 0xa00000 +/* Register Type: BulkBARConfig */ +#define NFP_ARM_GCSR_BULK_BAR(_bar) (0x0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_BULK_BAR_TYPE (0x1 << 31) +#define NFP_ARM_GCSR_BULK_BAR_TYPE_BULK (0x0) +#define NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA (0x80000000) +#define NFP_ARM_GCSR_BULK_BAR_TGT(_x) (((_x) & 0xf) << 27) +#define NFP_ARM_GCSR_BULK_BAR_TGT_of(_x) (((_x) >> 27) & 0xf) +#define NFP_ARM_GCSR_BULK_BAR_TOK(_x) (((_x) & 0x3) << 25) +#define NFP_ARM_GCSR_BULK_BAR_TOK_of(_x) (((_x) >> 25) & 0x3) +#define NFP_ARM_GCSR_BULK_BAR_LEN (0x1 << 24) +#define NFP_ARM_GCSR_BULK_BAR_LEN_32BIT (0x0) +#define NFP_ARM_GCSR_BULK_BAR_LEN_64BIT (0x1000000) +#define NFP_ARM_GCSR_BULK_BAR_ADDR(_x) ((_x) & 0x7ff) +#define NFP_ARM_GCSR_BULK_BAR_ADDR_of(_x) ((_x) & 0x7ff) +/* Register Type: 
ExpansionBARConfig */ +#define NFP_ARM_GCSR_EXPA_BAR(_bar) (0x20 + (0x4 * ((_bar) & 0xf))) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE (0x1 << 31) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA (0x0) +#define NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL (0x80000000) +#define NFP_ARM_GCSR_EXPA_BAR_TGT(_x) (((_x) & 0xf) << 27) +#define NFP_ARM_GCSR_EXPA_BAR_TGT_of(_x) (((_x) >> 27) & 0xf) +#define NFP_ARM_GCSR_EXPA_BAR_TOK(_x) (((_x) & 0x3) << 25) +#define NFP_ARM_GCSR_EXPA_BAR_TOK_of(_x) (((_x) >> 25) & 0x3) +#define NFP_ARM_GCSR_EXPA_BAR_LEN (0x1 << 24) +#define NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT (0x0) +#define NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT (0x1000000) +#define NFP_ARM_GCSR_EXPA_BAR_ACT(_x) (((_x) & 0x1f) << 19) +#define NFP_ARM_GCSR_EXPA_BAR_ACT_of(_x) (((_x) >> 19) & 0x1f) +#define NFP_ARM_GCSR_EXPA_BAR_ACT_DERIVED (0) +#define NFP_ARM_GCSR_EXPA_BAR_ADDR(_x) ((_x) & 0x7fff) +#define NFP_ARM_GCSR_EXPA_BAR_ADDR_of(_x) ((_x) & 0x7fff) +/* Register Type: ExplicitBARConfig0_Reg */ +#define NFP_ARM_GCSR_EXPL0_BAR(_bar) (0x60 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL0_BAR_ADDR(_x) ((_x) & 0x3ffff) +#define NFP_ARM_GCSR_EXPL0_BAR_ADDR_of(_x) ((_x) & 0x3ffff) +/* Register Type: ExplicitBARConfig1_Reg */ +#define NFP_ARM_GCSR_EXPL1_BAR(_bar) (0x80 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL1_BAR_POSTED (0x1 << 31) +#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(_x) (((_x) & 0x7f) << 24) +#define NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF_of(_x) (((_x) >> 24) & 0x7f) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(_x) (((_x) & 0xff) << 16) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER_of(_x) (((_x) >> 16) & 0xff) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(_x) ((_x) & 0x3fff) +#define NFP_ARM_GCSR_EXPL1_BAR_DATA_REF_of(_x) ((_x) & 0x3fff) +/* Register Type: ExplicitBARConfig2_Reg */ +#define NFP_ARM_GCSR_EXPL2_BAR(_bar) (0xa0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL2_BAR_TGT(_x) (((_x) & 0xf) << 28) +#define NFP_ARM_GCSR_EXPL2_BAR_TGT_of(_x) (((_x) >> 28) & 0xf) +#define 
NFP_ARM_GCSR_EXPL2_BAR_ACT(_x) (((_x) & 0x1f) << 23) +#define NFP_ARM_GCSR_EXPL2_BAR_ACT_of(_x) (((_x) >> 23) & 0x1f) +#define NFP_ARM_GCSR_EXPL2_BAR_LEN(_x) (((_x) & 0x1f) << 18) +#define NFP_ARM_GCSR_EXPL2_BAR_LEN_of(_x) (((_x) >> 18) & 0x1f) +#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(_x) (((_x) & 0xff) << 10) +#define NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK_of(_x) (((_x) >> 10) & 0xff) +#define NFP_ARM_GCSR_EXPL2_BAR_TOK(_x) (((_x) & 0x3) << 8) +#define NFP_ARM_GCSR_EXPL2_BAR_TOK_of(_x) (((_x) >> 8) & 0x3) +#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(_x) ((_x) & 0xff) +#define NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER_of(_x) ((_x) & 0xff) +/* Register Type: PostedCommandSignal */ +#define NFP_ARM_GCSR_EXPL_POST(_bar) (0xc0 + (0x4 * ((_bar) & 0x7))) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B(_x) (((_x) & 0x7f) << 25) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_of(_x) (((_x) >> 25) & 0x7f) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS (0x1 << 24) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL (0x0) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH (0x1000000) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A(_x) (((_x) & 0x7f) << 17) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_of(_x) (((_x) >> 17) & 0x7f) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS (0x1 << 16) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL (0x0) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH (0x10000) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_RCVD (0x1 << 7) +#define NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID (0x1 << 6) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_RCVD (0x1 << 5) +#define NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID (0x1 << 4) +#define NFP_ARM_GCSR_EXPL_POST_CMD_COMPLETE (0x1) +/* Register Type: MPCoreBaseAddress */ +#define NFP_ARM_GCSR_MPCORE_BASE 0x00e0 +#define NFP_ARM_GCSR_MPCORE_BASE_ADDR(_x) (((_x) & 0x7ffff) << 13) +#define NFP_ARM_GCSR_MPCORE_BASE_ADDR_of(_x) (((_x) >> 13) & 0x7ffff) +/* Register Type: PL310BaseAddress */ +#define NFP_ARM_GCSR_PL310_BASE 0x00e4 +#define NFP_ARM_GCSR_PL310_BASE_ADDR(_x) (((_x) & 0xfffff) << 12) +#define 
NFP_ARM_GCSR_PL310_BASE_ADDR_of(_x) (((_x) >> 12) & 0xfffff) +/* Register Type: MPCoreConfig */ +#define NFP_ARM_GCSR_MP0_CFG 0x00e8 +#define NFP_ARM_GCSR_MP0_CFG_SPI_BOOT (0x1 << 14) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN(_x) (((_x) & 0x3) << 12) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_of(_x) (((_x) >> 12) & 0x3) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_LITTLE (0) +#define NFP_ARM_GCSR_MP0_CFG_ENDIAN_BIG (1) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR (0x1 << 8) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_LO (0x0) +#define NFP_ARM_GCSR_MP0_CFG_RESET_VECTOR_HI (0x100) +#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN(_x) (((_x) & 0xf) << 4) +#define NFP_ARM_GCSR_MP0_CFG_OUTCLK_EN_of(_x) (((_x) >> 4) & 0xf) +#define NFP_ARM_GCSR_MP0_CFG_ARMID(_x) ((_x) & 0xf) +#define NFP_ARM_GCSR_MP0_CFG_ARMID_of(_x) ((_x) & 0xf) +/* Register Type: MPCoreIDCacheDataError */ +#define NFP_ARM_GCSR_MP0_CACHE_ERR 0x00ec +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D7 (0x1 << 15) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D6 (0x1 << 14) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D5 (0x1 << 13) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D4 (0x1 << 12) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D3 (0x1 << 11) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D2 (0x1 << 10) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D1 (0x1 << 9) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_D0 (0x1 << 8) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I7 (0x1 << 7) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I6 (0x1 << 6) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I5 (0x1 << 5) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I4 (0x1 << 4) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I3 (0x1 << 3) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I2 (0x1 << 2) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I1 (0x1 << 1) +#define NFP_ARM_GCSR_MP0_CACHE_ERR_MP0_I0 (0x1) +/* Register Type: ARMDFT */ +#define NFP_ARM_GCSR_DFT 0x0100 +#define NFP_ARM_GCSR_DFT_DBG_REQ (0x1 << 20) +#define NFP_ARM_GCSR_DFT_DBG_EN (0x1 << 19) +#define NFP_ARM_GCSR_DFT_WFE_EVT_TRG (0x1 << 18) +#define NFP_ARM_GCSR_DFT_ETM_WFI_RDY 
(0x1 << 17) +#define NFP_ARM_GCSR_DFT_ETM_PWR_ON (0x1 << 16) +#define NFP_ARM_GCSR_DFT_BIST_FAIL_of(_x) (((_x) >> 8) & 0xf) +#define NFP_ARM_GCSR_DFT_BIST_DONE_of(_x) (((_x) >> 4) & 0xf) +#define NFP_ARM_GCSR_DFT_BIST_RUN(_x) ((_x) & 0x7) +#define NFP_ARM_GCSR_DFT_BIST_RUN_of(_x) ((_x) & 0x7) + +/* Gasket CSRs */ +/* NOTE: These cannot be remapped, and are always at this location. + */ +#define NFP_ARM_GCSR_START (0xd6000000 + NFP_ARM_GCSR) +#define NFP_ARM_GCSR_SIZE SZ_64K + +/* BAR CSRs + */ +#define NFP_ARM_GCSR_BULK_BITS 11 +#define NFP_ARM_GCSR_EXPA_BITS 15 +#define NFP_ARM_GCSR_EXPL_BITS 18 + +#define NFP_ARM_GCSR_BULK_SHIFT (40 - 11) +#define NFP_ARM_GCSR_EXPA_SHIFT (40 - 15) +#define NFP_ARM_GCSR_EXPL_SHIFT (40 - 18) + +#define NFP_ARM_GCSR_BULK_SIZE (1 << NFP_ARM_GCSR_BULK_SHIFT) +#define NFP_ARM_GCSR_EXPA_SIZE (1 << NFP_ARM_GCSR_EXPA_SHIFT) +#define NFP_ARM_GCSR_EXPL_SIZE (1 << NFP_ARM_GCSR_EXPL_SHIFT) + +#define NFP_ARM_GCSR_EXPL2_CSR(target, action, length, \ + byte_mask, token, signal_master) \ + (NFP_ARM_GCSR_EXPL2_BAR_TGT(target) | \ + NFP_ARM_GCSR_EXPL2_BAR_ACT(action) | \ + NFP_ARM_GCSR_EXPL2_BAR_LEN(length) | \ + NFP_ARM_GCSR_EXPL2_BAR_BYTE_MASK(byte_mask) | \ + NFP_ARM_GCSR_EXPL2_BAR_TOK(token) | \ + NFP_ARM_GCSR_EXPL2_BAR_SIGNAL_MASTER(signal_master)) +#define NFP_ARM_GCSR_EXPL1_CSR(posted, signal_ref, data_master, data_ref) \ + (((posted) ? NFP_ARM_GCSR_EXPL1_BAR_POSTED : 0) | \ + NFP_ARM_GCSR_EXPL1_BAR_SIGNAL_REF(signal_ref) | \ + NFP_ARM_GCSR_EXPL1_BAR_DATA_MASTER(data_master) | \ + NFP_ARM_GCSR_EXPL1_BAR_DATA_REF(data_ref)) +#define NFP_ARM_GCSR_EXPL0_CSR(address) \ + NFP_ARM_GCSR_EXPL0_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPL_SHIFT) +#define NFP_ARM_GCSR_EXPL_POST_EXPECT_A(sig_ref, is_push, is_required) \ + (NFP_ARM_GCSR_EXPL_POST_SIG_A(sig_ref) | \ + ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PUSH : \ + NFP_ARM_GCSR_EXPL_POST_SIG_A_BUS_PULL) | \ + ((is_required) ? 
NFP_ARM_GCSR_EXPL_POST_SIG_A_VALID : 0)) +#define NFP_ARM_GCSR_EXPL_POST_EXPECT_B(sig_ref, is_push, is_required) \ + (NFP_ARM_GCSR_EXPL_POST_SIG_B(sig_ref) | \ + ((is_push) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PUSH : \ + NFP_ARM_GCSR_EXPL_POST_SIG_B_BUS_PULL) | \ + ((is_required) ? NFP_ARM_GCSR_EXPL_POST_SIG_B_VALID : 0)) + +#define NFP_ARM_GCSR_EXPA_CSR(mode, target, token, is_64, action, address) \ + (((mode) ? NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPL : \ + NFP_ARM_GCSR_EXPA_BAR_TYPE_EXPA) | \ + NFP_ARM_GCSR_EXPA_BAR_TGT(target) | \ + NFP_ARM_GCSR_EXPA_BAR_TOK(token) | \ + ((is_64) ? NFP_ARM_GCSR_EXPA_BAR_LEN_64BIT : \ + NFP_ARM_GCSR_EXPA_BAR_LEN_32BIT) | \ + NFP_ARM_GCSR_EXPA_BAR_ACT(action) | \ + NFP_ARM_GCSR_EXPA_BAR_ADDR((address) >> NFP_ARM_GCSR_EXPA_SHIFT)) + +#define NFP_ARM_GCSR_BULK_CSR(mode, target, token, is_64, address) \ + (((mode) ? NFP_ARM_GCSR_BULK_BAR_TYPE_EXPA : \ + NFP_ARM_GCSR_BULK_BAR_TYPE_BULK) | \ + NFP_ARM_GCSR_BULK_BAR_TGT(target) | \ + NFP_ARM_GCSR_BULK_BAR_TOK(token) | \ + ((is_64) ? NFP_ARM_GCSR_BULK_BAR_LEN_64BIT : \ + NFP_ARM_GCSR_BULK_BAR_LEN_32BIT) | \ + NFP_ARM_GCSR_BULK_BAR_ADDR((address) >> NFP_ARM_GCSR_BULK_SHIFT)) + + /* MP Core CSRs */ +#define NFP_ARM_MPCORE_SIZE SZ_128K + + /* PL320 CSRs */ +#define NFP_ARM_PCSR_SIZE SZ_64K + +#endif /* NFP_ARM_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h new file mode 100644 index 000000000..3d379e937 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpp.h @@ -0,0 +1,432 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp_cpp.h + * Interface for low-level NFP CPP access. 
+ * Authors: Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ +#ifndef __NFP_CPP_H__ +#define __NFP_CPP_H__ + +#include <linux/ctype.h> +#include <linux/types.h> +#include <linux/sizes.h> + +#ifndef NFP_SUBSYS +#define NFP_SUBSYS "nfp" +#endif + +#define nfp_err(cpp, fmt, args...) \ + dev_err(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_warn(cpp, fmt, args...) \ + dev_warn(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_info(cpp, fmt, args...) \ + dev_info(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_dbg(cpp, fmt, args...) \ + dev_dbg(nfp_cpp_device(cpp)->parent, NFP_SUBSYS ": " fmt, ## args) +#define nfp_printk(level, cpp, fmt, args...) \ + dev_printk(level, nfp_cpp_device(cpp)->parent, \ + NFP_SUBSYS ": " fmt, ## args) + +#define PCI_64BIT_BAR_COUNT 3 + +#define NFP_CPP_NUM_TARGETS 16 +/* Max size of area it should be safe to request */ +#define NFP_CPP_SAFE_AREA_SIZE SZ_2M + +/* NFP_MUTEX_WAIT_* are timeouts in seconds when waiting for a mutex */ +#define NFP_MUTEX_WAIT_FIRST_WARN 15 +#define NFP_MUTEX_WAIT_NEXT_WARN 5 +#define NFP_MUTEX_WAIT_ERROR 60 + +struct device; + +struct nfp_cpp_area; +struct nfp_cpp; +struct resource; + +/* Wildcard indicating a CPP read or write action + * + * The action used will be either read or write depending on whether a + * read or write instruction/call is performed on the NFP_CPP_ID. It + * is recomended that the RW action is used even if all actions to be + * performed on a NFP_CPP_ID are known to be only reads or writes. + * Doing so will in many cases save NFP CPP internal software + * resources. 
+ */ +#define NFP_CPP_ACTION_RW 32 + +#define NFP_CPP_TARGET_ID_MASK 0x1f + +#define NFP_CPP_ATOMIC_RD(target, island) \ + NFP_CPP_ISLAND_ID((target), 3, 0, (island)) +#define NFP_CPP_ATOMIC_WR(target, island) \ + NFP_CPP_ISLAND_ID((target), 4, 0, (island)) + +/** + * NFP_CPP_ID() - pack target, token, and action into a CPP ID. + * @target: NFP CPP target id + * @action: NFP CPP action id + * @token: NFP CPP token id + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP + * functions. Some CPP devices may allow wildcard identifiers to be + * specified. + * + * Return: NFP CPP ID + */ +#define NFP_CPP_ID(target, action, token) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8)) + +/** + * NFP_CPP_ISLAND_ID() - pack target, token, action, and island into a CPP ID. + * @target: NFP CPP target id + * @action: NFP CPP action id + * @token: NFP CPP token id + * @island: NFP CPP island id + * + * Create a 32-bit CPP identifier representing the access to be made. + * These identifiers are used as parameters to other NFP CPP + * functions. Some CPP devices may allow wildcard identifiers to be + * specified. 
+ * + * Return: NFP CPP ID + */ +#define NFP_CPP_ISLAND_ID(target, action, token, island) \ + ((((target) & 0x7f) << 24) | (((token) & 0xff) << 16) | \ + (((action) & 0xff) << 8) | (((island) & 0xff) << 0)) + +/** + * NFP_CPP_ID_TARGET_of() - Return the NFP CPP target of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP target + */ +static inline u8 NFP_CPP_ID_TARGET_of(u32 id) +{ + return (id >> 24) & NFP_CPP_TARGET_ID_MASK; +} + +/** + * NFP_CPP_ID_TOKEN_of() - Return the NFP CPP token of a NFP CPP ID + * @id: NFP CPP ID + * Return: NFP CPP token + */ +static inline u8 NFP_CPP_ID_TOKEN_of(u32 id) +{ + return (id >> 16) & 0xff; +} + +/** + * NFP_CPP_ID_ACTION_of() - Return the NFP CPP action of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP action + */ +static inline u8 NFP_CPP_ID_ACTION_of(u32 id) +{ + return (id >> 8) & 0xff; +} + +/** + * NFP_CPP_ID_ISLAND_of() - Return the NFP CPP island of a NFP CPP ID + * @id: NFP CPP ID + * + * Return: NFP CPP island + */ +static inline u8 NFP_CPP_ID_ISLAND_of(u32 id) +{ + return (id >> 0) & 0xff; +} + +/* NFP Interface types - logical interface for this CPP connection + * 4 bits are reserved for interface type. + */ +#define NFP_CPP_INTERFACE_TYPE_INVALID 0x0 +#define NFP_CPP_INTERFACE_TYPE_PCI 0x1 +#define NFP_CPP_INTERFACE_TYPE_ARM 0x2 +#define NFP_CPP_INTERFACE_TYPE_RPC 0x3 +#define NFP_CPP_INTERFACE_TYPE_ILA 0x4 + +/** + * NFP_CPP_INTERFACE() - Construct a 16-bit NFP Interface ID + * @type: NFP Interface Type + * @unit: Unit identifier for the interface type + * @channel: Channel identifier for the interface unit + * + * Interface IDs consists of 4 bits of interface type, + * 4 bits of unit identifier, and 8 bits of channel identifier. + * + * The NFP Interface ID is used in the implementation of + * NFP CPP API mutexes, which use the MU Atomic CompareAndWrite + * operation - hence the limit to 16 bits to be able to + * use the NFP Interface ID as a lock owner. 
+ * + * Return: Interface ID + */ +#define NFP_CPP_INTERFACE(type, unit, channel) \ + ((((type) & 0xf) << 12) | \ + (((unit) & 0xf) << 8) | \ + (((channel) & 0xff) << 0)) + +/** + * NFP_CPP_INTERFACE_TYPE_of() - Get the interface type + * @interface: NFP Interface ID + * Return: NFP Interface ID's type + */ +#define NFP_CPP_INTERFACE_TYPE_of(interface) (((interface) >> 12) & 0xf) + +/** + * NFP_CPP_INTERFACE_UNIT_of() - Get the interface unit + * @interface: NFP Interface ID + * Return: NFP Interface ID's unit + */ +#define NFP_CPP_INTERFACE_UNIT_of(interface) (((interface) >> 8) & 0xf) + +/** + * NFP_CPP_INTERFACE_CHANNEL_of() - Get the interface channel + * @interface: NFP Interface ID + * Return: NFP Interface ID's channel + */ +#define NFP_CPP_INTERFACE_CHANNEL_of(interface) (((interface) >> 0) & 0xff) + +/* Implemented in nfp_cppcore.c */ +void nfp_cpp_free(struct nfp_cpp *cpp); +u32 nfp_cpp_model(struct nfp_cpp *cpp); +u16 nfp_cpp_interface(struct nfp_cpp *cpp); +int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial); +unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp); + +struct nfp_cpp_area *nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, + u32 cpp_id, + const char *name, + unsigned long long address, + unsigned long size); +struct nfp_cpp_area *nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, + unsigned long size); +struct nfp_cpp_area * +nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 cpp_id, + unsigned long long address, unsigned long size); +void nfp_cpp_area_free(struct nfp_cpp_area *area); +int nfp_cpp_area_acquire(struct nfp_cpp_area *area); +int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area); +void nfp_cpp_area_release(struct nfp_cpp_area *area); +void nfp_cpp_area_release_free(struct nfp_cpp_area *area); +int nfp_cpp_area_read(struct nfp_cpp_area *area, unsigned long offset, + void *buffer, size_t length); +int nfp_cpp_area_write(struct nfp_cpp_area *area, unsigned 
long offset, + const void *buffer, size_t length); +size_t nfp_cpp_area_size(struct nfp_cpp_area *area); +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area); +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area); +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area); +struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area); +phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area); +void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area); + +int nfp_cpp_area_readl(struct nfp_cpp_area *area, unsigned long offset, + u32 *value); +int nfp_cpp_area_writel(struct nfp_cpp_area *area, unsigned long offset, + u32 value); +int nfp_cpp_area_readq(struct nfp_cpp_area *area, unsigned long offset, + u64 *value); +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, unsigned long offset, + u64 value); +int nfp_cpp_area_fill(struct nfp_cpp_area *area, unsigned long offset, + u32 value, size_t length); + +int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_tgt, u32 *value); +int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_tgt, u32 value); +int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, u32 mask, u32 value); + +/* Implemented in nfp_cpplib.c */ +int nfp_cpp_read(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, void *kernel_vaddr, size_t length); +int nfp_cpp_write(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, const void *kernel_vaddr, + size_t length); +int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 *value); +int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 value); +int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 *value); +int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 value); + +u8 __iomem * +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area); + +struct nfp_cpp_mutex; + +int 
nfp_cpp_mutex_init(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key_id); +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, + u32 key_id); +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex); +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, + unsigned long long address); + +/** + * nfp_cppcore_pcie_unit() - Get PCI Unit of a CPP handle + * @cpp: CPP handle + * + * Return: PCI unit for the NFP CPP handle + */ +static inline u8 nfp_cppcore_pcie_unit(struct nfp_cpp *cpp) +{ + return NFP_CPP_INTERFACE_UNIT_of(nfp_cpp_interface(cpp)); +} + +struct nfp_cpp_explicit; + +struct nfp_cpp_explicit_command { + u32 cpp_id; + u16 data_ref; + u8 data_master; + u8 len; + u8 byte_mask; + u8 signal_master; + u8 signal_ref; + u8 posted; + u8 siga; + u8 sigb; + s8 siga_mode; + s8 sigb_mode; +}; + +#define NFP_SERIAL_LEN 6 + +/** + * struct nfp_cpp_operations - NFP CPP operations structure + * @area_priv_size: Size of the nfp_cpp_area private data + * @owner: Owner module + * @init: Initialize the NFP CPP bus + * @free: Free the bus + * @read_serial: Read serial number to memory provided + * @get_interface: Return CPP interface + * @area_init: Initialize a new NFP CPP area (not serialized) + * @area_cleanup: Clean up a NFP CPP area (not serialized) + * @area_acquire: Acquire the NFP CPP area (serialized) + * @area_release: Release area (serialized) + * @area_resource: Get resource range of area (not serialized) + * @area_phys: Get physical address of area (not serialized) + * @area_iomem: Get iomem of area (not serialized) + * @area_read: Perform a read from a NFP CPP area (serialized) + * @area_write: Perform a write to a NFP CPP area (serialized) + * @explicit_priv_size: Size of an explicit's private area + * @explicit_acquire: 
Acquire an explicit area + * @explicit_release: Release an explicit area + * @explicit_put: Write data to send + * @explicit_get: Read data received + * @explicit_do: Perform the transaction + */ +struct nfp_cpp_operations { + size_t area_priv_size; + struct module *owner; + + int (*init)(struct nfp_cpp *cpp); + void (*free)(struct nfp_cpp *cpp); + + int (*read_serial)(struct device *dev, u8 *serial); + int (*get_interface)(struct device *dev); + + int (*area_init)(struct nfp_cpp_area *area, + u32 dest, unsigned long long address, + unsigned long size); + void (*area_cleanup)(struct nfp_cpp_area *area); + int (*area_acquire)(struct nfp_cpp_area *area); + void (*area_release)(struct nfp_cpp_area *area); + struct resource *(*area_resource)(struct nfp_cpp_area *area); + phys_addr_t (*area_phys)(struct nfp_cpp_area *area); + void __iomem *(*area_iomem)(struct nfp_cpp_area *area); + int (*area_read)(struct nfp_cpp_area *area, void *kernel_vaddr, + unsigned long offset, unsigned int length); + int (*area_write)(struct nfp_cpp_area *area, const void *kernel_vaddr, + unsigned long offset, unsigned int length); + + size_t explicit_priv_size; + int (*explicit_acquire)(struct nfp_cpp_explicit *expl); + void (*explicit_release)(struct nfp_cpp_explicit *expl); + int (*explicit_put)(struct nfp_cpp_explicit *expl, + const void *buff, size_t len); + int (*explicit_get)(struct nfp_cpp_explicit *expl, + void *buff, size_t len); + int (*explicit_do)(struct nfp_cpp_explicit *expl, + const struct nfp_cpp_explicit_command *cmd, + u64 address); +}; + +struct nfp_cpp * +nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, + struct device *parent, void *priv); +void *nfp_cpp_priv(struct nfp_cpp *priv); + +int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size); + +/* The following section contains extensions to the + * NFP CPP API, to be used in a Linux kernel-space context. 
+ */ + +/* Use this channel ID for multiple virtual channel interfaces + * (ie ARM and PCIe) when setting up the interface field. + */ +#define NFP_CPP_INTERFACE_CHANNEL_PEROPENER 255 +struct device *nfp_cpp_device(struct nfp_cpp *cpp); + +/* Return code masks for nfp_cpp_explicit_do() + */ +#define NFP_SIGNAL_MASK_A BIT(0) /* Signal A fired */ +#define NFP_SIGNAL_MASK_B BIT(1) /* Signal B fired */ + +enum nfp_cpp_explicit_signal_mode { + NFP_SIGNAL_NONE = 0, + NFP_SIGNAL_PUSH = 1, + NFP_SIGNAL_PUSH_OPTIONAL = -1, + NFP_SIGNAL_PULL = 2, + NFP_SIGNAL_PULL_OPTIONAL = -2, +}; + +struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp); +int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, u32 cpp_id, + u8 len, u8 mask); +int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl, + u8 data_master, u16 data_ref); +int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl, + u8 signal_master, u8 signal_ref); +int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted, + u8 siga, + enum nfp_cpp_explicit_signal_mode siga_mode, + u8 sigb, + enum nfp_cpp_explicit_signal_mode sigb_mode); +int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len); +int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address); +int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len); +void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl); +struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *expl); +void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit); + +/* Implemented in nfp_cpplib.c */ + +int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model); + +int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, void *buff, size_t len, + int width_read); + +int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, const void *buff, size_t len, + int width_write); + +#endif /* !__NFP_CPP_H__ */ diff --git 
a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c new file mode 100644 index 000000000..a8286d003 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cppcore.c @@ -0,0 +1,1499 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp_cppcore.c + * Provides low-level access to the NFP's internal CPP bus + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/ioport.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/mutex.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include "nfp_arm.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define NFP_ARM_GCSR_SOFTMODEL2 0x0000014c +#define NFP_ARM_GCSR_SOFTMODEL3 0x00000150 + +struct nfp_cpp_resource { + struct list_head list; + const char *name; + u32 cpp_id; + u64 start; + u64 end; +}; + +/** + * struct nfp_cpp - main nfpcore device structure + * Following fields are read-only after probe() exits or netdevs are spawned. 
+ * @dev: embedded device structure + * @op: low-level implementation ops + * @priv: private data of the low-level implementation + * @model: chip model + * @interface: chip interface id we are using to reach it + * @serial: chip serial number + * @imb_cat_table: CPP Mapping Table + * @mu_locality_lsb: MU access type bit offset + * + * Following fields use explicit locking: + * @resource_list: NFP CPP resource list + * @resource_lock: protects @resource_list + * + * @area_cache_list: cached areas for cpp/xpb read/write speed up + * @area_cache_mutex: protects @area_cache_list + * + * @waitq: area wait queue + */ +struct nfp_cpp { + struct device dev; + + void *priv; + + u32 model; + u16 interface; + u8 serial[NFP_SERIAL_LEN]; + + const struct nfp_cpp_operations *op; + struct list_head resource_list; + rwlock_t resource_lock; + wait_queue_head_t waitq; + + u32 imb_cat_table[16]; + unsigned int mu_locality_lsb; + + struct mutex area_cache_mutex; + struct list_head area_cache_list; +}; + +/* Element of the area_cache_list */ +struct nfp_cpp_area_cache { + struct list_head entry; + u32 id; + u64 addr; + u32 size; + struct nfp_cpp_area *area; +}; + +struct nfp_cpp_area { + struct nfp_cpp *cpp; + struct kref kref; + atomic_t refcount; + struct mutex mutex; /* Lock for the area's refcount */ + unsigned long long offset; + unsigned long size; + struct nfp_cpp_resource resource; + void __iomem *iomem; + /* Here follows the 'priv' part of nfp_cpp_area. */ +}; + +struct nfp_cpp_explicit { + struct nfp_cpp *cpp; + struct nfp_cpp_explicit_command cmd; + /* Here follows the 'priv' part of nfp_cpp_area. 
*/ +}; + +static void __resource_add(struct list_head *head, struct nfp_cpp_resource *res) +{ + struct nfp_cpp_resource *tmp; + struct list_head *pos; + + list_for_each(pos, head) { + tmp = container_of(pos, struct nfp_cpp_resource, list); + + if (tmp->cpp_id > res->cpp_id) + break; + + if (tmp->cpp_id == res->cpp_id && tmp->start > res->start) + break; + } + + list_add_tail(&res->list, pos); +} + +static void __resource_del(struct nfp_cpp_resource *res) +{ + list_del_init(&res->list); +} + +static void __release_cpp_area(struct kref *kref) +{ + struct nfp_cpp_area *area = + container_of(kref, struct nfp_cpp_area, kref); + struct nfp_cpp *cpp = nfp_cpp_area_cpp(area); + + if (area->cpp->op->area_cleanup) + area->cpp->op->area_cleanup(area); + + write_lock(&cpp->resource_lock); + __resource_del(&area->resource); + write_unlock(&cpp->resource_lock); + kfree(area); +} + +static void nfp_cpp_area_put(struct nfp_cpp_area *area) +{ + kref_put(&area->kref, __release_cpp_area); +} + +static struct nfp_cpp_area *nfp_cpp_area_get(struct nfp_cpp_area *area) +{ + kref_get(&area->kref); + + return area; +} + +/** + * nfp_cpp_free() - free the CPP handle + * @cpp: CPP handle + */ +void nfp_cpp_free(struct nfp_cpp *cpp) +{ + struct nfp_cpp_area_cache *cache, *ctmp; + struct nfp_cpp_resource *res, *rtmp; + + /* Remove all caches */ + list_for_each_entry_safe(cache, ctmp, &cpp->area_cache_list, entry) { + list_del(&cache->entry); + if (cache->id) + nfp_cpp_area_release(cache->area); + nfp_cpp_area_free(cache->area); + kfree(cache); + } + + /* There should be no dangling areas at this point */ + WARN_ON(!list_empty(&cpp->resource_list)); + + /* .. but if they weren't, try to clean up. 
*/ + list_for_each_entry_safe(res, rtmp, &cpp->resource_list, list) { + struct nfp_cpp_area *area = container_of(res, + struct nfp_cpp_area, + resource); + + dev_err(cpp->dev.parent, "Dangling area: %d:%d:%d:0x%0llx-0x%0llx%s%s\n", + NFP_CPP_ID_TARGET_of(res->cpp_id), + NFP_CPP_ID_ACTION_of(res->cpp_id), + NFP_CPP_ID_TOKEN_of(res->cpp_id), + res->start, res->end, + res->name ? " " : "", + res->name ? res->name : ""); + + if (area->cpp->op->area_release) + area->cpp->op->area_release(area); + + __release_cpp_area(&area->kref); + } + + if (cpp->op->free) + cpp->op->free(cpp); + + device_unregister(&cpp->dev); + + kfree(cpp); +} + +/** + * nfp_cpp_model() - Retrieve the Model ID of the NFP + * @cpp: NFP CPP handle + * + * Return: NFP CPP Model ID + */ +u32 nfp_cpp_model(struct nfp_cpp *cpp) +{ + return cpp->model; +} + +/** + * nfp_cpp_interface() - Retrieve the Interface ID of the NFP + * @cpp: NFP CPP handle + * + * Return: NFP CPP Interface ID + */ +u16 nfp_cpp_interface(struct nfp_cpp *cpp) +{ + return cpp->interface; +} + +/** + * nfp_cpp_serial() - Retrieve the Serial ID of the NFP + * @cpp: NFP CPP handle + * @serial: Pointer to NFP serial number + * + * Return: Length of NFP serial number + */ +int nfp_cpp_serial(struct nfp_cpp *cpp, const u8 **serial) +{ + *serial = &cpp->serial[0]; + return sizeof(cpp->serial); +} + +#define NFP_IMB_TGTADDRESSMODECFG_MODE_of(_x) (((_x) >> 13) & 0x7) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE BIT(12) +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_32_BIT 0 +#define NFP_IMB_TGTADDRESSMODECFG_ADDRMODE_40_BIT BIT(12) + +static int nfp_cpp_set_mu_locality_lsb(struct nfp_cpp *cpp) +{ + unsigned int mode, addr40; + u32 imbcppat; + int res; + + imbcppat = cpp->imb_cat_table[NFP_CPP_TARGET_MU]; + mode = NFP_IMB_TGTADDRESSMODECFG_MODE_of(imbcppat); + addr40 = !!(imbcppat & NFP_IMB_TGTADDRESSMODECFG_ADDRMODE); + + res = nfp_cppat_mu_locality_lsb(mode, addr40); + if (res < 0) + return res; + cpp->mu_locality_lsb = res; + + return 0; +} + 
+unsigned int nfp_cpp_mu_locality_lsb(struct nfp_cpp *cpp) +{ + return cpp->mu_locality_lsb; +} + +/** + * nfp_cpp_area_alloc_with_name() - allocate a new CPP area + * @cpp: CPP device handle + * @dest: NFP CPP ID + * @name: Name of region + * @address: Address of region + * @size: Size of region + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. + * + * Return: NFP CPP area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc_with_name(struct nfp_cpp *cpp, u32 dest, const char *name, + unsigned long long address, unsigned long size) +{ + struct nfp_cpp_area *area; + u64 tmp64 = address; + int err, name_len; + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(dest, tmp64, &dest, &tmp64, cpp->imb_cat_table); + if (err < 0) + return NULL; + + address = tmp64; + + if (!name) + name = "(reserved)"; + + name_len = strlen(name) + 1; + area = kzalloc(sizeof(*area) + cpp->op->area_priv_size + name_len, + GFP_KERNEL); + if (!area) + return NULL; + + area->cpp = cpp; + area->resource.name = (void *)area + sizeof(*area) + + cpp->op->area_priv_size; + memcpy((char *)area->resource.name, name, name_len); + + area->resource.cpp_id = dest; + area->resource.start = address; + area->resource.end = area->resource.start + size - 1; + INIT_LIST_HEAD(&area->resource.list); + + atomic_set(&area->refcount, 0); + kref_init(&area->kref); + mutex_init(&area->mutex); + + if (cpp->op->area_init) { + int err; + + err = cpp->op->area_init(area, dest, address, size); + if (err < 0) { + kfree(area); + return NULL; + } + } + + write_lock(&cpp->resource_lock); + __resource_add(&cpp->resource_list, &area->resource); + write_unlock(&cpp->resource_lock); + + area->offset = address; + area->size = size; + + return area; +} + +/** + * nfp_cpp_area_alloc() - allocate a new CPP area + * @cpp: CPP handle + * @dest: CPP id + 
* @address: Start address on CPP target + * @size: Size of area in bytes + * + * Allocate and initialize a CPP area structure. The area must later + * be locked down with an 'acquire' before it can be safely accessed. + * + * NOTE: @address and @size must be 32-bit aligned values. + * + * Return: NFP CPP Area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc(struct nfp_cpp *cpp, u32 dest, + unsigned long long address, unsigned long size) +{ + return nfp_cpp_area_alloc_with_name(cpp, dest, NULL, address, size); +} + +/** + * nfp_cpp_area_alloc_acquire() - allocate a new CPP area and lock it down + * @cpp: CPP handle + * @name: Name of region + * @dest: CPP id + * @address: Start address on CPP target + * @size: Size of area + * + * Allocate and initialize a CPP area structure, and lock it down so + * that it can be accessed directly. + * + * NOTE: @address and @size must be 32-bit aligned values. + * The area must also be 'released' when the structure is freed. + * + * Return: NFP CPP Area handle, or NULL + */ +struct nfp_cpp_area * +nfp_cpp_area_alloc_acquire(struct nfp_cpp *cpp, const char *name, u32 dest, + unsigned long long address, unsigned long size) +{ + struct nfp_cpp_area *area; + + area = nfp_cpp_area_alloc_with_name(cpp, dest, name, address, size); + if (!area) + return NULL; + + if (nfp_cpp_area_acquire(area)) { + nfp_cpp_area_free(area); + return NULL; + } + + return area; +} + +/** + * nfp_cpp_area_free() - free up the CPP area + * @area: CPP area handle + * + * Frees up memory resources held by the CPP area. 
+ */ +void nfp_cpp_area_free(struct nfp_cpp_area *area) +{ + if (atomic_read(&area->refcount)) + nfp_warn(area->cpp, "Warning: freeing busy area\n"); + nfp_cpp_area_put(area); +} + +static bool nfp_cpp_area_acquire_try(struct nfp_cpp_area *area, int *status) +{ + *status = area->cpp->op->area_acquire(area); + + return *status != -EAGAIN; +} + +static int __nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + int err, status; + + if (atomic_inc_return(&area->refcount) > 1) + return 0; + + if (!area->cpp->op->area_acquire) + return 0; + + err = wait_event_interruptible(area->cpp->waitq, + nfp_cpp_area_acquire_try(area, &status)); + if (!err) + err = status; + if (err) { + nfp_warn(area->cpp, "Warning: area wait failed: %d\n", err); + atomic_dec(&area->refcount); + return err; + } + + nfp_cpp_area_get(area); + + return 0; +} + +/** + * nfp_cpp_area_acquire() - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_acquire(struct nfp_cpp_area *area) +{ + int ret; + + mutex_lock(&area->mutex); + ret = __nfp_cpp_area_acquire(area); + mutex_unlock(&area->mutex); + + return ret; +} + +/** + * nfp_cpp_area_acquire_nonblocking() - lock down a CPP area for access + * @area: CPP area handle + * + * Locks down the CPP area for a potential long term activity. Area + * must always be locked down before being accessed. 
+ * + * NOTE: Returns -EAGAIN is no area is available + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_area_acquire_nonblocking(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + if (atomic_inc_return(&area->refcount) == 1) { + if (area->cpp->op->area_acquire) { + int err; + + err = area->cpp->op->area_acquire(area); + if (err < 0) { + atomic_dec(&area->refcount); + mutex_unlock(&area->mutex); + return err; + } + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_get(area); + return 0; +} + +/** + * nfp_cpp_area_release() - release a locked down CPP area + * @area: CPP area handle + * + * Releases a previously locked down CPP area. + */ +void nfp_cpp_area_release(struct nfp_cpp_area *area) +{ + mutex_lock(&area->mutex); + /* Only call the release on refcount == 0 */ + if (atomic_dec_and_test(&area->refcount)) { + if (area->cpp->op->area_release) { + area->cpp->op->area_release(area); + /* Let anyone waiting for a BAR try to get one.. */ + wake_up_interruptible_all(&area->cpp->waitq); + } + } + mutex_unlock(&area->mutex); + + nfp_cpp_area_put(area); +} + +/** + * nfp_cpp_area_release_free() - release CPP area and free it + * @area: CPP area handle + * + * Releases CPP area and frees up memory resources held by the it. + */ +void nfp_cpp_area_release_free(struct nfp_cpp_area *area) +{ + nfp_cpp_area_release(area); + nfp_cpp_area_free(area); +} + +/** + * nfp_cpp_area_read() - read data from CPP area + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to put data into + * @length: number of bytes to read + * + * Read data from indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * Area must have been locked down with an 'acquire'. 
+ * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_read(struct nfp_cpp_area *area, + unsigned long offset, void *kernel_vaddr, + size_t length) +{ + return area->cpp->op->area_read(area, kernel_vaddr, offset, length); +} + +/** + * nfp_cpp_area_write() - write data to CPP area + * @area: CPP area handle + * @offset: offset into CPP area + * @kernel_vaddr: kernel address to read data from + * @length: number of bytes to write + * + * Write data to indicated CPP region. + * + * NOTE: @offset and @length must be 32-bit aligned values. + * Area must have been locked down with an 'acquire'. + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_write(struct nfp_cpp_area *area, + unsigned long offset, const void *kernel_vaddr, + size_t length) +{ + return area->cpp->op->area_write(area, kernel_vaddr, offset, length); +} + +/** + * nfp_cpp_area_size() - return size of a CPP area + * @cpp_area: CPP area handle + * + * Return: Size of the area + */ +size_t nfp_cpp_area_size(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->size; +} + +/** + * nfp_cpp_area_name() - return name of a CPP area + * @cpp_area: CPP area handle + * + * Return: Name of the area, or NULL + */ +const char *nfp_cpp_area_name(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->resource.name; +} + +/** + * nfp_cpp_area_priv() - return private struct for CPP area + * @cpp_area: CPP area handle + * + * Return: Private data for the CPP area + */ +void *nfp_cpp_area_priv(struct nfp_cpp_area *cpp_area) +{ + return &cpp_area[1]; +} + +/** + * nfp_cpp_area_cpp() - return CPP handle for CPP area + * @cpp_area: CPP area handle + * + * Return: NFP CPP handle + */ +struct nfp_cpp *nfp_cpp_area_cpp(struct nfp_cpp_area *cpp_area) +{ + return cpp_area->cpp; +} + +/** + * nfp_cpp_area_resource() - get resource + * @area: CPP area handle + * + * NOTE: Area must have been locked down with an 'acquire'. 
+ * + * Return: struct resource pointer, or NULL + */ +struct resource *nfp_cpp_area_resource(struct nfp_cpp_area *area) +{ + struct resource *res = NULL; + + if (area->cpp->op->area_resource) + res = area->cpp->op->area_resource(area); + + return res; +} + +/** + * nfp_cpp_area_phys() - get physical address of CPP area + * @area: CPP area handle + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: phy_addr_t of the area, or NULL + */ +phys_addr_t nfp_cpp_area_phys(struct nfp_cpp_area *area) +{ + phys_addr_t addr = ~0; + + if (area->cpp->op->area_phys) + addr = area->cpp->op->area_phys(area); + + return addr; +} + +/** + * nfp_cpp_area_iomem() - get IOMEM region for CPP area + * @area: CPP area handle + * + * Returns an iomem pointer for use with readl()/writel() style + * operations. + * + * NOTE: Area must have been locked down with an 'acquire'. + * + * Return: __iomem pointer to the area, or NULL + */ +void __iomem *nfp_cpp_area_iomem(struct nfp_cpp_area *area) +{ + void __iomem *iomem = NULL; + + if (area->cpp->op->area_iomem) + iomem = area->cpp->op->area_iomem(area); + + return iomem; +} + +/** + * nfp_cpp_area_readl() - Read a u32 word from an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Pointer to read buffer + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_area_readl(struct nfp_cpp_area *area, + unsigned long offset, u32 *value) +{ + u8 tmp[4]; + int n; + + n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? 
n : -EIO; + + *value = get_unaligned_le32(tmp); + return 0; +} + +/** + * nfp_cpp_area_writel() - Write a u32 word to an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Value to write + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_area_writel(struct nfp_cpp_area *area, + unsigned long offset, u32 value) +{ + u8 tmp[4]; + int n; + + put_unaligned_le32(value, tmp); + n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); + + return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; +} + +/** + * nfp_cpp_area_readq() - Read a u64 word from an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Pointer to read buffer + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_area_readq(struct nfp_cpp_area *area, + unsigned long offset, u64 *value) +{ + u8 tmp[8]; + int n; + + n = nfp_cpp_area_read(area, offset, &tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; + + *value = get_unaligned_le64(tmp); + return 0; +} + +/** + * nfp_cpp_area_writeq() - Write a u64 word to an area + * @area: CPP Area handle + * @offset: Offset into area + * @value: Value to write + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_area_writeq(struct nfp_cpp_area *area, + unsigned long offset, u64 value) +{ + u8 tmp[8]; + int n; + + put_unaligned_le64(value, tmp); + n = nfp_cpp_area_write(area, offset, &tmp, sizeof(tmp)); + + return n == sizeof(tmp) ? 0 : n < 0 ? n : -EIO; +} + +/** + * nfp_cpp_area_fill() - fill a CPP area with a value + * @area: CPP area + * @offset: offset into CPP area + * @value: value to fill with + * @length: length of area to fill + * + * Fill indicated area with given value. 
+ * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_area_fill(struct nfp_cpp_area *area, + unsigned long offset, u32 value, size_t length) +{ + u8 tmp[4]; + size_t i; + int k; + + put_unaligned_le32(value, tmp); + + if (offset % sizeof(tmp) || length % sizeof(tmp)) + return -EINVAL; + + for (i = 0; i < length; i += sizeof(tmp)) { + k = nfp_cpp_area_write(area, offset + i, &tmp, sizeof(tmp)); + if (k < 0) + return k; + } + + return i; +} + +/** + * nfp_cpp_area_cache_add() - Permanently reserve and area for the hot cache + * @cpp: NFP CPP handle + * @size: Size of the area - MUST BE A POWER OF 2. + */ +int nfp_cpp_area_cache_add(struct nfp_cpp *cpp, size_t size) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + + /* Allocate an area - we use the MU target's base as a placeholder, + * as all supported chips have a MU. + */ + area = nfp_cpp_area_alloc(cpp, NFP_CPP_ID(7, NFP_CPP_ACTION_RW, 0), + 0, size); + if (!area) + return -ENOMEM; + + cache = kzalloc(sizeof(*cache), GFP_KERNEL); + if (!cache) { + nfp_cpp_area_free(area); + return -ENOMEM; + } + + cache->id = 0; + cache->addr = 0; + cache->size = size; + cache->area = area; + mutex_lock(&cpp->area_cache_mutex); + list_add_tail(&cache->entry, &cpp->area_cache_list); + mutex_unlock(&cpp->area_cache_mutex); + + return 0; +} + +static struct nfp_cpp_area_cache * +area_cache_get(struct nfp_cpp *cpp, u32 id, + u64 addr, unsigned long *offset, size_t length) +{ + struct nfp_cpp_area_cache *cache; + int err; + + /* Early exit when length == 0, which prevents + * the need for special case code below when + * checking against available cache size. 
+ */ + if (length == 0 || id == 0) + return NULL; + + /* Remap from cpp_island to cpp_target */ + err = nfp_target_cpp(id, addr, &id, &addr, cpp->imb_cat_table); + if (err < 0) + return NULL; + + mutex_lock(&cpp->area_cache_mutex); + + if (list_empty(&cpp->area_cache_list)) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + addr += *offset; + + /* See if we have a match */ + list_for_each_entry(cache, &cpp->area_cache_list, entry) { + if (id == cache->id && + addr >= cache->addr && + addr + length <= cache->addr + cache->size) + goto exit; + } + + /* No matches - inspect the tail of the LRU */ + cache = list_entry(cpp->area_cache_list.prev, + struct nfp_cpp_area_cache, entry); + + /* Can we fit in the cache entry? */ + if (round_down(addr + length - 1, cache->size) != + round_down(addr, cache->size)) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + /* If id != 0, we will need to release it */ + if (cache->id) { + nfp_cpp_area_release(cache->area); + cache->id = 0; + cache->addr = 0; + } + + /* Adjust the start address to be cache size aligned */ + cache->addr = addr & ~(u64)(cache->size - 1); + + /* Re-init to the new ID and address */ + if (cpp->op->area_init) { + err = cpp->op->area_init(cache->area, + id, cache->addr, cache->size); + if (err < 0) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + } + + /* Attempt to acquire */ + err = nfp_cpp_area_acquire(cache->area); + if (err < 0) { + mutex_unlock(&cpp->area_cache_mutex); + return NULL; + } + + cache->id = id; + +exit: + /* Adjust offset */ + *offset = addr - cache->addr; + return cache; +} + +static void +area_cache_put(struct nfp_cpp *cpp, struct nfp_cpp_area_cache *cache) +{ + if (!cache) + return; + + /* Move to front of LRU */ + list_move(&cache->entry, &cpp->area_cache_list); + + mutex_unlock(&cpp->area_cache_mutex); +} + +static int __nfp_cpp_read(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, void *kernel_vaddr, + size_t length) +{ + 
struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + unsigned long offset = 0; + int err; + + cache = area_cache_get(cpp, destination, address, &offset, length); + if (cache) { + area = cache->area; + } else { + area = nfp_cpp_area_alloc(cpp, destination, address, length); + if (!area) + return -ENOMEM; + + err = nfp_cpp_area_acquire(area); + if (err) { + nfp_cpp_area_free(area); + return err; + } + } + + err = nfp_cpp_area_read(area, offset, kernel_vaddr, length); + + if (cache) + area_cache_put(cpp, cache); + else + nfp_cpp_area_release_free(area); + + return err; +} + +/** + * nfp_cpp_read() - read from CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer for result + * @length: number of bytes to read + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_read(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, void *kernel_vaddr, + size_t length) +{ + size_t n, offset; + int ret; + + for (offset = 0; offset < length; offset += n) { + unsigned long long r_addr = address + offset; + + /* make first read smaller to align to safe window */ + n = min_t(size_t, length - offset, + ALIGN(r_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - r_addr); + + ret = __nfp_cpp_read(cpp, destination, address + offset, + kernel_vaddr + offset, n); + if (ret < 0) + return ret; + if (ret != n) + return offset + n; + } + + return length; +} + +static int __nfp_cpp_write(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, + const void *kernel_vaddr, size_t length) +{ + struct nfp_cpp_area_cache *cache; + struct nfp_cpp_area *area; + unsigned long offset = 0; + int err; + + cache = area_cache_get(cpp, destination, address, &offset, length); + if (cache) { + area = cache->area; + } else { + area = nfp_cpp_area_alloc(cpp, destination, address, length); + if (!area) + return -ENOMEM; + + err = nfp_cpp_area_acquire(area); + if (err) { + nfp_cpp_area_free(area); + return err; + } + } 
+ + err = nfp_cpp_area_write(area, offset, kernel_vaddr, length); + + if (cache) + area_cache_put(cpp, cache); + else + nfp_cpp_area_release_free(area); + + return err; +} + +/** + * nfp_cpp_write() - write to CPP target + * @cpp: CPP handle + * @destination: CPP id + * @address: offset into CPP target + * @kernel_vaddr: kernel buffer to read from + * @length: number of bytes to write + * + * Return: length of io, or -ERRNO + */ +int nfp_cpp_write(struct nfp_cpp *cpp, u32 destination, + unsigned long long address, + const void *kernel_vaddr, size_t length) +{ + size_t n, offset; + int ret; + + for (offset = 0; offset < length; offset += n) { + unsigned long long w_addr = address + offset; + + /* make first write smaller to align to safe window */ + n = min_t(size_t, length - offset, + ALIGN(w_addr + 1, NFP_CPP_SAFE_AREA_SIZE) - w_addr); + + ret = __nfp_cpp_write(cpp, destination, address + offset, + kernel_vaddr + offset, n); + if (ret < 0) + return ret; + if (ret != n) + return offset + n; + } + + return length; +} + +/* Return the correct CPP address, and fixup xpb_addr as needed. */ +static u32 nfp_xpb_to_cpp(struct nfp_cpp *cpp, u32 *xpb_addr) +{ + int island; + u32 xpb; + + xpb = NFP_CPP_ID(14, NFP_CPP_ACTION_RW, 0); + /* Ensure that non-local XPB accesses go + * out through the global XPBM bus. 
+ */ + island = (*xpb_addr >> 24) & 0x3f; + if (!island) + return xpb; + + if (island != 1) { + *xpb_addr |= 1 << 30; + return xpb; + } + + /* Accesses to the ARM Island overlay uses Island 0 / Global Bit */ + *xpb_addr &= ~0x7f000000; + if (*xpb_addr < 0x60000) { + *xpb_addr |= 1 << 30; + } else { + /* And only non-ARM interfaces use the island id = 1 */ + if (NFP_CPP_INTERFACE_TYPE_of(nfp_cpp_interface(cpp)) + != NFP_CPP_INTERFACE_TYPE_ARM) + *xpb_addr |= 1 << 24; + } + + return xpb; +} + +/** + * nfp_xpb_readl() - Read a u32 word from a XPB location + * @cpp: CPP device handle + * @xpb_addr: Address for operation + * @value: Pointer to read buffer + * + * Return: 0 on success, or -ERRNO + */ +int nfp_xpb_readl(struct nfp_cpp *cpp, u32 xpb_addr, u32 *value) +{ + u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_readl(cpp, cpp_dest, xpb_addr, value); +} + +/** + * nfp_xpb_writel() - Write a u32 word to a XPB location + * @cpp: CPP device handle + * @xpb_addr: Address for operation + * @value: Value to write + * + * Return: 0 on success, or -ERRNO + */ +int nfp_xpb_writel(struct nfp_cpp *cpp, u32 xpb_addr, u32 value) +{ + u32 cpp_dest = nfp_xpb_to_cpp(cpp, &xpb_addr); + + return nfp_cpp_writel(cpp, cpp_dest, xpb_addr, value); +} + +/** + * nfp_xpb_writelm() - Modify bits of a 32-bit value from the XPB bus + * @cpp: NFP CPP device handle + * @xpb_tgt: XPB target and address + * @mask: mask of bits to alter + * @value: value to modify + * + * KERNEL: This operation is safe to call in interrupt or softirq context. 
+ * + * Return: 0 on success, or -ERRNO + */ +int nfp_xpb_writelm(struct nfp_cpp *cpp, u32 xpb_tgt, + u32 mask, u32 value) +{ + int err; + u32 tmp; + + err = nfp_xpb_readl(cpp, xpb_tgt, &tmp); + if (err < 0) + return err; + + tmp &= ~mask; + tmp |= mask & value; + return nfp_xpb_writel(cpp, xpb_tgt, tmp); +} + +/* Lockdep markers */ +static struct lock_class_key nfp_cpp_resource_lock_key; + +static void nfp_cpp_dev_release(struct device *dev) +{ + /* Nothing to do here - it just makes the kernel happy */ +} + +/** + * nfp_cpp_from_operations() - Create a NFP CPP handle + * from an operations structure + * @ops: NFP CPP operations structure + * @parent: Parent device + * @priv: Private data of low-level implementation + * + * NOTE: On failure, cpp_ops->free will be called! + * + * Return: NFP CPP handle on success, ERR_PTR on failure + */ +struct nfp_cpp * +nfp_cpp_from_operations(const struct nfp_cpp_operations *ops, + struct device *parent, void *priv) +{ + const u32 arm = NFP_CPP_ID(NFP_CPP_TARGET_ARM, NFP_CPP_ACTION_RW, 0); + struct nfp_cpp *cpp; + int ifc, err; + u32 mask[2]; + u32 xpbaddr; + size_t tgt; + + cpp = kzalloc(sizeof(*cpp), GFP_KERNEL); + if (!cpp) { + err = -ENOMEM; + goto err_malloc; + } + + cpp->op = ops; + cpp->priv = priv; + + ifc = ops->get_interface(parent); + if (ifc < 0) { + err = ifc; + goto err_free_cpp; + } + cpp->interface = ifc; + if (ops->read_serial) { + err = ops->read_serial(parent, cpp->serial); + if (err) + goto err_free_cpp; + } + + rwlock_init(&cpp->resource_lock); + init_waitqueue_head(&cpp->waitq); + lockdep_set_class(&cpp->resource_lock, &nfp_cpp_resource_lock_key); + INIT_LIST_HEAD(&cpp->resource_list); + INIT_LIST_HEAD(&cpp->area_cache_list); + mutex_init(&cpp->area_cache_mutex); + cpp->dev.init_name = "cpp"; + cpp->dev.parent = parent; + cpp->dev.release = nfp_cpp_dev_release; + err = device_register(&cpp->dev); + if (err < 0) { + put_device(&cpp->dev); + goto err_free_cpp; + } + + dev_set_drvdata(&cpp->dev, cpp); + + /* 
NOTE: cpp_lock is NOT locked for op->init, + * since it may call NFP CPP API operations + */ + if (cpp->op->init) { + err = cpp->op->init(cpp); + if (err < 0) { + dev_err(parent, + "NFP interface initialization failed\n"); + goto err_out; + } + } + + err = nfp_cpp_model_autodetect(cpp, &cpp->model); + if (err < 0) { + dev_err(parent, "NFP model detection failed\n"); + goto err_out; + } + + for (tgt = 0; tgt < ARRAY_SIZE(cpp->imb_cat_table); tgt++) { + /* Hardcoded XPB IMB Base, island 0 */ + xpbaddr = 0x000a0000 + (tgt * 4); + err = nfp_xpb_readl(cpp, xpbaddr, + &cpp->imb_cat_table[tgt]); + if (err < 0) { + dev_err(parent, + "Can't read CPP mapping from device\n"); + goto err_out; + } + } + + nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL2, + &mask[0]); + nfp_cpp_readl(cpp, arm, NFP_ARM_GCSR + NFP_ARM_GCSR_SOFTMODEL3, + &mask[1]); + + err = nfp_cpp_set_mu_locality_lsb(cpp); + if (err < 0) { + dev_err(parent, "Can't calculate MU locality bit offset\n"); + goto err_out; + } + + dev_info(cpp->dev.parent, "Model: 0x%08x, SN: %pM, Ifc: 0x%04x\n", + nfp_cpp_model(cpp), cpp->serial, nfp_cpp_interface(cpp)); + + return cpp; + +err_out: + device_unregister(&cpp->dev); +err_free_cpp: + kfree(cpp); +err_malloc: + return ERR_PTR(err); +} + +/** + * nfp_cpp_priv() - Get the operations private data of a CPP handle + * @cpp: CPP handle + * + * Return: Private data for the NFP CPP handle + */ +void *nfp_cpp_priv(struct nfp_cpp *cpp) +{ + return cpp->priv; +} + +/** + * nfp_cpp_device() - Get the Linux device handle of a CPP handle + * @cpp: CPP handle + * + * Return: Device for the NFP CPP bus + */ +struct device *nfp_cpp_device(struct nfp_cpp *cpp) +{ + return &cpp->dev; +} + +#define NFP_EXPL_OP(func, expl, args...) \ + ({ \ + struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \ + int err = -ENODEV; \ + \ + if (cpp->op->func) \ + err = cpp->op->func(expl, ##args); \ + err; \ + }) + +#define NFP_EXPL_OP_NR(func, expl, args...) 
\ + ({ \ + struct nfp_cpp *cpp = nfp_cpp_explicit_cpp(expl); \ + \ + if (cpp->op->func) \ + cpp->op->func(expl, ##args); \ + \ + }) + +/** + * nfp_cpp_explicit_acquire() - Acquire explicit access handle + * @cpp: NFP CPP handle + * + * The 'data_ref' and 'signal_ref' values are useful when + * constructing the NFP_EXPL_CSR1 and NFP_EXPL_POST values. + * + * Return: NFP CPP explicit handle + */ +struct nfp_cpp_explicit *nfp_cpp_explicit_acquire(struct nfp_cpp *cpp) +{ + struct nfp_cpp_explicit *expl; + int err; + + expl = kzalloc(sizeof(*expl) + cpp->op->explicit_priv_size, GFP_KERNEL); + if (!expl) + return NULL; + + expl->cpp = cpp; + err = NFP_EXPL_OP(explicit_acquire, expl); + if (err < 0) { + kfree(expl); + return NULL; + } + + return expl; +} + +/** + * nfp_cpp_explicit_set_target() - Set target fields for explicit + * @expl: Explicit handle + * @cpp_id: CPP ID field + * @len: CPP Length field + * @mask: CPP Mask field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_target(struct nfp_cpp_explicit *expl, + u32 cpp_id, u8 len, u8 mask) +{ + expl->cmd.cpp_id = cpp_id; + expl->cmd.len = len; + expl->cmd.byte_mask = mask; + + return 0; +} + +/** + * nfp_cpp_explicit_set_data() - Set data fields for explicit + * @expl: Explicit handle + * @data_master: CPP Data Master field + * @data_ref: CPP Data Ref field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_data(struct nfp_cpp_explicit *expl, + u8 data_master, u16 data_ref) +{ + expl->cmd.data_master = data_master; + expl->cmd.data_ref = data_ref; + + return 0; +} + +/** + * nfp_cpp_explicit_set_signal() - Set signal fields for explicit + * @expl: Explicit handle + * @signal_master: CPP Signal Master field + * @signal_ref: CPP Signal Ref field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_signal(struct nfp_cpp_explicit *expl, + u8 signal_master, u8 signal_ref) +{ + expl->cmd.signal_master = signal_master; + expl->cmd.signal_ref = signal_ref; + + return 0; +} + +/** + * 
nfp_cpp_explicit_set_posted() - Set completion fields for explicit + * @expl: Explicit handle + * @posted: True for signaled completion, false otherwise + * @siga: CPP Signal A field + * @siga_mode: CPP Signal A Mode field + * @sigb: CPP Signal B field + * @sigb_mode: CPP Signal B Mode field + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_set_posted(struct nfp_cpp_explicit *expl, int posted, + u8 siga, + enum nfp_cpp_explicit_signal_mode siga_mode, + u8 sigb, + enum nfp_cpp_explicit_signal_mode sigb_mode) +{ + expl->cmd.posted = posted; + expl->cmd.siga = siga; + expl->cmd.sigb = sigb; + expl->cmd.siga_mode = siga_mode; + expl->cmd.sigb_mode = sigb_mode; + + return 0; +} + +/** + * nfp_cpp_explicit_put() - Set up the write (pull) data for a explicit access + * @expl: NFP CPP Explicit handle + * @buff: Data to have the target pull in the transaction + * @len: Length of data, in bytes + * + * The 'len' parameter must be less than or equal to 128 bytes. + * + * If this function is called before the configuration + * registers are set, it will return -EINVAL. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_put(struct nfp_cpp_explicit *expl, + const void *buff, size_t len) +{ + return NFP_EXPL_OP(explicit_put, expl, buff, len); +} + +/** + * nfp_cpp_explicit_do() - Execute a transaction, and wait for it to complete + * @expl: NFP CPP Explicit handle + * @address: Address to send in the explicit transaction + * + * If this function is called before the configuration + * registers are set, it will return -1, with an errno of EINVAL. 
+ * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_do(struct nfp_cpp_explicit *expl, u64 address) +{ + return NFP_EXPL_OP(explicit_do, expl, &expl->cmd, address); +} + +/** + * nfp_cpp_explicit_get() - Get the 'push' (read) data from a explicit access + * @expl: NFP CPP Explicit handle + * @buff: Data that the target pushed in the transaction + * @len: Length of data, in bytes + * + * The 'len' parameter must be less than or equal to 128 bytes. + * + * If this function is called before all three configuration + * registers are set, it will return -1, with an errno of EINVAL. + * + * If this function is called before nfp_cpp_explicit_do() + * has completed, it will return -1, with an errno of EBUSY. + * + * Return: 0, or -ERRNO + */ +int nfp_cpp_explicit_get(struct nfp_cpp_explicit *expl, void *buff, size_t len) +{ + return NFP_EXPL_OP(explicit_get, expl, buff, len); +} + +/** + * nfp_cpp_explicit_release() - Release explicit access handle + * @expl: NFP CPP Explicit handle + * + */ +void nfp_cpp_explicit_release(struct nfp_cpp_explicit *expl) +{ + NFP_EXPL_OP_NR(explicit_release, expl); + kfree(expl); +} + +/** + * nfp_cpp_explicit_cpp() - return CPP handle for CPP explicit + * @cpp_explicit: CPP explicit handle + * + * Return: NFP CPP handle of the explicit + */ +struct nfp_cpp *nfp_cpp_explicit_cpp(struct nfp_cpp_explicit *cpp_explicit) +{ + return cpp_explicit->cpp; +} + +/** + * nfp_cpp_explicit_priv() - return private struct for CPP explicit + * @cpp_explicit: CPP explicit handle + * + * Return: private data of the explicit, or NULL + */ +void *nfp_cpp_explicit_priv(struct nfp_cpp_explicit *cpp_explicit) +{ + return &cpp_explicit[1]; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c new file mode 100644 index 000000000..508ae6b57 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_cpplib.c @@ -0,0 +1,298 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR 
BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp_cpplib.c + * Library of functions to access the NFP's CPP bus + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Rolf Neugebauer <rolf.neugebauer@netronome.com> + */ + +#include <asm/unaligned.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/sched.h> + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" +#include "nfp6000/nfp_xpb.h" + +/* NFP6000 PL */ +#define NFP_PL_DEVICE_PART_NFP6000 0x6200 +#define NFP_PL_DEVICE_ID 0x00000004 +#define NFP_PL_DEVICE_ID_MASK GENMASK(7, 0) +#define NFP_PL_DEVICE_PART_MASK GENMASK(31, 16) +#define NFP_PL_DEVICE_MODEL_MASK (NFP_PL_DEVICE_PART_MASK | \ + NFP_PL_DEVICE_ID_MASK) + +/** + * nfp_cpp_readl() - Read a u32 word from a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Pointer to read buffer + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_readl(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 *value) +{ + u8 tmp[4]; + int n; + + n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; + + *value = get_unaligned_le32(tmp); + return 0; +} + +/** + * nfp_cpp_writel() - Write a u32 word to a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Value to write + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_writel(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u32 value) +{ + u8 tmp[4]; + int n; + + put_unaligned_le32(value, tmp); + n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); + + return n == sizeof(tmp) ? 0 : n < 0 ? 
n : -EIO; +} + +/** + * nfp_cpp_readq() - Read a u64 word from a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Pointer to read buffer + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_readq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 *value) +{ + u8 tmp[8]; + int n; + + n = nfp_cpp_read(cpp, cpp_id, address, tmp, sizeof(tmp)); + if (n != sizeof(tmp)) + return n < 0 ? n : -EIO; + + *value = get_unaligned_le64(tmp); + return 0; +} + +/** + * nfp_cpp_writeq() - Write a u64 word to a CPP location + * @cpp: CPP device handle + * @cpp_id: CPP ID for operation + * @address: Address for operation + * @value: Value to write + * + * Return: 0 on success, or -ERRNO + */ +int nfp_cpp_writeq(struct nfp_cpp *cpp, u32 cpp_id, + unsigned long long address, u64 value) +{ + u8 tmp[8]; + int n; + + put_unaligned_le64(value, tmp); + n = nfp_cpp_write(cpp, cpp_id, address, tmp, sizeof(tmp)); + + return n == sizeof(tmp) ? 0 : n < 0 ? 
n : -EIO; +} + +/* NOTE: This code should not use nfp_xpb_* functions, + * as those are model-specific + */ +int nfp_cpp_model_autodetect(struct nfp_cpp *cpp, u32 *model) +{ + u32 reg; + int err; + + err = nfp_xpb_readl(cpp, NFP_XPB_DEVICE(1, 1, 16) + NFP_PL_DEVICE_ID, + ®); + if (err < 0) + return err; + + *model = reg & NFP_PL_DEVICE_MODEL_MASK; + /* Disambiguate the NFP4000/NFP5000/NFP6000 chips */ + if (FIELD_GET(NFP_PL_DEVICE_PART_MASK, reg) == + NFP_PL_DEVICE_PART_NFP6000) { + if (*model & NFP_PL_DEVICE_ID_MASK) + *model -= 0x10; + } + + return 0; +} + +static u8 nfp_bytemask(int width, u64 addr) +{ + if (width == 8) + return 0xff; + else if (width == 4) + return 0x0f << (addr & 4); + else if (width == 2) + return 0x03 << (addr & 6); + else if (width == 1) + return 0x01 << (addr & 7); + else + return 0; +} + +int nfp_cpp_explicit_read(struct nfp_cpp *cpp, u32 cpp_id, + u64 addr, void *buff, size_t len, int width_read) +{ + struct nfp_cpp_explicit *expl; + char *tmp = buff; + int err, i, incr; + u8 byte_mask; + + if (len & (width_read - 1)) + return -EINVAL; + + expl = nfp_cpp_explicit_acquire(cpp); + if (!expl) + return -EBUSY; + + incr = min_t(int, 16 * width_read, 128); + incr = min_t(int, incr, len); + + /* Translate a NFP_CPP_ACTION_RW to action 0 */ + if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) + cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 0, + NFP_CPP_ID_TOKEN_of(cpp_id)); + + byte_mask = nfp_bytemask(width_read, addr); + + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_read - 1, byte_mask); + nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PUSH, + 0, NFP_SIGNAL_NONE); + + for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { + if (i + incr > len) { + incr = len - i; + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_read - 1, + 0xff); + } + + err = nfp_cpp_explicit_do(expl, addr); + if (err < 0) + goto exit_release; + + err = nfp_cpp_explicit_get(expl, tmp, incr); + if (err < 0) + goto exit_release; + } + 
err = len; +exit_release: + nfp_cpp_explicit_release(expl); + + return err; +} + +int nfp_cpp_explicit_write(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, + const void *buff, size_t len, int width_write) +{ + struct nfp_cpp_explicit *expl; + const char *tmp = buff; + int err, i, incr; + u8 byte_mask; + + if (len & (width_write - 1)) + return -EINVAL; + + expl = nfp_cpp_explicit_acquire(cpp); + if (!expl) + return -EBUSY; + + incr = min_t(int, 16 * width_write, 128); + incr = min_t(int, incr, len); + + /* Translate a NFP_CPP_ACTION_RW to action 1 */ + if (NFP_CPP_ID_ACTION_of(cpp_id) == NFP_CPP_ACTION_RW) + cpp_id = NFP_CPP_ID(NFP_CPP_ID_TARGET_of(cpp_id), 1, + NFP_CPP_ID_TOKEN_of(cpp_id)); + + byte_mask = nfp_bytemask(width_write, addr); + + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_write - 1, byte_mask); + nfp_cpp_explicit_set_posted(expl, 1, 0, NFP_SIGNAL_PULL, + 0, NFP_SIGNAL_NONE); + + for (i = 0; i < len; i += incr, addr += incr, tmp += incr) { + if (i + incr > len) { + incr = len - i; + nfp_cpp_explicit_set_target(expl, cpp_id, + incr / width_write - 1, + 0xff); + } + + err = nfp_cpp_explicit_put(expl, tmp, incr); + if (err < 0) + goto exit_release; + + err = nfp_cpp_explicit_do(expl, addr); + if (err < 0) + goto exit_release; + } + err = len; +exit_release: + nfp_cpp_explicit_release(expl); + + return err; +} + +/** + * nfp_cpp_map_area() - Helper function to map an area + * @cpp: NFP CPP handler + * @name: Name for the area + * @cpp_id: CPP ID for operation + * @addr: CPP address + * @size: Size of the area + * @area: Area handle (output) + * + * Map an area of IOMEM access. To undo the effect of this function call + * @nfp_cpp_area_release_free(*area). 
+ * + * Return: Pointer to memory mapped area or ERR_PTR + */ +u8 __iomem * +nfp_cpp_map_area(struct nfp_cpp *cpp, const char *name, u32 cpp_id, u64 addr, + unsigned long size, struct nfp_cpp_area **area) +{ + u8 __iomem *res; + + *area = nfp_cpp_area_alloc_acquire(cpp, name, cpp_id, addr, size); + if (!*area) + goto err_eio; + + res = nfp_cpp_area_iomem(*area); + if (!res) + goto err_release_free; + + return res; + +err_release_free: + nfp_cpp_area_release_free(*area); +err_eio: + return (u8 __iomem *)ERR_PTR(-EIO); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c new file mode 100644 index 000000000..0725b51c2 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.c @@ -0,0 +1,49 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#include <linux/dma-mapping.h> +#include <linux/kernel.h> +#include <linux/sizes.h> + +#include "nfp_dev.h" + +const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT] = { + [NFP_DEV_NFP3800] = { + .dma_mask = DMA_BIT_MASK(48), + .qc_idx_mask = GENMASK(8, 0), + .qc_addr_offset = 0x400000, + .min_qc_size = 512, + .max_qc_size = SZ_64K, + + .chip_names = "NFP3800", + .pcie_cfg_expbar_offset = 0x0a00, + .pcie_expl_offset = 0xd000, + .qc_area_sz = 0x100000, + }, + [NFP_DEV_NFP3800_VF] = { + .dma_mask = DMA_BIT_MASK(48), + .qc_idx_mask = GENMASK(8, 0), + .qc_addr_offset = 0, + .min_qc_size = 512, + .max_qc_size = SZ_64K, + }, + [NFP_DEV_NFP6000] = { + .dma_mask = DMA_BIT_MASK(40), + .qc_idx_mask = GENMASK(7, 0), + .qc_addr_offset = 0x80000, + .min_qc_size = 256, + .max_qc_size = SZ_256K, + + .chip_names = "NFP4000/NFP5000/NFP6000", + .pcie_cfg_expbar_offset = 0x0400, + .pcie_expl_offset = 0x1000, + .qc_area_sz = 0x80000, + }, + [NFP_DEV_NFP6000_VF] = { + .dma_mask = DMA_BIT_MASK(40), + .qc_idx_mask = GENMASK(7, 0), + .qc_addr_offset = 0, + .min_qc_size = 256, + .max_qc_size = SZ_256K, + }, +}; 
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h new file mode 100644 index 000000000..e4d38178d --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_dev.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2019 Netronome Systems, Inc. */ + +#ifndef _NFP_DEV_H_ +#define _NFP_DEV_H_ + +#include <linux/types.h> + +#define PCI_VENDOR_ID_CORIGINE 0x1da8 +#define PCI_DEVICE_ID_NFP3800 0x3800 +#define PCI_DEVICE_ID_NFP4000 0x4000 +#define PCI_DEVICE_ID_NFP5000 0x5000 +#define PCI_DEVICE_ID_NFP6000 0x6000 +#define PCI_DEVICE_ID_NFP3800_VF 0x3803 +#define PCI_DEVICE_ID_NFP6000_VF 0x6003 + +enum nfp_dev_id { + NFP_DEV_NFP3800, + NFP_DEV_NFP3800_VF, + NFP_DEV_NFP6000, + NFP_DEV_NFP6000_VF, + NFP_DEV_CNT, +}; + +struct nfp_dev_info { + /* Required fields */ + u64 dma_mask; + u32 qc_idx_mask; + u32 qc_addr_offset; + u32 min_qc_size; + u32 max_qc_size; + + /* PF-only fields */ + const char *chip_names; + u32 pcie_cfg_expbar_offset; + u32 pcie_expl_offset; + u32 qc_area_sz; +}; + +extern const struct nfp_dev_info nfp_dev_info[NFP_DEV_CNT]; + +#endif diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c new file mode 100644 index 000000000..f05dd34ab --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_hwinfo.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +/* Parse the hwinfo table that the ARM firmware builds in the ARM scratch SRAM + * after chip reset. + * + * Examples of the fields: + * me.count = 40 + * me.mask = 0x7f_ffff_ffff + * + * me.count is the total number of MEs on the system. + * me.mask is the bitmask of MEs that are available for application usage. + * + * (ie, in this example, ME 39 has been reserved by boardconfig.) 
+ */ + +#include <asm/byteorder.h> +#include <asm/unaligned.h> +#include <linux/delay.h> +#include <linux/log2.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> + +#define NFP_SUBSYS "nfp_hwinfo" + +#include "crc32.h" +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define HWINFO_SIZE_MIN 0x100 +#define HWINFO_WAIT 20 /* seconds */ + +/* The Hardware Info Table defines the properties of the system. + * + * HWInfo v1 Table (fixed size) + * + * 0x0000: u32 version Hardware Info Table version (1.0) + * 0x0004: u32 size Total size of the table, including + * the CRC32 (IEEE 802.3) + * 0x0008: u32 jumptab Offset of key/value table + * 0x000c: u32 keys Total number of keys in the key/value table + * NNNNNN: Key/value jump table and string data + * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * HWInfo v2 Table (variable size) + * + * 0x0000: u32 version Hardware Info Table version (2.0) + * 0x0004: u32 size Current size of the data area, excluding CRC32 + * 0x0008: u32 limit Maximum size of the table + * 0x000c: u32 reserved Unused, set to zero + * NNNNNN: Key/value data + * (size - 4): u32 crc32 CRC32 (same as IEEE 802.3, POSIX csum, etc) + * CRC32("",0) = ~0, CRC32("a",1) = 0x48C279FE + * + * If the HWInfo table is in the process of being updated, the low bit + * of version will be set. + * + * HWInfo v1 Key/Value Table + * ------------------------- + * + * The key/value table is a set of offsets to ASCIIZ strings which have + * been strcmp(3) sorted (yes, please use bsearch(3) on the table). + * + * All keys are guaranteed to be unique. + * + * N+0: u32 key_1 Offset to the first key + * N+4: u32 val_1 Offset to the first value + * N+8: u32 key_2 Offset to the second key + * N+c: u32 val_2 Offset to the second value + * ... 
+ * + * HWInfo v2 Key/Value Table + * ------------------------- + * + * Packed UTF8Z strings, ie 'key1\000value1\000key2\000value2\000' + * + * Unsorted. + */ + +#define NFP_HWINFO_VERSION_1 ('H' << 24 | 'I' << 16 | 1 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_2 ('H' << 24 | 'I' << 16 | 2 << 8 | 0 << 1 | 0) +#define NFP_HWINFO_VERSION_UPDATING BIT(0) + +struct nfp_hwinfo { + u8 start[0]; + + __le32 version; + __le32 size; + + /* v2 specific fields */ + __le32 limit; + __le32 resv; + + char data[]; +}; + +static bool nfp_hwinfo_is_updating(struct nfp_hwinfo *hwinfo) +{ + return le32_to_cpu(hwinfo->version) & NFP_HWINFO_VERSION_UPDATING; +} + +static int +hwinfo_db_walk(struct nfp_cpp *cpp, struct nfp_hwinfo *hwinfo, u32 size) +{ + const char *key, *val, *end = hwinfo->data + size; + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + + val = key + strlen(key) + 1; + if (val >= end) { + nfp_warn(cpp, "Bad HWINFO - overflowing key\n"); + return -EINVAL; + } + + if (val + strlen(val) + 1 > end) { + nfp_warn(cpp, "Bad HWINFO - overflowing value\n"); + return -EINVAL; + } + } + + return 0; +} + +static int +hwinfo_db_validate(struct nfp_cpp *cpp, struct nfp_hwinfo *db, u32 len) +{ + u32 size, crc; + + size = le32_to_cpu(db->size); + if (size > len) { + nfp_err(cpp, "Unsupported hwinfo size %u > %u\n", size, len); + return -EINVAL; + } + + size -= sizeof(u32); + crc = crc32_posix(db, size); + if (crc != get_unaligned_le32(db->start + size)) { + nfp_err(cpp, "Corrupt hwinfo table (CRC mismatch), calculated 0x%x, expected 0x%x\n", + crc, get_unaligned_le32(db->start + size)); + + return -EINVAL; + } + + return hwinfo_db_walk(cpp, db, size); +} + +static struct nfp_hwinfo * +hwinfo_try_fetch(struct nfp_cpp *cpp, size_t *cpp_size) +{ + struct nfp_hwinfo *header; + struct nfp_resource *res; + u64 cpp_addr; + u32 cpp_id; + int err; + u8 *db; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_HWINFO); + if (!IS_ERR(res)) { + cpp_id = 
nfp_resource_cpp_id(res); + cpp_addr = nfp_resource_address(res); + *cpp_size = nfp_resource_size(res); + + nfp_resource_release(res); + + if (*cpp_size < HWINFO_SIZE_MIN) + return NULL; + } else if (PTR_ERR(res) == -ENOENT) { + /* Try getting the HWInfo table from the 'classic' location */ + cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, + NFP_CPP_ACTION_RW, 0, 1); + cpp_addr = 0x30000; + *cpp_size = 0x0e000; + } else { + return NULL; + } + + db = kmalloc(*cpp_size + 1, GFP_KERNEL); + if (!db) + return NULL; + + err = nfp_cpp_read(cpp, cpp_id, cpp_addr, db, *cpp_size); + if (err != *cpp_size) + goto exit_free; + + header = (void *)db; + if (nfp_hwinfo_is_updating(header)) + goto exit_free; + + if (le32_to_cpu(header->version) != NFP_HWINFO_VERSION_2) { + nfp_err(cpp, "Unknown HWInfo version: 0x%08x\n", + le32_to_cpu(header->version)); + goto exit_free; + } + + /* NULL-terminate for safety */ + db[*cpp_size] = '\0'; + + return (void *)db; +exit_free: + kfree(db); + return NULL; +} + +static struct nfp_hwinfo *hwinfo_fetch(struct nfp_cpp *cpp, size_t *hwdb_size) +{ + const unsigned long wait_until = jiffies + HWINFO_WAIT * HZ; + struct nfp_hwinfo *db; + int err; + + for (;;) { + const unsigned long start_time = jiffies; + + db = hwinfo_try_fetch(cpp, hwdb_size); + if (db) + return db; + + err = msleep_interruptible(100); + if (err || time_after(start_time, wait_until)) { + nfp_err(cpp, "NFP access error\n"); + return NULL; + } + } +} + +struct nfp_hwinfo *nfp_hwinfo_read(struct nfp_cpp *cpp) +{ + struct nfp_hwinfo *db; + size_t hwdb_size = 0; + int err; + + db = hwinfo_fetch(cpp, &hwdb_size); + if (!db) + return NULL; + + err = hwinfo_db_validate(cpp, db, hwdb_size); + if (err) { + kfree(db); + return NULL; + } + + return db; +} + +/** + * nfp_hwinfo_lookup() - Find a value in the HWInfo table by name + * @hwinfo: NFP HWinfo table + * @lookup: HWInfo name to search for + * + * Return: Value of the HWInfo name, or NULL + */ +const char *nfp_hwinfo_lookup(struct 
nfp_hwinfo *hwinfo, const char *lookup) +{ + const char *key, *val, *end; + + if (!hwinfo || !lookup) + return NULL; + + end = hwinfo->data + le32_to_cpu(hwinfo->size) - sizeof(u32); + + for (key = hwinfo->data; *key && key < end; + key = val + strlen(val) + 1) { + + val = key + strlen(key) + 1; + + if (strcmp(key, lookup) == 0) + return val; + } + + return NULL; +} + +char *nfp_hwinfo_get_packed_strings(struct nfp_hwinfo *hwinfo) +{ + return hwinfo->data; +} + +u32 nfp_hwinfo_get_packed_str_size(struct nfp_hwinfo *hwinfo) +{ + return le32_to_cpu(hwinfo->size) - sizeof(u32); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c new file mode 100644 index 000000000..79e179435 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mip.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2017 Netronome Systems, Inc. */ + +/* + * nfp_mip.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Espen Skoglund <espen.skoglund@netronome.com> + */ +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" + +#define NFP_MIP_SIGNATURE cpu_to_le32(0x0050494d) /* "MIP\0" */ +#define NFP_MIP_VERSION cpu_to_le32(1) +#define NFP_MIP_MAX_OFFSET (256 * 1024) + +struct nfp_mip { + __le32 signature; + __le32 mip_version; + __le32 mip_size; + __le32 first_entry; + + __le32 version; + __le32 buildnum; + __le32 buildtime; + __le32 loadtime; + + __le32 symtab_addr; + __le32 symtab_size; + __le32 strtab_addr; + __le32 strtab_size; + + char name[16]; + char toolchain[32]; +}; + +/* Read memory and check if it could be a valid MIP */ +static int +nfp_mip_try_read(struct nfp_cpp *cpp, u32 cpp_id, u64 addr, struct nfp_mip *mip) +{ + int ret; + + ret = nfp_cpp_read(cpp, cpp_id, addr, mip, sizeof(*mip)); + if (ret != sizeof(*mip)) { + nfp_err(cpp, 
"Failed to read MIP data (%d, %zu)\n", + ret, sizeof(*mip)); + return -EIO; + } + if (mip->signature != NFP_MIP_SIGNATURE) { + nfp_warn(cpp, "Incorrect MIP signature (0x%08x)\n", + le32_to_cpu(mip->signature)); + return -EINVAL; + } + if (mip->mip_version != NFP_MIP_VERSION) { + nfp_warn(cpp, "Unsupported MIP version (%d)\n", + le32_to_cpu(mip->mip_version)); + return -EINVAL; + } + + return 0; +} + +/* Try to locate MIP using the resource table */ +static int nfp_mip_read_resource(struct nfp_cpp *cpp, struct nfp_mip *mip) +{ + struct nfp_nffw_info *nffw_info; + u32 cpp_id; + u64 addr; + int err; + + nffw_info = nfp_nffw_info_open(cpp); + if (IS_ERR(nffw_info)) + return PTR_ERR(nffw_info); + + err = nfp_nffw_info_mip_first(nffw_info, &cpp_id, &addr); + if (err) + goto exit_close_nffw; + + err = nfp_mip_try_read(cpp, cpp_id, addr, mip); +exit_close_nffw: + nfp_nffw_info_close(nffw_info); + return err; +} + +/** + * nfp_mip_open() - Get device MIP structure + * @cpp: NFP CPP Handle + * + * Copy MIP structure from NFP device and return it. The returned + * structure is handled internally by the library and should be + * freed by calling nfp_mip_close(). + * + * Return: pointer to mip, NULL on failure. 
 */
const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp)
{
	struct nfp_mip *mip;
	int err;

	mip = kmalloc(sizeof(*mip), GFP_KERNEL);
	if (!mip)
		return NULL;

	err = nfp_mip_read_resource(cpp, mip);
	if (err) {
		kfree(mip);
		return NULL;
	}

	/* Force NUL-termination of the embedded firmware name so that
	 * nfp_mip_name() can safely hand it out as a C string.
	 */
	mip->name[sizeof(mip->name) - 1] = 0;

	return mip;
}

/**
 * nfp_mip_close() - Free a MIP structure returned by nfp_mip_open()
 * @mip:	MIP handle (may have been returned const)
 */
void nfp_mip_close(const struct nfp_mip *mip)
{
	kfree(mip);
}

/* Firmware name recorded in the MIP; valid until nfp_mip_close(@mip) */
const char *nfp_mip_name(const struct nfp_mip *mip)
{
	return mip->name;
}

/**
 * nfp_mip_symtab() - Get the address and size of the MIP symbol table
 * @mip:	MIP handle
 * @addr:	Location for NFP DDR address of MIP symbol table
 * @size:	Location for size of MIP symbol table
 */
void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
{
	*addr = le32_to_cpu(mip->symtab_addr);
	*size = le32_to_cpu(mip->symtab_size);
}

/**
 * nfp_mip_strtab() - Get the address and size of the MIP symbol name table
 * @mip:	MIP handle
 * @addr:	Location for NFP DDR address of MIP symbol name table
 * @size:	Location for size of MIP symbol name table
 */
void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size)
{
	*addr = le32_to_cpu(mip->strtab_addr);
	*size = le32_to_cpu(mip->strtab_size);
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
new file mode 100644
index 000000000..7bc17b94a
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_mutex.c
@@ -0,0 +1,368 @@
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2018 Netronome Systems, Inc.
*/ + +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/jiffies.h> +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/wait.h> + +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +struct nfp_cpp_mutex { + struct nfp_cpp *cpp; + int target; + u16 depth; + unsigned long long address; + u32 key; +}; + +static u32 nfp_mutex_locked(u16 interface) +{ + return (u32)interface << 16 | 0x000f; +} + +static u32 nfp_mutex_unlocked(u16 interface) +{ + return (u32)interface << 16 | 0x0000; +} + +static u32 nfp_mutex_owner(u32 val) +{ + return val >> 16; +} + +static bool nfp_mutex_is_locked(u32 val) +{ + return (val & 0xffff) == 0x000f; +} + +static bool nfp_mutex_is_unlocked(u32 val) +{ + return (val & 0xffff) == 0000; +} + +/* If you need more than 65536 recursive locks, please rethink your code. */ +#define NFP_MUTEX_DEPTH_MAX 0xffff + +static int +nfp_cpp_mutex_validate(u16 interface, int *target, unsigned long long address) +{ + /* Not permitted on invalid interfaces */ + if (NFP_CPP_INTERFACE_TYPE_of(interface) == + NFP_CPP_INTERFACE_TYPE_INVALID) + return -EINVAL; + + /* Address must be 64-bit aligned */ + if (address & 7) + return -EINVAL; + + if (*target != NFP_CPP_TARGET_MU) + return -EINVAL; + + return 0; +} + +/** + * nfp_cpp_mutex_init() - Initialize a mutex location + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * @key: Unique 32-bit value for this mutex + * + * The CPP target:address must point to a 64-bit aligned location, and + * will initialize 64 bits of data at the location. + * + * This creates the initial mutex state, as locked by this + * nfp_cpp_interface(). + * + * This function should only be called when setting up + * the initial lock state upon boot-up of the system. 
 *
 * Return: 0 on success, or -errno on failure
 */
int nfp_cpp_mutex_init(struct nfp_cpp *cpp,
		       int target, unsigned long long address, u32 key)
{
	const u32 muw = NFP_CPP_ID(target, 4, 0);	/* atomic_write */
	u16 interface = nfp_cpp_interface(cpp);
	int err;

	err = nfp_cpp_mutex_validate(interface, &target, address);
	if (err)
		return err;

	/* Write the key word first, then mark the lock word as held by
	 * this interface.
	 */
	err = nfp_cpp_writel(cpp, muw, address + 4, key);
	if (err)
		return err;

	err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_locked(interface));
	if (err)
		return err;

	return 0;
}

/**
 * nfp_cpp_mutex_alloc() - Create a mutex handle
 * @cpp:	NFP CPP handle
 * @target:	NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU)
 * @address:	Offset into the address space of the NFP CPP target ID
 * @key:	32-bit unique key (must match the key at this location)
 *
 * The CPP target:address must point to a 64-bit aligned location, and
 * reserve 64 bits of data at the location for use by the handle.
 *
 * Only target/address pairs that point to entities that support the
 * MU Atomic Engine's CmpAndSwap32 command are supported.
 *
 * Return: A non-NULL struct nfp_cpp_mutex * on success, NULL on failure.
+ */ +struct nfp_cpp_mutex *nfp_cpp_mutex_alloc(struct nfp_cpp *cpp, int target, + unsigned long long address, u32 key) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + u16 interface = nfp_cpp_interface(cpp); + struct nfp_cpp_mutex *mutex; + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return NULL; + + err = nfp_cpp_readl(cpp, mur, address + 4, &tmp); + if (err < 0) + return NULL; + + if (tmp != key) + return NULL; + + mutex = kzalloc(sizeof(*mutex), GFP_KERNEL); + if (!mutex) + return NULL; + + mutex->cpp = cpp; + mutex->target = target; + mutex->address = address; + mutex->key = key; + mutex->depth = 0; + + return mutex; +} + +/** + * nfp_cpp_mutex_free() - Free a mutex handle - does not alter the lock state + * @mutex: NFP CPP Mutex handle + */ +void nfp_cpp_mutex_free(struct nfp_cpp_mutex *mutex) +{ + kfree(mutex); +} + +/** + * nfp_cpp_mutex_lock() - Lock a mutex handle, using the NFP MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_lock(struct nfp_cpp_mutex *mutex) +{ + unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ; + unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ; + unsigned int timeout_ms = 1; + int err; + + /* We can't use a waitqueue here, because the unlocker + * might be on a separate CPU. + * + * So just wait for now. 
+ */ + for (;;) { + err = nfp_cpp_mutex_trylock(mutex); + if (err != -EBUSY) + break; + + err = msleep_interruptible(timeout_ms); + if (err != 0) { + nfp_info(mutex->cpp, + "interrupted waiting for NFP mutex\n"); + return -ERESTARTSYS; + } + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; + nfp_warn(mutex->cpp, + "Warning: waiting for NFP mutex [depth:%hd target:%d addr:%llx key:%08x]\n", + mutex->depth, + mutex->target, mutex->address, mutex->key); + } + if (time_is_before_eq_jiffies(err_at)) { + nfp_err(mutex->cpp, "Error: mutex wait timed out\n"); + return -EBUSY; + } + } + + return err; +} + +/** + * nfp_cpp_mutex_unlock() - Unlock a mutex handle, using the MU Atomic Engine + * @mutex: NFP CPP Mutex handle + * + * Return: 0 on success, or -errno on failure + */ +int nfp_cpp_mutex_unlock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value; + u16 interface; + int err; + + interface = nfp_cpp_interface(cpp); + + if (mutex->depth > 1) { + mutex->depth--; + return 0; + } + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + err = nfp_cpp_readl(mutex->cpp, mur, mutex->address, &value); + if (err < 0) + return err; + + if (value != nfp_mutex_locked(interface)) + return -EACCES; + + err = nfp_cpp_writel(cpp, muw, mutex->address, + nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + mutex->depth = 0; + return 0; +} + +/** + * nfp_cpp_mutex_trylock() - Attempt to lock a mutex handle + * @mutex: NFP CPP Mutex handle + * + * Return: 0 if the lock succeeded, -errno on failure + */ +int nfp_cpp_mutex_trylock(struct nfp_cpp_mutex *mutex) +{ + const u32 muw = NFP_CPP_ID(mutex->target, 4, 0); /* atomic_write */ + const u32 mus = NFP_CPP_ID(mutex->target, 5, 
3); /* test_set_imm */ + const u32 mur = NFP_CPP_ID(mutex->target, 3, 0); /* atomic_read */ + struct nfp_cpp *cpp = mutex->cpp; + u32 key, value, tmp; + int err; + + if (mutex->depth > 0) { + if (mutex->depth == NFP_MUTEX_DEPTH_MAX) + return -E2BIG; + mutex->depth++; + return 0; + } + + /* Verify that the lock marker is not damaged */ + err = nfp_cpp_readl(cpp, mur, mutex->address + 4, &key); + if (err < 0) + return err; + + if (key != mutex->key) + return -EPERM; + + /* Compare against the unlocked state, and if true, + * write the interface id into the top 16 bits, and + * mark as locked. + */ + value = nfp_mutex_locked(nfp_cpp_interface(cpp)); + + /* We use test_set_imm here, as it implies a read + * of the current state, and sets the bits in the + * bytemask of the command to 1s. Since the mutex + * is guaranteed to be 64-bit aligned, the bytemask + * of this 32-bit command is ensured to be 8'b00001111, + * which implies that the lower 4 bits will be set to + * ones regardless of the initial state. + * + * Since this is a 'Readback' operation, with no Pull + * data, we can treat this as a normal Push (read) + * atomic, which returns the original value. + */ + err = nfp_cpp_readl(cpp, mus, mutex->address, &tmp); + if (err < 0) + return err; + + /* Was it unlocked? */ + if (nfp_mutex_is_unlocked(tmp)) { + /* The read value can only be 0x....0000 in the unlocked state. + * If there was another contending for this lock, then + * the lock state would be 0x....000f + */ + + /* Write our owner ID into the lock + * While not strictly necessary, this helps with + * debug and bookkeeping. + */ + err = nfp_cpp_writel(cpp, muw, mutex->address, value); + if (err < 0) + return err; + + mutex->depth = 1; + return 0; + } + + return nfp_mutex_is_locked(tmp) ? 
-EBUSY : -EINVAL; +} + +/** + * nfp_cpp_mutex_reclaim() - Unlock mutex if held by local endpoint + * @cpp: NFP CPP handle + * @target: NFP CPP target ID (ie NFP_CPP_TARGET_CLS or NFP_CPP_TARGET_MU) + * @address: Offset into the address space of the NFP CPP target ID + * + * Release lock if held by local system. Extreme care is advised, call only + * when no local lock users can exist. + * + * Return: 0 if the lock was OK, 1 if locked by us, -errno on invalid mutex + */ +int nfp_cpp_mutex_reclaim(struct nfp_cpp *cpp, int target, + unsigned long long address) +{ + const u32 mur = NFP_CPP_ID(target, 3, 0); /* atomic_read */ + const u32 muw = NFP_CPP_ID(target, 4, 0); /* atomic_write */ + u16 interface = nfp_cpp_interface(cpp); + int err; + u32 tmp; + + err = nfp_cpp_mutex_validate(interface, &target, address); + if (err) + return err; + + /* Check lock */ + err = nfp_cpp_readl(cpp, mur, address, &tmp); + if (err < 0) + return err; + + if (nfp_mutex_is_unlocked(tmp) || nfp_mutex_owner(tmp) != interface) + return 0; + + /* Bust the lock */ + err = nfp_cpp_writel(cpp, muw, address, nfp_mutex_unlocked(interface)); + if (err < 0) + return err; + + return 1; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c new file mode 100644 index 000000000..e2e5fd003 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.c @@ -0,0 +1,261 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp_nffw.c + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Francois H. Theron <francois.theron@netronome.com> + */ + +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp6000/nfp6000.h" + +/* Init-CSR owner IDs for firmware map to firmware IDs which start at 4. 
 * Lower IDs are reserved for target and loader IDs.
 */
#define NFFW_FWID_EXT   3 /* For active MEs that we didn't load. */
#define NFFW_FWID_BASE  4

#define NFFW_FWID_ALL   255

/*
 * NFFW_INFO_VERSION history:
 * 0: This was never actually used (before versioning), but it refers to
 *    the previous struct which had FWINFO_CNT = MEINFO_CNT = 120 that later
 *    changed to 200.
 * 1: First versioned struct, with
 *    FWINFO_CNT = 120
 *    MEINFO_CNT = 120
 * 2:  FWINFO_CNT = 200
 *     MEINFO_CNT = 200
 */
#define NFFW_INFO_VERSION_CURRENT 2

/* Enough for all current chip families */
#define NFFW_MEINFO_CNT_V1 120
#define NFFW_FWINFO_CNT_V1 120
#define NFFW_MEINFO_CNT_V2 200
#define NFFW_FWINFO_CNT_V2 200

/* Work in 32-bit words to make cross-platform endianness easier to handle */

/** nfp.nffw meinfo **/
struct nffw_meinfo {
	__le32 ctxmask__fwid__meid;	/* packed bitfields, see accessors */
};

struct nffw_fwinfo {
	__le32 loaded__mu_da__mip_off_hi;	/* packed, see accessors below */
	__le32 mip_cppid; /* 0 means no MIP */
	__le32 mip_offset_lo;
};

struct nfp_nffw_info_v1 {
	struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V1];
	struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V1];
};

struct nfp_nffw_info_v2 {
	struct nffw_meinfo meinfo[NFFW_MEINFO_CNT_V2];
	struct nffw_fwinfo fwinfo[NFFW_FWINFO_CNT_V2];
};

/** Resource: nfp.nffw main **/
struct nfp_nffw_info_data {
	__le32 flags[2];
	/* layout selected by the version field in flags[0] */
	union {
		struct nfp_nffw_info_v1 v1;
		struct nfp_nffw_info_v2 v2;
	} info;
};

/* Host-side state: CPP handle, held nfp.nffw resource lock, and a local
 * copy of the table read from the device.
 */
struct nfp_nffw_info {
	struct nfp_cpp *cpp;
	struct nfp_resource *res;

	struct nfp_nffw_info_data fwinf;
};

/* flg_info_version = flags[0]<27:16>
 * This is a small version counter intended only to detect if the current
 * implementation can read the current struct. Struct changes should be very
 * rare and as such a 12-bit counter should cover large spans of time. By the
 * time it wraps around, we don't expect to have 4096 versions of this struct
 * to be in use at the same time.
 */
static u32 nffw_res_info_version_get(const struct nfp_nffw_info_data *res)
{
	return (le32_to_cpu(res->flags[0]) >> 16) & 0xfff;
}

/* flg_init = flags[0]<0> */
static u32 nffw_res_flg_init_get(const struct nfp_nffw_info_data *res)
{
	return (le32_to_cpu(res->flags[0]) >> 0) & 1;
}

/* loaded = loaded__mu_da__mip_off_hi<31:31> */
static u32 nffw_fwinfo_loaded_get(const struct nffw_fwinfo *fi)
{
	return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 31) & 1;
}

/* mip_cppid = mip_cppid */
static u32 nffw_fwinfo_mip_cppid_get(const struct nffw_fwinfo *fi)
{
	return le32_to_cpu(fi->mip_cppid);
}

/* mu_da = loaded__mu_da__mip_off_hi<8:8> */
static u32 nffw_fwinfo_mip_mu_da_get(const struct nffw_fwinfo *fi)
{
	return (le32_to_cpu(fi->loaded__mu_da__mip_off_hi) >> 8) & 1;
}

/* mip_offset = (loaded__mu_da__mip_off_hi<7:0> << 32) | mip_offset_lo */
static u64 nffw_fwinfo_mip_offset_get(const struct nffw_fwinfo *fi)
{
	u64 mip_off_hi = le32_to_cpu(fi->loaded__mu_da__mip_off_hi);

	return (mip_off_hi & 0xFF) << 32 | le32_to_cpu(fi->mip_offset_lo);
}

static unsigned int
nffw_res_fwinfos(struct nfp_nffw_info_data *fwinf, struct nffw_fwinfo **arr)
{
	/* For this code, version 0 is most likely to be
	 * version 1 in this case. Since the kernel driver
	 * does not take responsibility for initialising the
	 * nfp.nffw resource, any previous code (CA firmware or
	 * userspace) that left the version 0 and did set
	 * the init flag is going to be version 1.
	 */
	switch (nffw_res_info_version_get(fwinf)) {
	case 0:
	case 1:
		*arr = &fwinf->info.v1.fwinfo[0];
		return NFFW_FWINFO_CNT_V1;
	case 2:
		*arr = &fwinf->info.v2.fwinfo[0];
		return NFFW_FWINFO_CNT_V2;
	default:
		*arr = NULL;
		return 0;
	}
}

/**
 * nfp_nffw_info_open() - Acquire the lock on the NFFW table
 * @cpp:	NFP CPP handle
 *
 * Return: pointer to nfp_nffw_info object or ERR_PTR()
 */
struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp)
{
	struct nfp_nffw_info_data *fwinf;
	struct nfp_nffw_info *state;
	u32 info_ver;
	int err;

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (!state)
		return ERR_PTR(-ENOMEM);

	state->res = nfp_resource_acquire(cpp, NFP_RESOURCE_NFP_NFFW);
	if (IS_ERR(state->res))
		goto err_free;

	fwinf = &state->fwinf;

	/* Resource must be large enough to hold the full table */
	if (sizeof(*fwinf) > nfp_resource_size(state->res))
		goto err_release;

	err = nfp_cpp_read(cpp, nfp_resource_cpp_id(state->res),
			   nfp_resource_address(state->res),
			   fwinf, sizeof(*fwinf));
	if (err < (int)sizeof(*fwinf))
		goto err_release;

	if (!nffw_res_flg_init_get(fwinf))
		goto err_release;

	info_ver = nffw_res_info_version_get(fwinf);
	if (info_ver > NFFW_INFO_VERSION_CURRENT)
		goto err_release;

	state->cpp = cpp;
	return state;

err_release:
	nfp_resource_release(state->res);
err_free:
	kfree(state);
	return ERR_PTR(-EIO);
}

/**
 * nfp_nffw_info_close() - Release the lock on the NFFW table and free state
 * @state:	NFP FW info state
 */
void nfp_nffw_info_close(struct nfp_nffw_info *state)
{
	nfp_resource_release(state->res);
	kfree(state);
}

/**
 * nfp_nffw_info_fwid_first() - Return the first firmware ID in the NFFW
 * @state:	NFP FW info state
 *
 * Return: First NFFW firmware info, NULL on failure
 */
static struct nffw_fwinfo *nfp_nffw_info_fwid_first(struct nfp_nffw_info *state)
{
	struct nffw_fwinfo *fwinfo;
	unsigned int cnt, i;

	cnt = nffw_res_fwinfos(&state->fwinf, &fwinfo);
	if (!cnt)
		return NULL;

	for (i = 0; i < cnt; i++)
		if (nffw_fwinfo_loaded_get(&fwinfo[i]))
			return &fwinfo[i];

	return NULL;
}

/**
 * nfp_nffw_info_mip_first() - Retrieve the location of the first FW's MIP
 * @state:	NFP FW info state
 * @cpp_id:	Pointer to the CPP ID of the MIP
 * @off:	Pointer to the CPP Address of the MIP
 *
 * Return: 0, or -ERRNO
 */
int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off)
{
	struct nffw_fwinfo *fwinfo;

	fwinfo = nfp_nffw_info_fwid_first(state);
	if (!fwinfo)
		return -EINVAL;

	*cpp_id = nffw_fwinfo_mip_cppid_get(fwinfo);
	*off = nffw_fwinfo_mip_offset_get(fwinfo);

	/* Rewrite the MU locality bits to direct access when the mu_da
	 * flag is set.
	 */
	if (nffw_fwinfo_mip_mu_da_get(fwinfo)) {
		int locality_off = nfp_cpp_mu_locality_lsb(state->cpp);

		*off &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off);
		*off |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off;
	}

	return 0;
}
diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
new file mode 100644
index 000000000..49a4d3f56
--- /dev/null
+++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nffw.h
@@ -0,0 +1,107 @@
/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */
/* Copyright (C) 2015-2018 Netronome Systems, Inc. */

/*
 * nfp_nffw.h
 * Authors: Jason McMullan <jason.mcmullan@netronome.com>
 *          Francois H.
Theron <francois.theron@netronome.com> + */ + +#ifndef NFP_NFFW_H +#define NFP_NFFW_H + +/* Implemented in nfp_nffw.c */ + +struct nfp_nffw_info; + +struct nfp_nffw_info *nfp_nffw_info_open(struct nfp_cpp *cpp); +void nfp_nffw_info_close(struct nfp_nffw_info *state); +int nfp_nffw_info_mip_first(struct nfp_nffw_info *state, u32 *cpp_id, u64 *off); + +/* Implemented in nfp_mip.c */ + +struct nfp_mip; + +const struct nfp_mip *nfp_mip_open(struct nfp_cpp *cpp); +void nfp_mip_close(const struct nfp_mip *mip); + +const char *nfp_mip_name(const struct nfp_mip *mip); +void nfp_mip_symtab(const struct nfp_mip *mip, u32 *addr, u32 *size); +void nfp_mip_strtab(const struct nfp_mip *mip, u32 *addr, u32 *size); + +/* Implemented in nfp_rtsym.c */ + +enum nfp_rtsym_type { + NFP_RTSYM_TYPE_NONE = 0, + NFP_RTSYM_TYPE_OBJECT = 1, + NFP_RTSYM_TYPE_FUNCTION = 2, + NFP_RTSYM_TYPE_ABS = 3, +}; + +#define NFP_RTSYM_TARGET_NONE 0 +#define NFP_RTSYM_TARGET_LMEM -1 +#define NFP_RTSYM_TARGET_EMU_CACHE -7 + +/** + * struct nfp_rtsym - RTSYM descriptor + * @name: Symbol name + * @addr: Address in the domain/target's address space + * @size: Size (in bytes) of the symbol + * @type: NFP_RTSYM_TYPE_* of the symbol + * @target: CPP Target identifier, or NFP_RTSYM_TARGET_* + * @domain: CPP Target Domain (island) + */ +struct nfp_rtsym { + const char *name; + u64 addr; + u64 size; + enum nfp_rtsym_type type; + int target; + int domain; +}; + +struct nfp_rtsym_table; + +struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp); +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip); +int nfp_rtsym_count(struct nfp_rtsym_table *rtbl); +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx); +const struct nfp_rtsym * +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name); + +u64 nfp_rtsym_size(const struct nfp_rtsym *rtsym); +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, 
u64 off, void *buf, size_t len); +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value); +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value); +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value); +int nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value); +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len); +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len); +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value); +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value); +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value); +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value); + +u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, + int *error); +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + u64 value); +u8 __iomem * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int min_size, struct nfp_cpp_area **area); + +#endif /* NFP_NFFW_H */ diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c new file mode 100644 index 000000000..7136bc485 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.c @@ -0,0 +1,1120 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ + +/* + * nfp_nsp.c + * Author: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + */ + +#include <asm/unaligned.h> +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/firmware.h> +#include <linux/kernel.h> +#include <linux/kthread.h> +#include <linux/overflow.h> +#include <linux/sizes.h> +#include <linux/slab.h> + +#define NFP_SUBSYS "nfp_nsp" + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nsp.h" + +#define NFP_NSP_TIMEOUT_DEFAULT 30 +#define NFP_NSP_TIMEOUT_BOOT 30 + +/* Offsets relative to the CSR base */ +#define NSP_STATUS 0x00 +#define NSP_STATUS_MAGIC GENMASK_ULL(63, 48) +#define NSP_STATUS_MAJOR GENMASK_ULL(47, 44) +#define NSP_STATUS_MINOR GENMASK_ULL(43, 32) +#define NSP_STATUS_CODE GENMASK_ULL(31, 16) +#define NSP_STATUS_RESULT GENMASK_ULL(15, 8) +#define NSP_STATUS_BUSY BIT_ULL(0) + +#define NSP_COMMAND 0x08 +#define NSP_COMMAND_OPTION GENMASK_ULL(63, 32) +#define NSP_COMMAND_CODE GENMASK_ULL(31, 16) +#define NSP_COMMAND_DMA_BUF BIT_ULL(1) +#define NSP_COMMAND_START BIT_ULL(0) + +/* CPP address to retrieve the data from */ +#define NSP_BUFFER 0x10 +#define NSP_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_BUFFER_ADDRESS GENMASK_ULL(39, 0) + +#define NSP_DFLT_BUFFER 0x18 +#define NSP_DFLT_BUFFER_CPP GENMASK_ULL(63, 40) +#define NSP_DFLT_BUFFER_ADDRESS GENMASK_ULL(39, 0) + +#define NSP_DFLT_BUFFER_CONFIG 0x20 +#define NSP_DFLT_BUFFER_DMA_CHUNK_ORDER GENMASK_ULL(63, 58) +#define NSP_DFLT_BUFFER_SIZE_4KB GENMASK_ULL(15, 8) +#define NSP_DFLT_BUFFER_SIZE_MB GENMASK_ULL(7, 0) + +#define NFP_CAP_CMD_DMA_SG 0x28 + +#define NSP_MAGIC 0xab10 +#define NSP_MAJOR 0 +#define NSP_MINOR 8 + +#define NSP_CODE_MAJOR GENMASK(15, 12) +#define NSP_CODE_MINOR GENMASK(11, 0) + +#define NFP_FW_LOAD_RET_MAJOR GENMASK(15, 8) +#define NFP_FW_LOAD_RET_MINOR GENMASK(23, 16) + +#define NFP_HWINFO_LOOKUP_SIZE GENMASK(11, 0) + +#define NFP_VERSIONS_SIZE GENMASK(11, 0) +#define NFP_VERSIONS_CNT_OFF 0 +#define 
NFP_VERSIONS_BSP_OFF 2 +#define NFP_VERSIONS_CPLD_OFF 6 +#define NFP_VERSIONS_APP_OFF 10 +#define NFP_VERSIONS_BUNDLE_OFF 14 +#define NFP_VERSIONS_UNDI_OFF 18 +#define NFP_VERSIONS_NCSI_OFF 22 +#define NFP_VERSIONS_CFGR_OFF 26 + +#define NSP_SFF_EEPROM_BLOCK_LEN 8 + +enum nfp_nsp_cmd { + SPCODE_NOOP = 0, /* No operation */ + SPCODE_SOFT_RESET = 1, /* Soft reset the NFP */ + SPCODE_FW_DEFAULT = 2, /* Load default (UNDI) FW */ + SPCODE_PHY_INIT = 3, /* Initialize the PHY */ + SPCODE_MAC_INIT = 4, /* Initialize the MAC */ + SPCODE_PHY_RXADAPT = 5, /* Re-run PHY RX Adaptation */ + SPCODE_FW_LOAD = 6, /* Load fw from buffer, len in option */ + SPCODE_ETH_RESCAN = 7, /* Rescan ETHs, write ETH_TABLE to buf */ + SPCODE_ETH_CONTROL = 8, /* Update media config from buffer */ + SPCODE_NSP_WRITE_FLASH = 11, /* Load and flash image from buffer */ + SPCODE_NSP_SENSORS = 12, /* Read NSP sensor(s) */ + SPCODE_NSP_IDENTIFY = 13, /* Read NSP version */ + SPCODE_FW_STORED = 16, /* If no FW loaded, load flash app FW */ + SPCODE_HWINFO_LOOKUP = 17, /* Lookup HWinfo with overwrites etc. 
*/ + SPCODE_HWINFO_SET = 18, /* Set HWinfo entry */ + SPCODE_FW_LOADED = 19, /* Is application firmware loaded */ + SPCODE_VERSIONS = 21, /* Report FW versions */ + SPCODE_READ_SFF_EEPROM = 22, /* Read module EEPROM */ + SPCODE_READ_MEDIA = 23, /* Get either the supported or advertised media for a port */ +}; + +struct nfp_nsp_dma_buf { + __le32 chunk_cnt; + __le32 reserved[3]; + struct { + __le32 size; + __le32 reserved; + __le64 addr; + } descs[]; +}; + +static const struct { + int code; + const char *msg; +} nsp_errors[] = { + { 6010, "could not map to phy for port" }, + { 6011, "not an allowed rate/lanes for port" }, + { 6012, "not an allowed rate/lanes for port" }, + { 6013, "high/low error, change other port first" }, + { 6014, "config not found in flash" }, +}; + +struct nfp_nsp { + struct nfp_cpp *cpp; + struct nfp_resource *res; + struct { + u16 major; + u16 minor; + } ver; + + /* Eth table config state */ + bool modified; + unsigned int idx; + void *entries; +}; + +/** + * struct nfp_nsp_command_arg - NFP command argument structure + * @code: NFP SP Command Code + * @dma: @buf points to a host buffer, not NSP buffer + * @timeout_sec:Timeout value to wait for completion in seconds + * @option: NFP SP Command Argument + * @buf: NFP SP Buffer Address + * @error_cb: Callback for interpreting option if error occurred + * @error_quiet:Don't print command error/warning. Protocol errors are still + * logged. 
+ */ +struct nfp_nsp_command_arg { + u16 code; + bool dma; + unsigned int timeout_sec; + u32 option; + u64 buf; + void (*error_cb)(struct nfp_nsp *state, u32 ret_val); + bool error_quiet; +}; + +/** + * struct nfp_nsp_command_buf_arg - NFP command with buffer argument structure + * @arg: NFP command argument structure + * @in_buf: Buffer with data for input + * @in_size: Size of @in_buf + * @out_buf: Buffer for output data + * @out_size: Size of @out_buf + */ +struct nfp_nsp_command_buf_arg { + struct nfp_nsp_command_arg arg; + const void *in_buf; + unsigned int in_size; + void *out_buf; + unsigned int out_size; +}; + +struct nfp_cpp *nfp_nsp_cpp(struct nfp_nsp *state) +{ + return state->cpp; +} + +bool nfp_nsp_config_modified(struct nfp_nsp *state) +{ + return state->modified; +} + +void nfp_nsp_config_set_modified(struct nfp_nsp *state, bool modified) +{ + state->modified = modified; +} + +void *nfp_nsp_config_entries(struct nfp_nsp *state) +{ + return state->entries; +} + +unsigned int nfp_nsp_config_idx(struct nfp_nsp *state) +{ + return state->idx; +} + +void +nfp_nsp_config_set_state(struct nfp_nsp *state, void *entries, unsigned int idx) +{ + state->entries = entries; + state->idx = idx; +} + +void nfp_nsp_config_clear_state(struct nfp_nsp *state) +{ + state->entries = NULL; + state->idx = 0; +} + +static void nfp_nsp_print_extended_error(struct nfp_nsp *state, u32 ret_val) +{ + int i; + + if (!ret_val) + return; + + for (i = 0; i < ARRAY_SIZE(nsp_errors); i++) + if (ret_val == nsp_errors[i].code) + nfp_err(state->cpp, "err msg: %s\n", nsp_errors[i].msg); +} + +static int nfp_nsp_check(struct nfp_nsp *state) +{ + struct nfp_cpp *cpp = state->cpp; + u64 nsp_status, reg; + u32 nsp_cpp; + int err; + + nsp_cpp = nfp_resource_cpp_id(state->res); + nsp_status = nfp_resource_address(state->res) + NSP_STATUS; + + err = nfp_cpp_readq(cpp, nsp_cpp, nsp_status, ®); + if (err < 0) + return err; + + if (FIELD_GET(NSP_STATUS_MAGIC, reg) != NSP_MAGIC) { + nfp_err(cpp, 
"Cannot detect NFP Service Processor\n"); + return -ENODEV; + } + + state->ver.major = FIELD_GET(NSP_STATUS_MAJOR, reg); + state->ver.minor = FIELD_GET(NSP_STATUS_MINOR, reg); + + if (state->ver.major != NSP_MAJOR) { + nfp_err(cpp, "Unsupported ABI %hu.%hu\n", + state->ver.major, state->ver.minor); + return -EINVAL; + } + if (state->ver.minor < NSP_MINOR) { + nfp_err(cpp, "ABI too old to support NIC operation (%u.%hu < %u.%u), please update the management FW on the flash\n", + NSP_MAJOR, state->ver.minor, NSP_MAJOR, NSP_MINOR); + return -EINVAL; + } + + if (reg & NSP_STATUS_BUSY) { + nfp_err(cpp, "Service processor busy!\n"); + return -EBUSY; + } + + return 0; +} + +/** + * nfp_nsp_open() - Prepare for communication and lock the NSP resource. + * @cpp: NFP CPP Handle + */ +struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp) +{ + struct nfp_resource *res; + struct nfp_nsp *state; + int err; + + res = nfp_resource_acquire(cpp, NFP_RESOURCE_NSP); + if (IS_ERR(res)) + return (void *)res; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) { + nfp_resource_release(res); + return ERR_PTR(-ENOMEM); + } + state->cpp = cpp; + state->res = res; + + err = nfp_nsp_check(state); + if (err) { + nfp_nsp_close(state); + return ERR_PTR(err); + } + + return state; +} + +/** + * nfp_nsp_close() - Clean up and unlock the NSP resource. 
/**
 * nfp_nsp_close() - Clean up and unlock the NSP resource.
 * @state:	NFP SP state
 */
void nfp_nsp_close(struct nfp_nsp *state)
{
	nfp_resource_release(state->res);
	kfree(state);
}

/* ABI major version cached at nfp_nsp_open() time */
u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state)
{
	return state->ver.major;
}

/* ABI minor version cached at nfp_nsp_open() time */
u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state)
{
	return state->ver.minor;
}

/* Poll @addr every 25ms until (*reg & mask) == val or @timeout_sec elapses.
 * NOTE(review): the timeout compares the jiffies snapshot taken *before*
 * the read and msleep() against @wait_until, so one extra poll past the
 * deadline is possible — appears intentional (tolerates oversleeping).
 */
static int
nfp_nsp_wait_reg(struct nfp_cpp *cpp, u64 *reg, u32 nsp_cpp, u64 addr,
		 u64 mask, u64 val, u32 timeout_sec)
{
	const unsigned long wait_until = jiffies + timeout_sec * HZ;
	int err;

	for (;;) {
		const unsigned long start_time = jiffies;

		err = nfp_cpp_readq(cpp, nsp_cpp, addr, reg);
		if (err < 0)
			return err;

		if ((*reg & mask) == val)
			return 0;

		msleep(25);

		if (time_after(start_time, wait_until))
			return -ETIMEDOUT;
	}
}

/**
 * __nfp_nsp_command() - Execute a command on the NFP Service Processor
 * @state:	NFP SP state
 * @arg:	NFP command argument structure
 *
 * Writes the buffer config and command registers, then waits for the
 * command to be accepted (START bit clears) and to finish (BUSY clears)
 * before extracting the result code.
 *
 * Return: 0 for success with no result
 *
 *	 positive value for NSP completion with a result code
 *
 *	 -EAGAIN if the NSP is not yet present
 *	 -ENODEV if the NSP is not a supported model
 *	 -EBUSY if the NSP is stuck
 *	 -EINTR if interrupted while waiting for completion
 *	 -ETIMEDOUT if the NSP took longer than @timeout_sec seconds to complete
 */
static int
__nfp_nsp_command(struct nfp_nsp *state, const struct nfp_nsp_command_arg *arg)
{
	u64 reg, ret_val, nsp_base, nsp_buffer, nsp_status, nsp_command;
	struct nfp_cpp *cpp = state->cpp;
	u32 nsp_cpp;
	int err;

	nsp_cpp = nfp_resource_cpp_id(state->res);
	nsp_base = nfp_resource_address(state->res);
	nsp_status = nsp_base + NSP_STATUS;
	nsp_command = nsp_base + NSP_COMMAND;
	nsp_buffer = nsp_base + NSP_BUFFER;

	/* Re-validate ABI and busy state before every command */
	err = nfp_nsp_check(state);
	if (err)
		return err;

	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_buffer, arg->buf);
	if (err < 0)
		return err;

	err = nfp_cpp_writeq(cpp, nsp_cpp, nsp_command,
			     FIELD_PREP(NSP_COMMAND_OPTION, arg->option) |
			     FIELD_PREP(NSP_COMMAND_CODE, arg->code) |
			     FIELD_PREP(NSP_COMMAND_DMA_BUF, arg->dma) |
			     FIELD_PREP(NSP_COMMAND_START, 1));
	if (err < 0)
		return err;

	/* Wait for NSP_COMMAND_START to go to 0 */
	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_command,
			       NSP_COMMAND_START, 0, NFP_NSP_TIMEOUT_DEFAULT);
	if (err) {
		nfp_err(cpp, "Error %d waiting for code 0x%04x to start\n",
			err, arg->code);
		return err;
	}

	/* Wait for NSP_STATUS_BUSY to go to 0 */
	err = nfp_nsp_wait_reg(cpp, &reg, nsp_cpp, nsp_status, NSP_STATUS_BUSY,
			       0, arg->timeout_sec ?: NFP_NSP_TIMEOUT_DEFAULT);
	if (err) {
		nfp_err(cpp, "Error %d waiting for code 0x%04x to complete\n",
			err, arg->code);
		return err;
	}

	/* Result value is returned through the command register's OPTION field */
	err = nfp_cpp_readq(cpp, nsp_cpp, nsp_command, &ret_val);
	if (err < 0)
		return err;
	ret_val = FIELD_GET(NSP_COMMAND_OPTION, ret_val);

	err = FIELD_GET(NSP_STATUS_RESULT, reg);
	if (err) {
		if (!arg->error_quiet)
			nfp_warn(cpp, "Result (error) code set: %d (%d) command: %d\n",
				 -err, (int)ret_val, arg->code);

		if (arg->error_cb)
			arg->error_cb(state, ret_val);
		else
			nfp_nsp_print_extended_error(state, ret_val);
		return -err;
	}

	return ret_val;
}

/* Convenience wrapper for buffer-less NSP commands */
static int nfp_nsp_command(struct nfp_nsp *state, u16 code)
{
	const struct nfp_nsp_command_arg arg = {
		.code = code,
	};

	return __nfp_nsp_command(state, &arg);
}
/* Execute a buffer command using the NSP's default (on-device) buffer.
 * Input data is copied into the default buffer, the trailing portion is
 * zeroed, and the output (if any) is read back after command completion.
 */
static int
nfp_nsp_command_buf_def(struct nfp_nsp *nsp,
			struct nfp_nsp_command_buf_arg *arg)
{
	struct nfp_cpp *cpp = nsp->cpp;
	u64 reg, cpp_buf;
	int err, ret;
	u32 cpp_id;

	err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
			    nfp_resource_address(nsp->res) +
			    NSP_DFLT_BUFFER,
			    &reg);
	if (err < 0)
		return err;

	cpp_id = FIELD_GET(NSP_DFLT_BUFFER_CPP, reg) << 8;
	cpp_buf = FIELD_GET(NSP_DFLT_BUFFER_ADDRESS, reg);

	if (arg->in_buf && arg->in_size) {
		err = nfp_cpp_write(cpp, cpp_id, cpp_buf,
				    arg->in_buf, arg->in_size);
		if (err < 0)
			return err;
	}
	/* Zero out remaining part of the buffer */
	if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size) {
		err = nfp_cpp_write(cpp, cpp_id, cpp_buf + arg->in_size,
				    arg->out_buf, arg->out_size - arg->in_size);
		if (err < 0)
			return err;
	}

	if (!FIELD_FIT(NSP_BUFFER_CPP, cpp_id >> 8) ||
	    !FIELD_FIT(NSP_BUFFER_ADDRESS, cpp_buf)) {
		nfp_err(cpp, "Buffer out of reach %08x %016llx\n",
			cpp_id, cpp_buf);
		return -EINVAL;
	}

	arg->arg.buf = FIELD_PREP(NSP_BUFFER_CPP, cpp_id >> 8) |
		       FIELD_PREP(NSP_BUFFER_ADDRESS, cpp_buf);
	ret = __nfp_nsp_command(nsp, &arg->arg);
	if (ret < 0)
		return ret;

	if (arg->out_buf && arg->out_size) {
		err = nfp_cpp_read(cpp, cpp_id, cpp_buf,
				   arg->out_buf, arg->out_size);
		if (err < 0)
			return err;
	}

	return ret;
}

/* Execute a buffer command via host-memory DMA, splitting the data into
 * @chunk_order-sized chunks described by an nfp_nsp_dma_buf descriptor.
 * Each chunk must not cross a BIT(dma_order) boundary.  Cleanup on error
 * uses the goto ladder below; note the deliberate fall-through labels
 * (exit_unmap_all resets @i so all mappings are undone).
 */
static int
nfp_nsp_command_buf_dma_sg(struct nfp_nsp *nsp,
			   struct nfp_nsp_command_buf_arg *arg,
			   unsigned int max_size, unsigned int chunk_order,
			   unsigned int dma_order)
{
	struct nfp_cpp *cpp = nsp->cpp;
	struct nfp_nsp_dma_buf *desc;
	struct {
		dma_addr_t dma_addr;
		unsigned long len;
		void *chunk;
	} *chunks;
	size_t chunk_size, dma_size;
	dma_addr_t dma_desc;
	struct device *dev;
	unsigned long off;
	int i, ret, nseg;
	size_t desc_sz;

	chunk_size = BIT_ULL(chunk_order);
	dma_size = BIT_ULL(dma_order);
	nseg = DIV_ROUND_UP(max_size, chunk_size);

	chunks = kcalloc(nseg, sizeof(*chunks), GFP_KERNEL);
	if (!chunks)
		return -ENOMEM;

	off = 0;
	ret = -ENOMEM;
	for (i = 0; i < nseg; i++) {
		unsigned long coff;

		chunks[i].chunk = kmalloc(chunk_size,
					  GFP_KERNEL | __GFP_NOWARN);
		if (!chunks[i].chunk)
			goto exit_free_prev;

		chunks[i].len = min_t(u64, chunk_size, max_size - off);

		/* Copy input data covering this chunk, zero the remainder */
		coff = 0;
		if (arg->in_size > off) {
			coff = min_t(u64, arg->in_size - off, chunk_size);
			memcpy(chunks[i].chunk, arg->in_buf + off, coff);
		}
		memset(chunks[i].chunk + coff, 0, chunk_size - coff);

		off += chunks[i].len;
	}

	dev = nfp_cpp_device(cpp)->parent;

	for (i = 0; i < nseg; i++) {
		dma_addr_t addr;

		addr = dma_map_single(dev, chunks[i].chunk, chunks[i].len,
				      DMA_BIDIRECTIONAL);
		chunks[i].dma_addr = addr;

		ret = dma_mapping_error(dev, addr);
		if (ret)
			goto exit_unmap_prev;

		/* Chunks must not straddle a dma_order boundary */
		if (WARN_ONCE(round_down(addr, dma_size) !=
			      round_down(addr + chunks[i].len - 1, dma_size),
			      "unaligned DMA address: %pad %lu %zd\n",
			      &addr, chunks[i].len, dma_size)) {
			ret = -EFAULT;
			i++;	/* include this chunk in the unmap loop */
			goto exit_unmap_prev;
		}
	}

	desc_sz = struct_size(desc, descs, nseg);
	desc = kmalloc(desc_sz, GFP_KERNEL);
	if (!desc) {
		ret = -ENOMEM;
		goto exit_unmap_all;
	}

	desc->chunk_cnt = cpu_to_le32(nseg);
	for (i = 0; i < nseg; i++) {
		desc->descs[i].size = cpu_to_le32(chunks[i].len);
		desc->descs[i].addr = cpu_to_le64(chunks[i].dma_addr);
	}

	dma_desc = dma_map_single(dev, desc, desc_sz, DMA_TO_DEVICE);
	ret = dma_mapping_error(dev, dma_desc);
	if (ret)
		goto exit_free_desc;

	arg->arg.dma = true;
	arg->arg.buf = dma_desc;
	ret = __nfp_nsp_command(nsp, &arg->arg);
	if (ret < 0)
		goto exit_unmap_desc;

	/* Gather output data back from the chunks */
	i = 0;
	off = 0;
	while (off < arg->out_size) {
		unsigned int len;

		len = min_t(u64, chunks[i].len, arg->out_size - off);
		memcpy(arg->out_buf + off, chunks[i].chunk, len);
		off += len;
		i++;
	}

exit_unmap_desc:
	dma_unmap_single(dev, dma_desc, desc_sz, DMA_TO_DEVICE);
exit_free_desc:
	kfree(desc);
exit_unmap_all:
	i = nseg;
exit_unmap_prev:
	while (--i >= 0)
		dma_unmap_single(dev, chunks[i].dma_addr, chunks[i].len,
				 DMA_BIDIRECTIONAL);
	i = nseg;
exit_free_prev:
	while (--i >= 0)
		kfree(chunks[i].chunk);
	kfree(chunks);
	if (ret < 0)
		nfp_err(cpp, "NSP: SG DMA failed for command 0x%04x: %d (sz:%d cord:%d)\n",
			arg->arg.code, ret, max_size, chunk_order);
	return ret;
}
/* Decide DMA chunking for a buffer command.  If the NSP advertises SG
 * support for this command code (capability bitmap at NFP_CAP_CMD_DMA_SG,
 * bit code-1), chunks are capped at min(dma_order, PAGE_SHIFT); otherwise
 * the whole buffer must fit a single BIT(dma_order) allocation.
 */
static int
nfp_nsp_command_buf_dma(struct nfp_nsp *nsp,
			struct nfp_nsp_command_buf_arg *arg,
			unsigned int max_size, unsigned int dma_order)
{
	unsigned int chunk_order, buf_order;
	struct nfp_cpp *cpp = nsp->cpp;
	bool sg_ok;
	u64 reg;
	int err;

	buf_order = order_base_2(roundup_pow_of_two(max_size));

	err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
			    nfp_resource_address(nsp->res) + NFP_CAP_CMD_DMA_SG,
			    &reg);
	if (err < 0)
		return err;
	sg_ok = reg & BIT_ULL(arg->arg.code - 1);

	if (!sg_ok) {
		if (buf_order > dma_order) {
			nfp_err(cpp, "NSP: can't service non-SG DMA for command 0x%04x\n",
				arg->arg.code);
			return -ENOMEM;
		}
		chunk_order = buf_order;
	} else {
		chunk_order = min_t(unsigned int, dma_order, PAGE_SHIFT);
	}

	return nfp_nsp_command_buf_dma_sg(nsp, arg, max_size, chunk_order,
					  dma_order);
}

/* Dispatch a buffer command: use the NSP default buffer when it is large
 * enough, otherwise fall back to host-memory DMA (ABI >= 0.13 required).
 */
static int
nfp_nsp_command_buf(struct nfp_nsp *nsp, struct nfp_nsp_command_buf_arg *arg)
{
	unsigned int dma_order, def_size, max_size;
	struct nfp_cpp *cpp = nsp->cpp;
	u64 reg;
	int err;

	if (nsp->ver.minor < 13) {
		nfp_err(cpp, "NSP: Code 0x%04x with buffer not supported (ABI %hu.%hu)\n",
			arg->arg.code, nsp->ver.major, nsp->ver.minor);
		return -EOPNOTSUPP;
	}

	err = nfp_cpp_readq(cpp, nfp_resource_cpp_id(nsp->res),
			    nfp_resource_address(nsp->res) +
			    NSP_DFLT_BUFFER_CONFIG,
			    &reg);
	if (err < 0)
		return err;

	/* Zero out undefined part of the out buffer */
	if (arg->out_buf && arg->out_size && arg->out_size > arg->in_size)
		memset(arg->out_buf, 0, arg->out_size - arg->in_size);

	max_size = max(arg->in_size, arg->out_size);
	def_size = FIELD_GET(NSP_DFLT_BUFFER_SIZE_MB, reg) * SZ_1M +
		   FIELD_GET(NSP_DFLT_BUFFER_SIZE_4KB, reg) * SZ_4K;
	dma_order = FIELD_GET(NSP_DFLT_BUFFER_DMA_CHUNK_ORDER, reg);
	if (def_size >= max_size) {
		return nfp_nsp_command_buf_def(nsp, arg);
	} else if (!dma_order) {
		nfp_err(cpp, "NSP: default buffer too small for command 0x%04x (%u < %u)\n",
			arg->arg.code, def_size, max_size);
		return -EINVAL;
	}

	return nfp_nsp_command_buf_dma(nsp, arg, max_size, dma_order);
}
/* Poll the NSP with NOOP commands until it responds, the boot timeout
 * expires, or the sleep is interrupted.  -EAGAIN from the command means
 * "NSP not present yet" and keeps the loop going.
 */
int nfp_nsp_wait(struct nfp_nsp *state)
{
	const unsigned long wait_until = jiffies + NFP_NSP_TIMEOUT_BOOT * HZ;
	int err;

	nfp_dbg(state->cpp, "Waiting for NSP to respond (%u sec max).\n",
		NFP_NSP_TIMEOUT_BOOT);

	for (;;) {
		const unsigned long start_time = jiffies;

		err = nfp_nsp_command(state, SPCODE_NOOP);
		if (err != -EAGAIN)
			break;

		if (msleep_interruptible(25)) {
			err = -ERESTARTSYS;
			break;
		}

		if (time_after(start_time, wait_until)) {
			err = -ETIMEDOUT;
			break;
		}
	}
	if (err)
		nfp_err(state->cpp, "NSP failed to respond %d\n", err);

	return err;
}

/* Issue a device soft reset through the NSP */
int nfp_nsp_device_soft_reset(struct nfp_nsp *state)
{
	return nfp_nsp_command(state, SPCODE_SOFT_RESET);
}

/* Ask the NSP to re-initialize the MAC blocks */
int nfp_nsp_mac_reinit(struct nfp_nsp *state)
{
	return nfp_nsp_command(state, SPCODE_MAC_INIT);
}
/* Decode and log the extended FW-load result code (major/minor fields of
 * @ret_val).  Silently returns on old ABIs which don't report this code.
 */
static void nfp_nsp_load_fw_extended_msg(struct nfp_nsp *state, u32 ret_val)
{
	static const char * const major_msg[] = {
		/* 0 */ "Firmware from driver loaded",
		/* 1 */ "Firmware from flash loaded",
		/* 2 */ "Firmware loading failure",
	};
	static const char * const minor_msg[] = {
		/*  0 */ "",
		/*  1 */ "no named partition on flash",
		/*  2 */ "error reading from flash",
		/*  3 */ "can not deflate",
		/*  4 */ "not a trusted file",
		/*  5 */ "can not parse FW file",
		/*  6 */ "MIP not found in FW file",
		/*  7 */ "null firmware name in MIP",
		/*  8 */ "FW version none",
		/*  9 */ "FW build number none",
		/* 10 */ "no FW selection policy HWInfo key found",
		/* 11 */ "static FW selection policy",
		/* 12 */ "FW version has precedence",
		/* 13 */ "different FW application load requested",
		/* 14 */ "development build",
	};
	unsigned int major, minor;
	const char *level;

	major = FIELD_GET(NFP_FW_LOAD_RET_MAJOR, ret_val);
	minor = FIELD_GET(NFP_FW_LOAD_RET_MINOR, ret_val);

	if (!nfp_nsp_has_stored_fw_load(state))
		return;

	/* Lower the message level in legacy case */
	if (major == 0 && (minor == 0 || minor == 10))
		level = KERN_DEBUG;
	else if (major == 2)
		level = KERN_ERR;
	else
		level = KERN_INFO;

	if (major >= ARRAY_SIZE(major_msg))
		nfp_printk(level, state->cpp, "FW loading status: %x\n",
			   ret_val);
	else if (minor >= ARRAY_SIZE(minor_msg))
		nfp_printk(level, state->cpp, "%s, reason code: %d\n",
			   major_msg[major], minor);
	else
		nfp_printk(level, state->cpp, "%s%c %s\n",
			   major_msg[major], minor ? ',' : '.',
			   minor_msg[minor]);
}

/* Load application firmware from @fw into the device via the NSP */
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw)
{
	struct nfp_nsp_command_buf_arg load_fw = {
		{
			.code		= SPCODE_FW_LOAD,
			.option		= fw->size,
			.error_cb	= nfp_nsp_load_fw_extended_msg,
		},
		.in_buf		= fw->data,
		.in_size	= fw->size,
	};
	int ret;

	ret = nfp_nsp_command_buf(state, &load_fw);
	if (ret < 0)
		return ret;

	nfp_nsp_load_fw_extended_msg(state, ret);
	return 0;
}

/* Write @fw to the device flash; flashing is slow hence the 900s timeout */
int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw)
{
	struct nfp_nsp_command_buf_arg write_flash = {
		{
			.code		= SPCODE_NSP_WRITE_FLASH,
			.option		= fw->size,
			.timeout_sec	= 900,
		},
		.in_buf		= fw->data,
		.in_size	= fw->size,
	};

	return nfp_nsp_command_buf(state, &write_flash);
}

/* Rescan and read the ETH port table into @buf (@size bytes) */
int nfp_nsp_read_eth_table(struct nfp_nsp *state, void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg eth_rescan = {
		{
			.code		= SPCODE_ETH_RESCAN,
			.option		= size,
		},
		.out_buf	= buf,
		.out_size	= size,
	};

	return nfp_nsp_command_buf(state, &eth_rescan);
}

/* Write a modified ETH port table from @buf back to the NSP */
int nfp_nsp_write_eth_table(struct nfp_nsp *state,
			    const void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg eth_ctrl = {
		{
			.code		= SPCODE_ETH_CONTROL,
			.option		= size,
		},
		.in_buf		= buf,
		.in_size	= size,
	};

	return nfp_nsp_command_buf(state, &eth_ctrl);
}

/* Read the NSP self-identification structure into @buf */
int nfp_nsp_read_identify(struct nfp_nsp *state, void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg identify = {
		{
			.code		= SPCODE_NSP_IDENTIFY,
			.option		= size,
		},
		.out_buf	= buf,
		.out_size	= size,
	};

	return nfp_nsp_command_buf(state, &identify);
}
/* Read sensor values selected by @sensor_mask into @buf */
int nfp_nsp_read_sensors(struct nfp_nsp *state, unsigned int sensor_mask,
			 void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg sensors = {
		{
			.code		= SPCODE_NSP_SENSORS,
			.option		= sensor_mask,
		},
		.out_buf	= buf,
		.out_size	= size,
	};

	return nfp_nsp_command_buf(state, &sensors);
}

/* Ask the NSP to load the application FW stored on flash */
int nfp_nsp_load_stored_fw(struct nfp_nsp *state)
{
	const struct nfp_nsp_command_arg arg = {
		.code		= SPCODE_FW_STORED,
		.error_cb	= nfp_nsp_load_fw_extended_msg,
	};
	int ret;

	ret = __nfp_nsp_command(state, &arg);
	if (ret < 0)
		return ret;

	nfp_nsp_load_fw_extended_msg(state, ret);
	return 0;
}

/* HWinfo lookup: @buf carries the key in and the value out (same buffer).
 * @optional suppresses the error log for keys expected to be absent.
 */
static int
__nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size,
			bool optional)
{
	struct nfp_nsp_command_buf_arg hwinfo_lookup = {
		{
			.code		= SPCODE_HWINFO_LOOKUP,
			.option		= size,
			.error_quiet	= optional,
		},
		.in_buf		= buf,
		.in_size	= size,
		.out_buf	= buf,
		.out_size	= size,
	};

	return nfp_nsp_command_buf(state, &hwinfo_lookup);
}

/* Mandatory HWinfo lookup; rejects a non-NUL-terminated result */
int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size)
{
	int err;

	size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE);

	err = __nfp_nsp_hwinfo_lookup(state, buf, size, false);
	if (err)
		return err;

	if (strnlen(buf, size) == size) {
		nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n");
		return -EINVAL;
	}

	return 0;
}
/* Optional HWinfo lookup: falls back to @default_val when the lookup is
 * unsupported by the ABI or the key does not exist (-ENOENT).
 */
int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf,
				   unsigned int size, const char *default_val)
{
	int err;

	/* Ensure that the default value is usable irrespective of whether
	 * it is actually going to be used.
	 */
	if (strnlen(default_val, size) == size)
		return -EINVAL;

	if (!nfp_nsp_has_hwinfo_lookup(state)) {
		strcpy(buf, default_val);
		return 0;
	}

	size = min_t(u32, size, NFP_HWINFO_LOOKUP_SIZE);

	err = __nfp_nsp_hwinfo_lookup(state, buf, size, true);
	if (err) {
		if (err == -ENOENT) {
			strcpy(buf, default_val);
			return 0;
		}

		nfp_err(state->cpp, "NSP HWinfo lookup failed: %d\n", err);
		return err;
	}

	if (strnlen(buf, size) == size) {
		nfp_err(state->cpp, "NSP HWinfo value not NULL-terminated\n");
		return -EINVAL;
	}

	return 0;
}

/* Store a HWinfo key/value pair carried in @buf */
int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg hwinfo_set = {
		{
			.code		= SPCODE_HWINFO_SET,
			.option		= size,
		},
		.in_buf		= buf,
		.in_size	= size,
	};

	return nfp_nsp_command_buf(state, &hwinfo_set);
}

/* Query whether application FW is already loaded on the device */
int nfp_nsp_fw_loaded(struct nfp_nsp *state)
{
	const struct nfp_nsp_command_arg arg = {
		.code = SPCODE_FW_LOADED,
	};

	return __nfp_nsp_command(state, &arg);
}

/* Read the component versions blob (capped at NFP_VERSIONS_SIZE) */
int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg versions = {
		{
			.code		= SPCODE_VERSIONS,
			.option		= min_t(u32, size, NFP_VERSIONS_SIZE),
		},
		.out_buf	= buf,
		.out_size	= min_t(u32, size, NFP_VERSIONS_SIZE),
	};

	return nfp_nsp_command_buf(state, &versions);
}
/* Extract one version string from the versions blob read by
 * nfp_nsp_versions().  The blob starts with a LE16 field count, followed
 * by per-component LE16 offsets (flash variant at id2off[id] + 2); a zero
 * offset means the entry is absent.  The string at the offset must be
 * NUL-terminated within @size.
 */
const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
				 const u8 *buf, unsigned int size)
{
	static const u32 id2off[] = {
		[NFP_VERSIONS_BSP] =	NFP_VERSIONS_BSP_OFF,
		[NFP_VERSIONS_CPLD] =	NFP_VERSIONS_CPLD_OFF,
		[NFP_VERSIONS_APP] =	NFP_VERSIONS_APP_OFF,
		[NFP_VERSIONS_BUNDLE] =	NFP_VERSIONS_BUNDLE_OFF,
		[NFP_VERSIONS_UNDI] =	NFP_VERSIONS_UNDI_OFF,
		[NFP_VERSIONS_NCSI] =	NFP_VERSIONS_NCSI_OFF,
		[NFP_VERSIONS_CFGR] =	NFP_VERSIONS_CFGR_OFF,
	};
	unsigned int field, buf_field_cnt, buf_off;

	if (id >= ARRAY_SIZE(id2off) || !id2off[id])
		return ERR_PTR(-EINVAL);

	/* Each component has two fields: running (even) and flash (odd) */
	field = id * 2 + flash;

	buf_field_cnt = get_unaligned_le16(buf);
	if (buf_field_cnt <= field)
		return ERR_PTR(-ENOENT);

	buf_off = get_unaligned_le16(buf + id2off[id] + flash * 2);
	if (!buf_off)
		return ERR_PTR(-ENOENT);

	if (buf_off >= size)
		return ERR_PTR(-EINVAL);
	if (strnlen(&buf[buf_off], size - buf_off) == size - buf_off)
		return ERR_PTR(-EINVAL);

	return (const char *)&buf[buf_off];
}

/* Raw SFF module EEPROM read: @buf carries the request header in and the
 * header plus data out (same buffer).
 */
static int
__nfp_nsp_module_eeprom(struct nfp_nsp *state, void *buf, unsigned int size)
{
	struct nfp_nsp_command_buf_arg module_eeprom = {
		{
			.code		= SPCODE_READ_SFF_EEPROM,
			.option		= size,
		},
		.in_buf		= buf,
		.in_size	= size,
		.out_buf	= buf,
		.out_size	= size,
	};

	return nfp_nsp_command_buf(state, &module_eeprom);
}
*/ + bufsz = struct_size(buf, data, round_up(len, NSP_SFF_EEPROM_BLOCK_LEN)); + buf = kzalloc(bufsz, GFP_KERNEL); + if (!buf) + return -ENOMEM; + + buf->metalen = + offsetof(struct eeprom_buf, data) / NSP_SFF_EEPROM_BLOCK_LEN; + buf->length = cpu_to_le16(len); + buf->offset = cpu_to_le16(offset); + buf->eth_index = eth_index; + + ret = __nfp_nsp_module_eeprom(state, buf, bufsz); + + *read_len = min_t(unsigned int, len, le16_to_cpu(buf->readlen)); + if (*read_len) + memcpy(data, buf->data, *read_len); + + if (!ret && *read_len < len) + ret = -EIO; + + kfree(buf); + + return ret; +}; + +int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size) +{ + struct nfp_nsp_command_buf_arg media = { + { + .code = SPCODE_READ_MEDIA, + .option = size, + }, + .in_buf = buf, + .in_size = size, + .out_buf = buf, + .out_size = size, + }; + + return nfp_nsp_command_buf(state, &media); +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h new file mode 100644 index 000000000..8f5cab003 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_nsp.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) */ +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
#ifndef NSP_NSP_H
#define NSP_NSP_H 1

#include <linux/types.h>
#include <linux/if_ether.h>

struct firmware;
struct nfp_cpp;
struct nfp_nsp;

/* NSP handle lifecycle and command entry points (see nfp_nsp.c) */
struct nfp_nsp *nfp_nsp_open(struct nfp_cpp *cpp);
void nfp_nsp_close(struct nfp_nsp *state);
u16 nfp_nsp_get_abi_ver_major(struct nfp_nsp *state);
u16 nfp_nsp_get_abi_ver_minor(struct nfp_nsp *state);
int nfp_nsp_wait(struct nfp_nsp *state);
int nfp_nsp_device_soft_reset(struct nfp_nsp *state);
int nfp_nsp_load_fw(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_write_flash(struct nfp_nsp *state, const struct firmware *fw);
int nfp_nsp_mac_reinit(struct nfp_nsp *state);
int nfp_nsp_load_stored_fw(struct nfp_nsp *state);
int nfp_nsp_hwinfo_lookup(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_hwinfo_lookup_optional(struct nfp_nsp *state, void *buf,
				   unsigned int size, const char *default_val);
int nfp_nsp_hwinfo_set(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_nsp_fw_loaded(struct nfp_nsp *state);
int nfp_nsp_read_module_eeprom(struct nfp_nsp *state, int eth_index,
			       unsigned int offset, void *data,
			       unsigned int len, unsigned int *read_len);

/* ABI-minor gates for optional NSP features; thresholds match the minor
 * version in which each command was introduced.
 */
static inline bool nfp_nsp_has_mac_reinit(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 20;
}

static inline bool nfp_nsp_has_stored_fw_load(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 23;
}

static inline bool nfp_nsp_has_hwinfo_lookup(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 24;
}

static inline bool nfp_nsp_has_hwinfo_set(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 25;
}

static inline bool nfp_nsp_has_fw_loaded(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 25;
}

static inline bool nfp_nsp_has_versions(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 27;
}

static inline bool nfp_nsp_has_read_module_eeprom(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 28;
}

static inline bool nfp_nsp_has_read_media(struct nfp_nsp *state)
{
	return nfp_nsp_get_abi_ver_minor(state) > 33;
}

/* Values match the NSP ETH table encoding for module types */
enum nfp_eth_interface {
	NFP_INTERFACE_NONE	= 0,
	NFP_INTERFACE_SFP	= 1,
	NFP_INTERFACE_SFPP	= 10,
	NFP_INTERFACE_SFP28	= 28,
	NFP_INTERFACE_QSFP	= 40,
	NFP_INTERFACE_RJ45	= 45,
	NFP_INTERFACE_CXP	= 100,
	NFP_INTERFACE_QSFP28	= 112,
};

enum nfp_eth_media {
	NFP_MEDIA_DAC_PASSIVE = 0,
	NFP_MEDIA_DAC_ACTIVE,
	NFP_MEDIA_FIBRE,
};

enum nfp_eth_aneg {
	NFP_ANEG_AUTO = 0,
	NFP_ANEG_SEARCH,
	NFP_ANEG_25G_CONSORTIUM,
	NFP_ANEG_25G_IEEE,
	NFP_ANEG_DISABLED,
};

enum nfp_eth_fec {
	NFP_FEC_AUTO_BIT = 0,
	NFP_FEC_BASER_BIT,
	NFP_FEC_REED_SOLOMON_BIT,
	NFP_FEC_DISABLED_BIT,
};

/* link modes about RJ45 haven't been used, so there's no mapping to them */
enum nfp_ethtool_link_mode_list {
	NFP_MEDIA_W0_RJ45_10M,
	NFP_MEDIA_W0_RJ45_10M_HD,
	NFP_MEDIA_W0_RJ45_100M,
	NFP_MEDIA_W0_RJ45_100M_HD,
	NFP_MEDIA_W0_RJ45_1G,
	NFP_MEDIA_W0_RJ45_2P5G,
	NFP_MEDIA_W0_RJ45_5G,
	NFP_MEDIA_W0_RJ45_10G,
	NFP_MEDIA_1000BASE_CX,
	NFP_MEDIA_1000BASE_KX,
	NFP_MEDIA_10GBASE_KX4,
	NFP_MEDIA_10GBASE_KR,
	NFP_MEDIA_10GBASE_CX4,
	NFP_MEDIA_10GBASE_CR,
	NFP_MEDIA_10GBASE_SR,
	NFP_MEDIA_10GBASE_ER,
	NFP_MEDIA_25GBASE_KR,
	NFP_MEDIA_25GBASE_KR_S,
	NFP_MEDIA_25GBASE_CR,
	NFP_MEDIA_25GBASE_CR_S,
	NFP_MEDIA_25GBASE_SR,
	NFP_MEDIA_40GBASE_CR4,
	NFP_MEDIA_40GBASE_KR4,
	NFP_MEDIA_40GBASE_SR4,
	NFP_MEDIA_40GBASE_LR4,
	NFP_MEDIA_50GBASE_KR,
	NFP_MEDIA_50GBASE_SR,
	NFP_MEDIA_50GBASE_CR,
	NFP_MEDIA_50GBASE_LR,
	NFP_MEDIA_50GBASE_ER,
	NFP_MEDIA_50GBASE_FR,
	NFP_MEDIA_100GBASE_KR4,
	NFP_MEDIA_100GBASE_SR4,
	NFP_MEDIA_100GBASE_CR4,
	NFP_MEDIA_100GBASE_KP4,
	NFP_MEDIA_100GBASE_CR10,
	NFP_MEDIA_LINK_MODES_NUMBER
};

#define NFP_FEC_AUTO		BIT(NFP_FEC_AUTO_BIT)
#define NFP_FEC_BASER		BIT(NFP_FEC_BASER_BIT)
#define NFP_FEC_REED_SOLOMON	BIT(NFP_FEC_REED_SOLOMON_BIT)
#define NFP_FEC_DISABLED	BIT(NFP_FEC_DISABLED_BIT)

/* Defines the valid values of the 'abi_drv_reset' hwinfo key */
#define NFP_NSP_DRV_RESET_DISK			0
#define NFP_NSP_DRV_RESET_ALWAYS		1
#define NFP_NSP_DRV_RESET_NEVER			2
#define NFP_NSP_DRV_RESET_DEFAULT		"0"

/* Defines the valid values of the 'app_fw_from_flash' hwinfo key */
#define NFP_NSP_APP_FW_LOAD_DISK		0
#define NFP_NSP_APP_FW_LOAD_FLASH		1
#define NFP_NSP_APP_FW_LOAD_PREF		2
#define NFP_NSP_APP_FW_LOAD_DEFAULT		"2"

/* Define the default value for the 'abi_drv_load_ifc' key */
#define NFP_NSP_DRV_LOAD_IFC_DEFAULT		"0x10ff"

/**
 * struct nfp_eth_table - ETH table information
 * @count:	number of table entries
 * @max_index:	max of @index fields of all @ports
 * @ports:	table of ports
 *
 * @ports.eth_index:	port index according to legacy ethX numbering
 * @ports.index:	chip-wide first channel index
 * @ports.nbi:		NBI index
 * @ports.base:		first channel index (within NBI)
 * @ports.lanes:	number of channels
 * @ports.speed:	interface speed (in Mbps)
 * @ports.interface:	interface (module) plugged in
 * @ports.media:	media type of the @interface
 * @ports.fec:		forward error correction mode
 * @ports.act_fec:	active forward error correction mode
 * @ports.aneg:		auto negotiation mode
 * @ports.mac_addr:	interface MAC address
 * @ports.label_port:	port id
 * @ports.label_subport: id of interface within port (for split ports)
 * @ports.enabled:	is enabled?
 * @ports.tx_enabled:	is TX enabled?
 * @ports.rx_enabled:	is RX enabled?
 * @ports.override_changed: is media reconfig pending?
 *
 * @ports.port_type:	one of %PORT_* defines for ethtool
 * @ports.port_lanes:	total number of lanes on the port (sum of lanes of all
 *			subports)
 * @ports.is_split:	is interface part of a split port
 * @ports.fec_modes_supported:	bitmap of FEC modes supported
 */
struct nfp_eth_table {
	unsigned int count;
	unsigned int max_index;
	struct nfp_eth_table_port {
		unsigned int eth_index;
		unsigned int index;
		unsigned int nbi;
		unsigned int base;
		unsigned int lanes;
		unsigned int speed;

		unsigned int interface;
		enum nfp_eth_media media;

		enum nfp_eth_fec fec;
		enum nfp_eth_fec act_fec;
		enum nfp_eth_aneg aneg;

		u8 mac_addr[ETH_ALEN];

		u8 label_port;
		u8 label_subport;

		bool enabled;
		bool tx_enabled;
		bool rx_enabled;
		bool supp_aneg;

		bool override_changed;

		/* Computed fields */
		u8 port_type;

		unsigned int port_lanes;

		bool is_split;

		unsigned int fec_modes_supported;
	} ports[];
};

struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp);
struct nfp_eth_table *
__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp);

int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable);
int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx,
			   bool configed);
int
nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode);

int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state);

static inline bool nfp_eth_can_support_fec(struct nfp_eth_table_port *eth_port)
{
	return !!eth_port->fec_modes_supported;
}

static inline unsigned int
nfp_eth_supported_fec_modes(struct nfp_eth_table_port *eth_port)
{
	return eth_port->fec_modes_supported;
}

struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx);
int nfp_eth_config_commit_end(struct nfp_nsp *nsp);
void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp);

int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode);
int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed);
int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes);

/**
 * struct nfp_nsp_identify - NSP static information
 * @version:      opaque version string
 * @flags:        version flags
 * @br_primary:   branch id of primary bootloader
 * @br_secondary: branch id of secondary bootloader
 * @br_nsp:       branch id of NSP
 * @primary:      version of primarary bootloader
 * @secondary:    version id of secondary bootloader
 * @nsp:          version id of NSP
 * @sensor_mask:  mask of present sensors available on NIC
 */
struct nfp_nsp_identify {
	char version[40];
	u8 flags;
	u8 br_primary;
	u8 br_secondary;
	u8 br_nsp;
	u16 primary;
	u16 secondary;
	u16 nsp;
	u64 sensor_mask;
};

struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp);

enum nfp_nsp_sensor_id {
	NFP_SENSOR_CHIP_TEMPERATURE,
	NFP_SENSOR_ASSEMBLY_POWER,
	NFP_SENSOR_ASSEMBLY_12V_POWER,
	NFP_SENSOR_ASSEMBLY_3V3_POWER,
};

int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
			  long *val);

/* Request/response layout for SPCODE_READ_MEDIA */
struct nfp_eth_media_buf {
	u8 eth_index;
	u8 reserved[7];
	__le64 supported_modes[2];
	__le64 advertised_modes[2];
};

int nfp_nsp_read_media(struct nfp_nsp *state, void *buf, unsigned int size);
int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm);

#define NFP_NSP_VERSION_BUFSZ	1024 /* reasonable size, not in the ABI */

enum nfp_nsp_versions {
	NFP_VERSIONS_BSP,
	NFP_VERSIONS_CPLD,
	NFP_VERSIONS_APP,
	NFP_VERSIONS_BUNDLE,
	NFP_VERSIONS_UNDI,
	NFP_VERSIONS_NCSI,
	NFP_VERSIONS_CFGR,
};

int nfp_nsp_versions(struct nfp_nsp *state, void *buf, unsigned int size);
const char *nfp_nsp_versions_get(enum nfp_nsp_versions id, bool flash,
				 const u8 *buf, unsigned int size);
#endif
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2017 Netronome Systems, Inc. */

#include <linux/kernel.h>
#include <linux/slab.h>

#include "nfp.h"
#include "nfp_nsp.h"

/* Wire format of the SPCODE_NSP_IDENTIFY response (little-endian) */
struct nsp_identify {
	u8 version[40];
	u8 flags;
	u8 br_primary;
	u8 br_secondary;
	u8 br_nsp;
	__le16 primary;
	__le16 secondary;
	__le16 nsp;
	u8 reserved[6];
	__le64 sensor_mask;
};

/* Read the NSP identification blob and convert it to host-endian
 * struct nfp_nsp_identify.  Requires ABI minor >= 15.  Returns NULL on
 * any failure; the caller owns (and must kfree) the returned structure.
 */
struct nfp_nsp_identify *__nfp_nsp_identify(struct nfp_nsp *nsp)
{
	struct nfp_nsp_identify *nspi = NULL;
	struct nsp_identify *ni;
	int ret;

	if (nfp_nsp_get_abi_ver_minor(nsp) < 15)
		return NULL;

	ni = kzalloc(sizeof(*ni), GFP_KERNEL);
	if (!ni)
		return NULL;

	ret = nfp_nsp_read_identify(nsp, ni, sizeof(*ni));
	if (ret < 0) {
		nfp_err(nfp_nsp_cpp(nsp), "reading bsp version failed %d\n",
			ret);
		goto exit_free;
	}

	nspi = kzalloc(sizeof(*nspi), GFP_KERNEL);
	if (!nspi)
		goto exit_free;

	memcpy(nspi->version, ni->version, sizeof(nspi->version));
	nspi->version[sizeof(nspi->version) - 1] = '\0';
	nspi->flags = ni->flags;
	nspi->br_primary = ni->br_primary;
	nspi->br_secondary = ni->br_secondary;
	nspi->br_nsp = ni->br_nsp;
	nspi->primary = le16_to_cpu(ni->primary);
	nspi->secondary = le16_to_cpu(ni->secondary);
	nspi->nsp = le16_to_cpu(ni->nsp);
	nspi->sensor_mask = le64_to_cpu(ni->sensor_mask);

exit_free:
	kfree(ni);
	return nspi;
}

/* Wire format of the SPCODE_NSP_SENSORS response (little-endian) */
struct nfp_sensors {
	__le32 chip_temp;
	__le32 assembly_power;
	__le32 assembly_12v_power;
	__le32 assembly_3v3_power;
};

/* Read a single sensor value for hwmon.  Opens a fresh NSP handle per
 * call; @id selects which field of struct nfp_sensors is reported.
 */
int nfp_hwmon_read_sensor(struct nfp_cpp *cpp, enum nfp_nsp_sensor_id id,
			  long *val)
{
	struct nfp_sensors s;
	struct nfp_nsp *nsp;
	int ret;

	nsp = nfp_nsp_open(cpp);
	if (IS_ERR(nsp))
		return PTR_ERR(nsp);

	ret = nfp_nsp_read_sensors(nsp, BIT(id), &s, sizeof(s));
	nfp_nsp_close(nsp);

	if (ret < 0)
		return ret;

	switch (id) {
	case NFP_SENSOR_CHIP_TEMPERATURE:
		*val = le32_to_cpu(s.chip_temp);
		break;
	case NFP_SENSOR_ASSEMBLY_POWER:
		*val = le32_to_cpu(s.assembly_power);
		break;
	case NFP_SENSOR_ASSEMBLY_12V_POWER:
		*val = le32_to_cpu(s.assembly_12v_power);
		break;
	case NFP_SENSOR_ASSEMBLY_3V3_POWER:
		*val = le32_to_cpu(s.assembly_3v3_power);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause)
/* Copyright (C) 2015-2017 Netronome Systems, Inc. */

/* Authors: David Brunecz <david.brunecz@netronome.com>
 *          Jakub Kicinski <jakub.kicinski@netronome.com>
 *          Jason Mcmullan <jason.mcmullan@netronome.com>
 */

#include <linux/bitfield.h>
#include <linux/ethtool.h>
#include <linux/if_ether.h>
#include <linux/kernel.h>
#include <linux/module.h>

#include "nfp.h"
#include "nfp_nsp.h"
#include "nfp6000/nfp6000.h"

#define NSP_ETH_NBI_PORT_COUNT		24
#define NSP_ETH_MAX_COUNT		(2 * NSP_ETH_NBI_PORT_COUNT)
#define NSP_ETH_TABLE_SIZE		(NSP_ETH_MAX_COUNT *		\
					 sizeof(union eth_table_entry))

/* Bit fields of the ETH table entry's 'port' word */
#define NSP_ETH_PORT_LANES		GENMASK_ULL(3, 0)
#define NSP_ETH_PORT_INDEX		GENMASK_ULL(15, 8)
#define NSP_ETH_PORT_LABEL		GENMASK_ULL(53, 48)
#define NSP_ETH_PORT_PHYLABEL		GENMASK_ULL(59, 54)
#define NSP_ETH_PORT_FEC_SUPP_BASER	BIT_ULL(60)
#define NSP_ETH_PORT_FEC_SUPP_RS	BIT_ULL(61)
#define NSP_ETH_PORT_SUPP_ANEG		BIT_ULL(63)

#define NSP_ETH_PORT_LANES_MASK		cpu_to_le64(NSP_ETH_PORT_LANES)

/* Bit fields of the ETH table entry's 'state' word */
#define NSP_ETH_STATE_CONFIGURED	BIT_ULL(0)
#define NSP_ETH_STATE_ENABLED		BIT_ULL(1)
#define NSP_ETH_STATE_TX_ENABLED	BIT_ULL(2)
#define NSP_ETH_STATE_RX_ENABLED	BIT_ULL(3)
#define NSP_ETH_STATE_RATE		GENMASK_ULL(11, 8)
#define NSP_ETH_STATE_INTERFACE		GENMASK_ULL(19, 12)
#define NSP_ETH_STATE_MEDIA		GENMASK_ULL(21, 20)
#define NSP_ETH_STATE_OVRD_CHNG		BIT_ULL(22)
#define NSP_ETH_STATE_ANEG		GENMASK_ULL(25, 23)
#define NSP_ETH_STATE_FEC		GENMASK_ULL(27, 26)
#define NSP_ETH_STATE_ACT_FEC		GENMASK_ULL(29, 28)

/* Bit fields of the ETH table entry's 'control' word */
#define NSP_ETH_CTRL_CONFIGURED		BIT_ULL(0)
#define NSP_ETH_CTRL_ENABLED		BIT_ULL(1)
#define NSP_ETH_CTRL_TX_ENABLED		BIT_ULL(2)
#define NSP_ETH_CTRL_RX_ENABLED		BIT_ULL(3)
#define NSP_ETH_CTRL_SET_RATE		BIT_ULL(4)
#define NSP_ETH_CTRL_SET_LANES		BIT_ULL(5)
#define NSP_ETH_CTRL_SET_ANEG		BIT_ULL(6)
#define NSP_ETH_CTRL_SET_FEC		BIT_ULL(7)
#define NSP_ETH_CTRL_SET_IDMODE		BIT_ULL(8)

/* Word indices within one raw ETH table entry */
enum nfp_eth_raw {
	NSP_ETH_RAW_PORT = 0,
	NSP_ETH_RAW_STATE,
	NSP_ETH_RAW_MAC,
	NSP_ETH_RAW_CONTROL,

	NSP_ETH_NUM_RAW
};

/* Per-lane rate encoding used by NSP_ETH_STATE_RATE */
enum nfp_eth_rate {
	RATE_INVALID = 0,
	RATE_10M,
	RATE_100M,
	RATE_1G,
	RATE_10G,
	RATE_25G,
};

/* One ETH table entry, overlaid as structured fields or raw LE64 words */
union eth_table_entry {
	struct {
		__le64 port;
		__le64 state;
		u8 mac_addr[6];
		u8 resv[2];
		__le64 control;
	};
	__le64 raw[NSP_ETH_NUM_RAW];
};

/* Mapping between the NSP rate encoding and Mbps speeds */
static const struct {
	enum nfp_eth_rate rate;
	unsigned int speed;
} nsp_eth_rate_tbl[] = {
	{ RATE_INVALID,	0, },
	{ RATE_10M,	SPEED_10, },
	{ RATE_100M,	SPEED_100, },
	{ RATE_1G,	SPEED_1000, },
	{ RATE_10G,	SPEED_10000, },
	{ RATE_25G,	SPEED_25000, },
};

/* Translate NSP rate encoding to Mbps; 0 for unknown encodings */
static unsigned int nfp_eth_rate2speed(enum nfp_eth_rate rate)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
		if (nsp_eth_rate_tbl[i].rate == rate)
			return nsp_eth_rate_tbl[i].speed;

	return 0;
}

/* Translate Mbps to NSP rate encoding; RATE_INVALID for unknown speeds */
static unsigned int nfp_eth_speed2rate(unsigned int speed)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(nsp_eth_rate_tbl); i++)
		if (nsp_eth_rate_tbl[i].speed == speed)
			return nsp_eth_rate_tbl[i].rate;

	return RATE_INVALID;
}

/* The table stores the MAC address byte-reversed; undo that on copy */
static void nfp_eth_copy_mac_reverse(u8 *dst, const u8 *src)
{
	int i;

	for (i = 0; i < ETH_ALEN; i++)
		dst[ETH_ALEN - i - 1] = src[i];
}
unsigned int index, struct nfp_eth_table_port *dst) +{ + unsigned int rate; + unsigned int fec; + u64 port, state; + + port = le64_to_cpu(src->port); + state = le64_to_cpu(src->state); + + dst->eth_index = FIELD_GET(NSP_ETH_PORT_INDEX, port); + dst->index = index; + dst->nbi = index / NSP_ETH_NBI_PORT_COUNT; + dst->base = index % NSP_ETH_NBI_PORT_COUNT; + dst->lanes = FIELD_GET(NSP_ETH_PORT_LANES, port); + + dst->enabled = FIELD_GET(NSP_ETH_STATE_ENABLED, state); + dst->tx_enabled = FIELD_GET(NSP_ETH_STATE_TX_ENABLED, state); + dst->rx_enabled = FIELD_GET(NSP_ETH_STATE_RX_ENABLED, state); + + rate = nfp_eth_rate2speed(FIELD_GET(NSP_ETH_STATE_RATE, state)); + dst->speed = dst->lanes * rate; + + dst->interface = FIELD_GET(NSP_ETH_STATE_INTERFACE, state); + dst->media = FIELD_GET(NSP_ETH_STATE_MEDIA, state); + + nfp_eth_copy_mac_reverse(dst->mac_addr, src->mac_addr); + + dst->label_port = FIELD_GET(NSP_ETH_PORT_PHYLABEL, port); + dst->label_subport = FIELD_GET(NSP_ETH_PORT_LABEL, port); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) + return; + + dst->override_changed = FIELD_GET(NSP_ETH_STATE_OVRD_CHNG, state); + dst->aneg = FIELD_GET(NSP_ETH_STATE_ANEG, state); + + if (nfp_nsp_get_abi_ver_minor(nsp) < 22) + return; + + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_BASER, port); + dst->fec_modes_supported |= fec << NFP_FEC_BASER_BIT; + fec = FIELD_GET(NSP_ETH_PORT_FEC_SUPP_RS, port); + dst->fec_modes_supported |= fec << NFP_FEC_REED_SOLOMON_BIT; + if (dst->fec_modes_supported) + dst->fec_modes_supported |= NFP_FEC_AUTO | NFP_FEC_DISABLED; + + dst->fec = FIELD_GET(NSP_ETH_STATE_FEC, state); + dst->act_fec = dst->fec; + + if (nfp_nsp_get_abi_ver_minor(nsp) < 33) + return; + + dst->act_fec = FIELD_GET(NSP_ETH_STATE_ACT_FEC, state); + dst->supp_aneg = FIELD_GET(NSP_ETH_PORT_SUPP_ANEG, port); +} + +static void +nfp_eth_calc_port_geometry(struct nfp_cpp *cpp, struct nfp_eth_table *table) +{ + unsigned int i, j; + + for (i = 0; i < table->count; i++) { + table->max_index = 
max(table->max_index, table->ports[i].index); + + for (j = 0; j < table->count; j++) { + if (table->ports[i].label_port != + table->ports[j].label_port) + continue; + table->ports[i].port_lanes += table->ports[j].lanes; + + if (i == j) + continue; + if (table->ports[i].label_subport == + table->ports[j].label_subport) + nfp_warn(cpp, + "Port %d subport %d is a duplicate\n", + table->ports[i].label_port, + table->ports[i].label_subport); + + table->ports[i].is_split = true; + } + } +} + +static void +nfp_eth_calc_port_type(struct nfp_cpp *cpp, struct nfp_eth_table_port *entry) +{ + if (entry->interface == NFP_INTERFACE_NONE) { + entry->port_type = PORT_NONE; + return; + } else if (entry->interface == NFP_INTERFACE_RJ45) { + entry->port_type = PORT_TP; + return; + } + + if (entry->media == NFP_MEDIA_FIBRE) + entry->port_type = PORT_FIBRE; + else + entry->port_type = PORT_DA; +} + +/** + * nfp_eth_read_ports() - retrieve port information + * @cpp: NFP CPP handle + * + * Read the port information from the device. Returned structure should + * be freed with kfree() once no longer needed. + * + * Return: populated ETH table or NULL on error. 
+ */ +struct nfp_eth_table *nfp_eth_read_ports(struct nfp_cpp *cpp) +{ + struct nfp_eth_table *ret; + struct nfp_nsp *nsp; + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) + return NULL; + + ret = __nfp_eth_read_ports(cpp, nsp); + nfp_nsp_close(nsp); + + return ret; +} + +struct nfp_eth_table * +__nfp_eth_read_ports(struct nfp_cpp *cpp, struct nfp_nsp *nsp) +{ + union eth_table_entry *entries; + struct nfp_eth_table *table; + int i, j, ret, cnt = 0; + + entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); + if (!entries) + return NULL; + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + if (ret < 0) { + nfp_err(cpp, "reading port table failed %d\n", ret); + goto err; + } + + for (i = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + cnt++; + + /* Some versions of flash will give us 0 instead of port count. + * For those that give a port count, verify it against the value + * calculated above. + */ + if (ret && ret != cnt) { + nfp_err(cpp, "table entry count reported (%d) does not match entries present (%d)\n", + ret, cnt); + goto err; + } + + table = kzalloc(struct_size(table, ports, cnt), GFP_KERNEL); + if (!table) + goto err; + + table->count = cnt; + for (i = 0, j = 0; i < NSP_ETH_MAX_COUNT; i++) + if (entries[i].port & NSP_ETH_PORT_LANES_MASK) + nfp_eth_port_translate(nsp, &entries[i], i, + &table->ports[j++]); + + nfp_eth_calc_port_geometry(cpp, table); + for (i = 0; i < table->count; i++) + nfp_eth_calc_port_type(cpp, &table->ports[i]); + + kfree(entries); + + return table; + +err: + kfree(entries); + return NULL; +} + +struct nfp_nsp *nfp_eth_config_start(struct nfp_cpp *cpp, unsigned int idx) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + int ret; + + entries = kzalloc(NSP_ETH_TABLE_SIZE, GFP_KERNEL); + if (!entries) + return ERR_PTR(-ENOMEM); + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) { + kfree(entries); + return nsp; + } + + ret = nfp_nsp_read_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + 
if (ret < 0) { + nfp_err(cpp, "reading port table failed %d\n", ret); + goto err; + } + + if (!(entries[idx].port & NSP_ETH_PORT_LANES_MASK)) { + nfp_warn(cpp, "trying to set port state on disabled port %d\n", + idx); + goto err; + } + + nfp_nsp_config_set_state(nsp, entries, idx); + return nsp; + +err: + nfp_nsp_close(nsp); + kfree(entries); + return ERR_PTR(-EIO); +} + +void nfp_eth_config_cleanup_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + + nfp_nsp_config_set_modified(nsp, false); + nfp_nsp_config_clear_state(nsp); + nfp_nsp_close(nsp); + kfree(entries); +} + +/** + * nfp_eth_config_commit_end() - perform recorded configuration changes + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * + * Perform the configuration which was requested with __nfp_eth_set_*() + * helpers and recorded in @nsp state. If device was already configured + * as requested or no __nfp_eth_set_*() operations were made no NSP command + * will be performed. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int nfp_eth_config_commit_end(struct nfp_nsp *nsp) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + int ret = 1; + + if (nfp_nsp_config_modified(nsp)) { + ret = nfp_nsp_write_eth_table(nsp, entries, NSP_ETH_TABLE_SIZE); + ret = ret < 0 ? ret : 0; + } + + nfp_eth_config_cleanup_end(nsp); + + return ret; +} + +/** + * nfp_eth_set_mod_enable() - set PHY module enable control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @enable: Desired state + * + * Enable or disable PHY module (this usually means setting the TX lanes + * disable bits). + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
+ */ +int nfp_eth_set_mod_enable(struct nfp_cpp *cpp, unsigned int idx, bool enable) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].state); + if (enable != FIELD_GET(NSP_ETH_CTRL_ENABLED, reg)) { + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_ENABLED; + reg |= FIELD_PREP(NSP_ETH_CTRL_ENABLED, enable); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); + } + + return nfp_eth_config_commit_end(nsp); +} + +/** + * nfp_eth_set_configured() - set PHY module configured control bit + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @configed: Desired state + * + * Set the ifup/ifdown state on the PHY. + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. + */ +int nfp_eth_set_configured(struct nfp_cpp *cpp, unsigned int idx, bool configed) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + /* Older ABI versions did support this feature, however this has only + * been reliable since ABI 20. 
+ */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 20) { + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + + entries = nfp_nsp_config_entries(nsp); + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].state); + if (configed != FIELD_GET(NSP_ETH_STATE_CONFIGURED, reg)) { + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_CONFIGURED; + reg |= FIELD_PREP(NSP_ETH_CTRL_CONFIGURED, configed); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); + } + + return nfp_eth_config_commit_end(nsp); +} + +static int +nfp_eth_set_bit_config(struct nfp_nsp *nsp, unsigned int raw_idx, + const u64 mask, const unsigned int shift, + unsigned int val, const u64 ctrl_bit) +{ + union eth_table_entry *entries = nfp_nsp_config_entries(nsp); + unsigned int idx = nfp_nsp_config_idx(nsp); + u64 reg; + + /* Note: set features were added in ABI 0.14 but the error + * codes were initially not populated correctly. + */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 17) { + nfp_err(nfp_nsp_cpp(nsp), + "set operations not supported, please update flash\n"); + return -EOPNOTSUPP; + } + + /* Check if we are already in requested state */ + reg = le64_to_cpu(entries[idx].raw[raw_idx]); + if (val == (reg & mask) >> shift) + return 0; + + reg &= ~mask; + reg |= (val << shift) & mask; + entries[idx].raw[raw_idx] = cpu_to_le64(reg); + + entries[idx].control |= cpu_to_le64(ctrl_bit); + + nfp_nsp_config_set_modified(nsp, true); + + return 0; +} + +int nfp_eth_set_idmode(struct nfp_cpp *cpp, unsigned int idx, bool state) +{ + union eth_table_entry *entries; + struct nfp_nsp *nsp; + u64 reg; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + /* Set this features were added in ABI 0.32 */ + if (nfp_nsp_get_abi_ver_minor(nsp) < 32) { + nfp_err(nfp_nsp_cpp(nsp), + "set id mode operation not supported, please update flash\n"); + nfp_eth_config_cleanup_end(nsp); + return -EOPNOTSUPP; + } + + entries = 
nfp_nsp_config_entries(nsp); + + reg = le64_to_cpu(entries[idx].control); + reg &= ~NSP_ETH_CTRL_SET_IDMODE; + reg |= FIELD_PREP(NSP_ETH_CTRL_SET_IDMODE, state); + entries[idx].control = cpu_to_le64(reg); + + nfp_nsp_config_set_modified(nsp, true); + + return nfp_eth_config_commit_end(nsp); +} + +#define NFP_ETH_SET_BIT_CONFIG(nsp, raw_idx, mask, val, ctrl_bit) \ + ({ \ + __BF_FIELD_CHECK(mask, 0ULL, val, "NFP_ETH_SET_BIT_CONFIG: "); \ + nfp_eth_set_bit_config(nsp, raw_idx, mask, __bf_shf(mask), \ + val, ctrl_bit); \ + }) + +/** + * __nfp_eth_set_aneg() - set PHY autonegotiation control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired autonegotiation mode + * + * Allow/disallow PHY module to advertise/perform autonegotiation. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int __nfp_eth_set_aneg(struct nfp_nsp *nsp, enum nfp_eth_aneg mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_ANEG, mode, + NSP_ETH_CTRL_SET_ANEG); +} + +/** + * __nfp_eth_set_fec() - set PHY forward error correction control bit + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @mode: Desired fec mode + * + * Set the PHY module forward error correction mode. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +static int __nfp_eth_set_fec(struct nfp_nsp *nsp, enum nfp_eth_fec mode) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_FEC, mode, + NSP_ETH_CTRL_SET_FEC); +} + +/** + * nfp_eth_set_fec() - set PHY forward error correction control mode + * @cpp: NFP CPP handle + * @idx: NFP chip-wide port index + * @mode: Desired fec mode + * + * Return: + * 0 - configuration successful; + * 1 - no changes were needed; + * -ERRNO - configuration failed. 
+ */ +int +nfp_eth_set_fec(struct nfp_cpp *cpp, unsigned int idx, enum nfp_eth_fec mode) +{ + struct nfp_nsp *nsp; + int err; + + nsp = nfp_eth_config_start(cpp, idx); + if (IS_ERR(nsp)) + return PTR_ERR(nsp); + + err = __nfp_eth_set_fec(nsp, mode); + if (err) { + nfp_eth_config_cleanup_end(nsp); + return err; + } + + return nfp_eth_config_commit_end(nsp); +} + +/** + * __nfp_eth_set_speed() - set interface speed/rate + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @speed: Desired speed (per lane) + * + * Set lane speed. Provided @speed value should be subport speed divided + * by number of lanes this subport is spanning (i.e. 10000 for 40G, 25000 for + * 50G, etc.) + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. + */ +int __nfp_eth_set_speed(struct nfp_nsp *nsp, unsigned int speed) +{ + enum nfp_eth_rate rate; + + rate = nfp_eth_speed2rate(speed); + if (rate == RATE_INVALID) { + nfp_warn(nfp_nsp_cpp(nsp), + "could not find matching lane rate for speed %u\n", + speed); + return -EINVAL; + } + + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_STATE, + NSP_ETH_STATE_RATE, rate, + NSP_ETH_CTRL_SET_RATE); +} + +/** + * __nfp_eth_set_split() - set interface lane split + * @nsp: NFP NSP handle returned from nfp_eth_config_start() + * @lanes: Desired lanes per port + * + * Set number of lanes in the port. + * Will write to hwinfo overrides in the flash (persistent config). + * + * Return: 0 or -ERRNO. 
+ */ +int __nfp_eth_set_split(struct nfp_nsp *nsp, unsigned int lanes) +{ + return NFP_ETH_SET_BIT_CONFIG(nsp, NSP_ETH_RAW_PORT, NSP_ETH_PORT_LANES, + lanes, NSP_ETH_CTRL_SET_LANES); +} + +int nfp_eth_read_media(struct nfp_cpp *cpp, struct nfp_eth_media_buf *ethm) +{ + struct nfp_nsp *nsp; + int ret; + + nsp = nfp_nsp_open(cpp); + if (IS_ERR(nsp)) { + nfp_err(cpp, "Failed to access the NSP: %pe\n", nsp); + return PTR_ERR(nsp); + } + + if (!nfp_nsp_has_read_media(nsp)) { + nfp_warn(cpp, "Reading media link modes not supported. Please update flash\n"); + ret = -EOPNOTSUPP; + goto exit_close_nsp; + } + + ret = nfp_nsp_read_media(nsp, ethm, sizeof(*ethm)); + if (ret) + nfp_err(cpp, "Reading media link modes failed: %pe\n", ERR_PTR(ret)); + +exit_close_nsp: + nfp_nsp_close(nsp); + return ret; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c new file mode 100644 index 000000000..ce7492a6a --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_resource.c @@ -0,0 +1,366 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. 
*/ + +/* + * nfp_resource.c + * Author: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + */ +#include <linux/delay.h> +#include <linux/kernel.h> +#include <linux/slab.h> + +#include "crc32.h" +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp6000/nfp6000.h" + +#define NFP_RESOURCE_TBL_TARGET NFP_CPP_TARGET_MU +#define NFP_RESOURCE_TBL_BASE 0x8100000000ULL + +/* NFP Resource Table self-identifier */ +#define NFP_RESOURCE_TBL_NAME "nfp.res" +#define NFP_RESOURCE_TBL_KEY 0x00000000 /* Special key for entry 0 */ + +#define NFP_RESOURCE_ENTRY_NAME_SZ 8 + +/** + * struct nfp_resource_entry - Resource table entry + * @mutex: NFP CPP Lock + * @mutex.owner: NFP CPP Lock, interface owner + * @mutex.key: NFP CPP Lock, posix_crc32(name, 8) + * @region: Memory region descriptor + * @region.name: ASCII, zero padded name + * @region.reserved: padding + * @region.cpp_action: CPP Action + * @region.cpp_token: CPP Token + * @region.cpp_target: CPP Target ID + * @region.page_offset: 256-byte page offset into target's CPP address + * @region.page_size: size, in 256-byte pages + */ +struct nfp_resource_entry { + struct nfp_resource_entry_mutex { + u32 owner; + u32 key; + } mutex; + struct nfp_resource_entry_region { + u8 name[NFP_RESOURCE_ENTRY_NAME_SZ]; + u8 reserved[5]; + u8 cpp_action; + u8 cpp_token; + u8 cpp_target; + u32 page_offset; + u32 page_size; + } region; +}; + +#define NFP_RESOURCE_TBL_SIZE 4096 +#define NFP_RESOURCE_TBL_ENTRIES (NFP_RESOURCE_TBL_SIZE / \ + sizeof(struct nfp_resource_entry)) + +struct nfp_resource { + char name[NFP_RESOURCE_ENTRY_NAME_SZ + 1]; + u32 cpp_id; + u64 addr; + u64 size; + struct nfp_cpp_mutex *mutex; +}; + +static int nfp_cpp_resource_find(struct nfp_cpp *cpp, struct nfp_resource *res) +{ + struct nfp_resource_entry entry; + u32 cpp_id, key; + int ret, i; + + cpp_id = NFP_CPP_ID(NFP_RESOURCE_TBL_TARGET, 3, 0); /* Atomic read */ + + /* Search for a matching entry */ + if (!strcmp(res->name, 
NFP_RESOURCE_TBL_NAME)) { + nfp_err(cpp, "Grabbing device lock not supported\n"); + return -EOPNOTSUPP; + } + key = crc32_posix(res->name, NFP_RESOURCE_ENTRY_NAME_SZ); + + for (i = 0; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + u64 addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + ret = nfp_cpp_read(cpp, cpp_id, addr, &entry, sizeof(entry)); + if (ret != sizeof(entry)) + return -EIO; + + if (entry.mutex.key != key) + continue; + + /* Found key! */ + res->mutex = + nfp_cpp_mutex_alloc(cpp, + NFP_RESOURCE_TBL_TARGET, addr, key); + res->cpp_id = NFP_CPP_ID(entry.region.cpp_target, + entry.region.cpp_action, + entry.region.cpp_token); + res->addr = (u64)entry.region.page_offset << 8; + res->size = (u64)entry.region.page_size << 8; + + return 0; + } + + return -ENOENT; +} + +static int +nfp_resource_try_acquire(struct nfp_cpp *cpp, struct nfp_resource *res, + struct nfp_cpp_mutex *dev_mutex) +{ + int err; + + if (nfp_cpp_mutex_lock(dev_mutex)) + return -EINVAL; + + err = nfp_cpp_resource_find(cpp, res); + if (err) + goto err_unlock_dev; + + err = nfp_cpp_mutex_trylock(res->mutex); + if (err) + goto err_res_mutex_free; + + nfp_cpp_mutex_unlock(dev_mutex); + + return 0; + +err_res_mutex_free: + nfp_cpp_mutex_free(res->mutex); +err_unlock_dev: + nfp_cpp_mutex_unlock(dev_mutex); + + return err; +} + +/** + * nfp_resource_acquire() - Acquire a resource handle + * @cpp: NFP CPP handle + * @name: Name of the resource + * + * NOTE: This function locks the acquired resource + * + * Return: NFP Resource handle, or ERR_PTR() + */ +struct nfp_resource * +nfp_resource_acquire(struct nfp_cpp *cpp, const char *name) +{ + unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ; + unsigned long err_at = jiffies + NFP_MUTEX_WAIT_ERROR * HZ; + struct nfp_cpp_mutex *dev_mutex; + struct nfp_resource *res; + int err; + + res = kzalloc(sizeof(*res), GFP_KERNEL); + if (!res) + return ERR_PTR(-ENOMEM); + + strncpy(res->name, name, NFP_RESOURCE_ENTRY_NAME_SZ); + + 
dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) { + kfree(res); + return ERR_PTR(-ENOMEM); + } + + for (;;) { + err = nfp_resource_try_acquire(cpp, res, dev_mutex); + if (!err) + break; + if (err != -EBUSY) + goto err_free; + + err = msleep_interruptible(1); + if (err != 0) { + err = -ERESTARTSYS; + goto err_free; + } + + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; + nfp_warn(cpp, "Warning: waiting for NFP resource %s\n", + name); + } + if (time_is_before_eq_jiffies(err_at)) { + nfp_err(cpp, "Error: resource %s timed out\n", name); + err = -EBUSY; + goto err_free; + } + } + + nfp_cpp_mutex_free(dev_mutex); + + return res; + +err_free: + nfp_cpp_mutex_free(dev_mutex); + kfree(res); + return ERR_PTR(err); +} + +/** + * nfp_resource_release() - Release a NFP Resource handle + * @res: NFP Resource handle + * + * NOTE: This function implictly unlocks the resource handle + */ +void nfp_resource_release(struct nfp_resource *res) +{ + nfp_cpp_mutex_unlock(res->mutex); + nfp_cpp_mutex_free(res->mutex); + kfree(res); +} + +/** + * nfp_resource_wait() - Wait for resource to appear + * @cpp: NFP CPP handle + * @name: Name of the resource + * @secs: Number of seconds to wait + * + * Wait for resource to appear in the resource table, grab and release + * its lock. The wait is jiffies-based, don't expect fine granularity. + * + * Return: 0 on success, errno otherwise. 
+ */ +int nfp_resource_wait(struct nfp_cpp *cpp, const char *name, unsigned int secs) +{ + unsigned long warn_at = jiffies + NFP_MUTEX_WAIT_FIRST_WARN * HZ; + unsigned long err_at = jiffies + secs * HZ; + struct nfp_resource *res; + + while (true) { + res = nfp_resource_acquire(cpp, name); + if (!IS_ERR(res)) { + nfp_resource_release(res); + return 0; + } + + if (PTR_ERR(res) != -ENOENT) { + nfp_err(cpp, "error waiting for resource %s: %ld\n", + name, PTR_ERR(res)); + return PTR_ERR(res); + } + if (time_is_before_eq_jiffies(err_at)) { + nfp_err(cpp, "timeout waiting for resource %s\n", name); + return -ETIMEDOUT; + } + if (time_is_before_eq_jiffies(warn_at)) { + warn_at = jiffies + NFP_MUTEX_WAIT_NEXT_WARN * HZ; + nfp_info(cpp, "waiting for NFP resource %s\n", name); + } + if (msleep_interruptible(10)) { + nfp_err(cpp, "wait for resource %s interrupted\n", + name); + return -ERESTARTSYS; + } + } +} + +/** + * nfp_resource_cpp_id() - Return the cpp_id of a resource handle + * @res: NFP Resource handle + * + * Return: NFP CPP ID + */ +u32 nfp_resource_cpp_id(struct nfp_resource *res) +{ + return res->cpp_id; +} + +/** + * nfp_resource_name() - Return the name of a resource handle + * @res: NFP Resource handle + * + * Return: const char pointer to the name of the resource + */ +const char *nfp_resource_name(struct nfp_resource *res) +{ + return res->name; +} + +/** + * nfp_resource_address() - Return the address of a resource handle + * @res: NFP Resource handle + * + * Return: Address of the resource + */ +u64 nfp_resource_address(struct nfp_resource *res) +{ + return res->addr; +} + +/** + * nfp_resource_size() - Return the size in bytes of a resource handle + * @res: NFP Resource handle + * + * Return: Size of the resource in bytes + */ +u64 nfp_resource_size(struct nfp_resource *res) +{ + return res->size; +} + +/** + * nfp_resource_table_init() - Run initial checks on the resource table + * @cpp: NFP CPP handle + * + * Start-of-day init procedure for resource 
table. Must be called before + * any local resource table users may exist. + * + * Return: 0 on success, -errno on failure + */ +int nfp_resource_table_init(struct nfp_cpp *cpp) +{ + struct nfp_cpp_mutex *dev_mutex; + int i, err; + + err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE); + if (err < 0) { + nfp_err(cpp, "Error: failed to reclaim resource table mutex\n"); + return err; + } + if (err) + nfp_warn(cpp, "Warning: busted main resource table mutex\n"); + + dev_mutex = nfp_cpp_mutex_alloc(cpp, NFP_RESOURCE_TBL_TARGET, + NFP_RESOURCE_TBL_BASE, + NFP_RESOURCE_TBL_KEY); + if (!dev_mutex) + return -ENOMEM; + + if (nfp_cpp_mutex_lock(dev_mutex)) { + nfp_err(cpp, "Error: failed to claim resource table mutex\n"); + nfp_cpp_mutex_free(dev_mutex); + return -EINVAL; + } + + /* Resource 0 is the dev_mutex, start from 1 */ + for (i = 1; i < NFP_RESOURCE_TBL_ENTRIES; i++) { + u64 addr = NFP_RESOURCE_TBL_BASE + + sizeof(struct nfp_resource_entry) * i; + + err = nfp_cpp_mutex_reclaim(cpp, NFP_RESOURCE_TBL_TARGET, addr); + if (err < 0) { + nfp_err(cpp, + "Error: failed to reclaim resource %d mutex\n", + i); + goto err_unlock; + } + if (err) + nfp_warn(cpp, "Warning: busted resource %d mutex\n", i); + } + + err = 0; +err_unlock: + nfp_cpp_mutex_unlock(dev_mutex); + nfp_cpp_mutex_free(dev_mutex); + + return err; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c new file mode 100644 index 000000000..2260c2403 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_rtsym.c @@ -0,0 +1,556 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp_rtsym.c + * Interface for accessing run-time symbol table + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Espen Skoglund <espen.skoglund@netronome.com> + * Francois H. 
Theron <francois.theron@netronome.com> + */ + +#include <asm/unaligned.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/slab.h> +#include <linux/io-64-nonatomic-hi-lo.h> + +#include "nfp.h" +#include "nfp_cpp.h" +#include "nfp_nffw.h" +#include "nfp6000/nfp6000.h" + +/* These need to match the linker */ +#define SYM_TGT_LMEM 0 +#define SYM_TGT_EMU_CACHE 0x17 + +struct nfp_rtsym_entry { + u8 type; + u8 target; + u8 island; + u8 addr_hi; + __le32 addr_lo; + __le16 name; + u8 menum; + u8 size_hi; + __le32 size_lo; +}; + +struct nfp_rtsym_table { + struct nfp_cpp *cpp; + int num; + char *strtab; + struct nfp_rtsym symtab[]; +}; + +static int nfp_meid(u8 island_id, u8 menum) +{ + return (island_id & 0x3F) == island_id && menum < 12 ? + (island_id << 4) | (menum + 4) : -1; +} + +static void +nfp_rtsym_sw_entry_init(struct nfp_rtsym_table *cache, u32 strtab_size, + struct nfp_rtsym *sw, struct nfp_rtsym_entry *fw) +{ + sw->type = fw->type; + sw->name = cache->strtab + le16_to_cpu(fw->name) % strtab_size; + sw->addr = ((u64)fw->addr_hi << 32) | le32_to_cpu(fw->addr_lo); + sw->size = ((u64)fw->size_hi << 32) | le32_to_cpu(fw->size_lo); + + switch (fw->target) { + case SYM_TGT_LMEM: + sw->target = NFP_RTSYM_TARGET_LMEM; + break; + case SYM_TGT_EMU_CACHE: + sw->target = NFP_RTSYM_TARGET_EMU_CACHE; + break; + default: + sw->target = fw->target; + break; + } + + if (fw->menum != 0xff) + sw->domain = nfp_meid(fw->island, fw->menum); + else if (fw->island != 0xff) + sw->domain = fw->island; + else + sw->domain = -1; +} + +struct nfp_rtsym_table *nfp_rtsym_table_read(struct nfp_cpp *cpp) +{ + struct nfp_rtsym_table *rtbl; + const struct nfp_mip *mip; + + mip = nfp_mip_open(cpp); + rtbl = __nfp_rtsym_table_read(cpp, mip); + nfp_mip_close(mip); + + return rtbl; +} + +struct nfp_rtsym_table * +__nfp_rtsym_table_read(struct nfp_cpp *cpp, const struct nfp_mip *mip) +{ + const u32 dram = NFP_CPP_ID(NFP_CPP_TARGET_MU, NFP_CPP_ACTION_RW, 0) | + NFP_ISL_EMEM0; + 
u32 strtab_addr, symtab_addr, strtab_size, symtab_size; + struct nfp_rtsym_entry *rtsymtab; + struct nfp_rtsym_table *cache; + int err, n, size; + + if (!mip) + return NULL; + + nfp_mip_strtab(mip, &strtab_addr, &strtab_size); + nfp_mip_symtab(mip, &symtab_addr, &symtab_size); + + if (!symtab_size || !strtab_size || symtab_size % sizeof(*rtsymtab)) + return NULL; + + /* Align to 64 bits */ + symtab_size = round_up(symtab_size, 8); + strtab_size = round_up(strtab_size, 8); + + rtsymtab = kmalloc(symtab_size, GFP_KERNEL); + if (!rtsymtab) + return NULL; + + size = sizeof(*cache); + size += symtab_size / sizeof(*rtsymtab) * sizeof(struct nfp_rtsym); + size += strtab_size + 1; + cache = kmalloc(size, GFP_KERNEL); + if (!cache) + goto exit_free_rtsym_raw; + + cache->cpp = cpp; + cache->num = symtab_size / sizeof(*rtsymtab); + cache->strtab = (void *)&cache->symtab[cache->num]; + + err = nfp_cpp_read(cpp, dram, symtab_addr, rtsymtab, symtab_size); + if (err != symtab_size) + goto exit_free_cache; + + err = nfp_cpp_read(cpp, dram, strtab_addr, cache->strtab, strtab_size); + if (err != strtab_size) + goto exit_free_cache; + cache->strtab[strtab_size] = '\0'; + + for (n = 0; n < cache->num; n++) + nfp_rtsym_sw_entry_init(cache, strtab_size, + &cache->symtab[n], &rtsymtab[n]); + + kfree(rtsymtab); + + return cache; + +exit_free_cache: + kfree(cache); +exit_free_rtsym_raw: + kfree(rtsymtab); + return NULL; +} + +/** + * nfp_rtsym_count() - Get the number of RTSYM descriptors + * @rtbl: NFP RTsym table + * + * Return: Number of RTSYM descriptors + */ +int nfp_rtsym_count(struct nfp_rtsym_table *rtbl) +{ + if (!rtbl) + return -EINVAL; + return rtbl->num; +} + +/** + * nfp_rtsym_get() - Get the Nth RTSYM descriptor + * @rtbl: NFP RTsym table + * @idx: Index (0-based) of the RTSYM descriptor + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym *nfp_rtsym_get(struct nfp_rtsym_table *rtbl, int idx) +{ + if (!rtbl) + return NULL; + if 
(idx >= rtbl->num) + return NULL; + + return &rtbl->symtab[idx]; +} + +/** + * nfp_rtsym_lookup() - Return the RTSYM descriptor for a symbol name + * @rtbl: NFP RTsym table + * @name: Symbol name + * + * Return: const pointer to a struct nfp_rtsym descriptor, or NULL + */ +const struct nfp_rtsym * +nfp_rtsym_lookup(struct nfp_rtsym_table *rtbl, const char *name) +{ + int n; + + if (!rtbl) + return NULL; + + for (n = 0; n < rtbl->num; n++) + if (strcmp(name, rtbl->symtab[n].name) == 0) + return &rtbl->symtab[n]; + + return NULL; +} + +u64 nfp_rtsym_size(const struct nfp_rtsym *sym) +{ + switch (sym->type) { + case NFP_RTSYM_TYPE_NONE: + pr_err("rtsym '%s': type NONE\n", sym->name); + return 0; + default: + pr_warn("rtsym '%s': unknown type: %d\n", sym->name, sym->type); + fallthrough; + case NFP_RTSYM_TYPE_OBJECT: + case NFP_RTSYM_TYPE_FUNCTION: + return sym->size; + case NFP_RTSYM_TYPE_ABS: + return sizeof(u64); + } +} + +static int +nfp_rtsym_to_dest(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *cpp_id, u64 *addr) +{ + if (sym->type != NFP_RTSYM_TYPE_OBJECT) { + nfp_err(cpp, "rtsym '%s': direct access to non-object rtsym\n", + sym->name); + return -EINVAL; + } + + *addr = sym->addr + off; + + if (sym->target == NFP_RTSYM_TARGET_EMU_CACHE) { + int locality_off = nfp_cpp_mu_locality_lsb(cpp); + + *addr &= ~(NFP_MU_ADDR_ACCESS_TYPE_MASK << locality_off); + *addr |= NFP_MU_ADDR_ACCESS_TYPE_DIRECT << locality_off; + + *cpp_id = NFP_CPP_ISLAND_ID(NFP_CPP_TARGET_MU, action, token, + sym->domain); + } else if (sym->target < 0) { + nfp_err(cpp, "rtsym '%s': unhandled target encoding: %d\n", + sym->name, sym->target); + return -EINVAL; + } else { + *cpp_id = NFP_CPP_ISLAND_ID(sym->target, action, token, + sym->domain); + } + + return 0; +} + +int __nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + 
int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': read out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + u8 tmp[8]; + + put_unaligned_le64(sym->addr, tmp); + memcpy(buf, &tmp[off], len); + + return len; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_read(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_read(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_read(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readl out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readl(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readl(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 *value) +{ + return __nfp_rtsym_readl(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_readq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 *value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': readq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + if (sym->type == NFP_RTSYM_TYPE_ABS) { + *value = sym->addr; + return 0; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_readq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_readq(struct nfp_cpp 
*cpp, const struct nfp_rtsym *sym, u64 off, + u64 *value) +{ + return __nfp_rtsym_readq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, void *buf, size_t len) +{ + u64 sym_size = nfp_rtsym_size(sym); + u32 cpp_id; + u64 addr; + int err; + + if (off > sym_size) { + nfp_err(cpp, "rtsym '%s': write out of bounds: off: %lld + len: %zd > size: %lld\n", + sym->name, off, len, sym_size); + return -ENXIO; + } + len = min_t(size_t, len, sym_size - off); + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_write(cpp, cpp_id, addr, buf, len); +} + +int nfp_rtsym_write(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + void *buf, size_t len) +{ + return __nfp_rtsym_write(cpp, sym, NFP_CPP_ACTION_RW, 0, off, buf, len); +} + +int __nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u32 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 4 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': writel out of bounds: off: %lld + 4 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if (err) + return err; + + return nfp_cpp_writel(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writel(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u32 value) +{ + return __nfp_rtsym_writel(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +int __nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, + u8 action, u8 token, u64 off, u64 value) +{ + u32 cpp_id; + u64 addr; + int err; + + if (off + 8 > nfp_rtsym_size(sym)) { + nfp_err(cpp, "rtsym '%s': writeq out of bounds: off: %lld + 8 > size: %lld\n", + sym->name, off, nfp_rtsym_size(sym)); + return -ENXIO; + } + + err = nfp_rtsym_to_dest(cpp, sym, action, token, off, &cpp_id, &addr); + if 
(err) + return err; + + return nfp_cpp_writeq(cpp, cpp_id, addr, value); +} + +int nfp_rtsym_writeq(struct nfp_cpp *cpp, const struct nfp_rtsym *sym, u64 off, + u64 value) +{ + return __nfp_rtsym_writeq(cpp, sym, NFP_CPP_ACTION_RW, 0, off, value); +} + +/** + * nfp_rtsym_read_le() - Read a simple unsigned scalar value from symbol + * @rtbl: NFP RTsym table + * @name: Symbol name + * @error: Poniter to error code (optional) + * + * Lookup a symbol, map, read it and return it's value. Value of the symbol + * will be interpreted as a simple little-endian unsigned value. Symbol can + * be 4 or 8 bytes in size. + * + * Return: value read, on error sets the error and returns ~0ULL. + */ +u64 nfp_rtsym_read_le(struct nfp_rtsym_table *rtbl, const char *name, + int *error) +{ + const struct nfp_rtsym *sym; + u32 val32; + u64 val; + int err; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) { + err = -ENOENT; + goto exit; + } + + switch (nfp_rtsym_size(sym)) { + case 4: + err = nfp_rtsym_readl(rtbl->cpp, sym, 0, &val32); + val = val32; + break; + case 8: + err = nfp_rtsym_readq(rtbl->cpp, sym, 0, &val); + break; + default: + nfp_err(rtbl->cpp, + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); + err = -EINVAL; + break; + } + +exit: + if (error) + *error = err; + + if (err) + return ~0ULL; + return val; +} + +/** + * nfp_rtsym_write_le() - Write an unsigned scalar value to a symbol + * @rtbl: NFP RTsym table + * @name: Symbol name + * @value: Value to write + * + * Lookup a symbol and write a value to it. Symbol can be 4 or 8 bytes in size. + * If 4 bytes then the lower 32-bits of 'value' are used. Value will be + * written as simple little-endian unsigned value. + * + * Return: 0 on success or error code. 
+ */ +int nfp_rtsym_write_le(struct nfp_rtsym_table *rtbl, const char *name, + u64 value) +{ + const struct nfp_rtsym *sym; + int err; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) + return -ENOENT; + + switch (nfp_rtsym_size(sym)) { + case 4: + err = nfp_rtsym_writel(rtbl->cpp, sym, 0, value); + break; + case 8: + err = nfp_rtsym_writeq(rtbl->cpp, sym, 0, value); + break; + default: + nfp_err(rtbl->cpp, + "rtsym '%s': unsupported or non-scalar size: %lld\n", + name, nfp_rtsym_size(sym)); + err = -EINVAL; + break; + } + + return err; +} + +u8 __iomem * +nfp_rtsym_map(struct nfp_rtsym_table *rtbl, const char *name, const char *id, + unsigned int min_size, struct nfp_cpp_area **area) +{ + const struct nfp_rtsym *sym; + u8 __iomem *mem; + u32 cpp_id; + u64 addr; + int err; + + sym = nfp_rtsym_lookup(rtbl, name); + if (!sym) + return (u8 __iomem *)ERR_PTR(-ENOENT); + + err = nfp_rtsym_to_dest(rtbl->cpp, sym, NFP_CPP_ACTION_RW, 0, 0, + &cpp_id, &addr); + if (err) { + nfp_err(rtbl->cpp, "rtsym '%s': mapping failed\n", name); + return (u8 __iomem *)ERR_PTR(err); + } + + if (sym->size < min_size) { + nfp_err(rtbl->cpp, "rtsym '%s': too small\n", name); + return (u8 __iomem *)ERR_PTR(-EINVAL); + } + + mem = nfp_cpp_map_area(rtbl->cpp, id, cpp_id, addr, sym->size, area); + if (IS_ERR(mem)) { + nfp_err(rtbl->cpp, "rtysm '%s': failed to map: %ld\n", + name, PTR_ERR(mem)); + return mem; + } + + return mem; +} diff --git a/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c new file mode 100644 index 000000000..79470f198 --- /dev/null +++ b/drivers/net/ethernet/netronome/nfp/nfpcore/nfp_target.c @@ -0,0 +1,742 @@ +// SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +/* Copyright (C) 2015-2018 Netronome Systems, Inc. */ + +/* + * nfp_target.c + * CPP Access Width Decoder + * Authors: Jakub Kicinski <jakub.kicinski@netronome.com> + * Jason McMullan <jason.mcmullan@netronome.com> + * Francois H. 
Theron <francois.theron@netronome.com> + */ + +#define pr_fmt(fmt) "NFP target: " fmt + +#include <linux/bitops.h> +#include <linux/kernel.h> +#include <linux/printk.h> + +#include "nfp_cpp.h" + +#include "nfp6000/nfp6000.h" + +#define P32 1 +#define P64 2 + +/* This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. + */ + +#define AT(_action, _token, _pull, _push) \ + case NFP_CPP_ID(0, (_action), (_token)): \ + return PUSHPULL((_pull), (_push)) + +static int target_rw(u32 cpp_id, int pp, int start, int len) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, pp); + AT(1, 0, pp, 0); + AT(NFP_CPP_ACTION_RW, 0, pp, pp); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_dma(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiDma */ + AT(1, 0, P64, 0); /* WriteNbiDma */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_stats(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P32); /* ReadNbiStats */ + AT(1, 0, P32, 0); /* WriteNbiStats */ + AT(NFP_CPP_ACTION_RW, 0, P32, P32); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_tm(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiTM */ + AT(1, 0, P64, 0); /* WriteNbiTM */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi_ppc(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 0, 0, P64); /* ReadNbiPreclassifier */ + AT(1, 0, P64, 0); /* WriteNbiPreclassifier */ + AT(NFP_CPP_ACTION_RW, 0, P64, P64); + default: + return -EINVAL; + } +} + +static int nfp6000_nbi(u32 cpp_id, u64 address) +{ + u64 rel_addr = address & 0x3fFFFF; + + if (rel_addr < (1 << 20)) + return nfp6000_nbi_dma(cpp_id); + if (rel_addr < (2 << 20)) + return nfp6000_nbi_stats(cpp_id); + if (rel_addr < (3 << 20)) + return 
nfp6000_nbi_tm(cpp_id); + return nfp6000_nbi_ppc(cpp_id); +} + +/* This structure ONLY includes items that can be done with a read or write of + * 32-bit or 64-bit words. All others are not listed. + */ +static int nfp6000_mu_common(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(NFP_CPP_ACTION_RW, 0, P64, P64); /* read_be/write_be */ + AT(NFP_CPP_ACTION_RW, 1, P64, P64); /* read_le/write_le */ + AT(NFP_CPP_ACTION_RW, 2, P64, P64); /* read_swap_be/write_swap_be */ + AT(NFP_CPP_ACTION_RW, 3, P64, P64); /* read_swap_le/write_swap_le */ + AT(0, 0, 0, P64); /* read_be */ + AT(0, 1, 0, P64); /* read_le */ + AT(0, 2, 0, P64); /* read_swap_be */ + AT(0, 3, 0, P64); /* read_swap_le */ + AT(1, 0, P64, 0); /* write_be */ + AT(1, 1, P64, 0); /* write_le */ + AT(1, 2, P64, 0); /* write_swap_be */ + AT(1, 3, P64, 0); /* write_swap_le */ + AT(3, 0, 0, P32); /* atomic_read */ + AT(3, 2, P32, 0); /* mask_compare_write */ + AT(4, 0, P32, 0); /* atomic_write */ + AT(4, 2, 0, 0); /* atomic_write_imm */ + AT(4, 3, 0, P32); /* swap_imm */ + AT(5, 0, P32, 0); /* set */ + AT(5, 3, 0, P32); /* test_set_imm */ + AT(6, 0, P32, 0); /* clr */ + AT(6, 3, 0, P32); /* test_clr_imm */ + AT(7, 0, P32, 0); /* add */ + AT(7, 3, 0, P32); /* test_add_imm */ + AT(8, 0, P32, 0); /* addsat */ + AT(8, 3, 0, P32); /* test_subsat_imm */ + AT(9, 0, P32, 0); /* sub */ + AT(9, 3, 0, P32); /* test_sub_imm */ + AT(10, 0, P32, 0); /* subsat */ + AT(10, 3, 0, P32); /* test_subsat_imm */ + AT(13, 0, 0, P32); /* microq128_get */ + AT(13, 1, 0, P32); /* microq128_pop */ + AT(13, 2, P32, 0); /* microq128_put */ + AT(15, 0, P32, 0); /* xor */ + AT(15, 3, 0, P32); /* test_xor_imm */ + AT(28, 0, 0, P32); /* read32_be */ + AT(28, 1, 0, P32); /* read32_le */ + AT(28, 2, 0, P32); /* read32_swap_be */ + AT(28, 3, 0, P32); /* read32_swap_le */ + AT(31, 0, P32, 0); /* write32_be */ + AT(31, 1, P32, 0); /* write32_le */ + AT(31, 2, P32, 0); /* write32_swap_be */ + AT(31, 3, P32, 0); /* write32_swap_le */ + 
default: + return -EINVAL; + } +} + +static int nfp6000_mu_ctm(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(16, 1, 0, P32); /* packet_read_packet_status */ + AT(17, 1, 0, P32); /* packet_credit_get */ + AT(17, 3, 0, P64); /* packet_add_thread */ + AT(18, 2, 0, P64); /* packet_free_and_return_pointer */ + AT(18, 3, 0, P64); /* packet_return_pointer */ + AT(21, 0, 0, P64); /* pe_dma_to_memory_indirect */ + AT(21, 1, 0, P64); /* pe_dma_to_memory_indirect_swap */ + AT(21, 2, 0, P64); /* pe_dma_to_memory_indirect_free */ + AT(21, 3, 0, P64); /* pe_dma_to_memory_indirect_free_swap */ + default: + return nfp6000_mu_common(cpp_id); + } +} + +static int nfp6000_mu_emu(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(18, 0, 0, P32); /* read_queue */ + AT(18, 1, 0, P32); /* read_queue_ring */ + AT(18, 2, P32, 0); /* write_queue */ + AT(18, 3, P32, 0); /* write_queue_ring */ + AT(20, 2, P32, 0); /* journal */ + AT(21, 0, 0, P32); /* get */ + AT(21, 1, 0, P32); /* get_eop */ + AT(21, 2, 0, P32); /* get_freely */ + AT(22, 0, 0, P32); /* pop */ + AT(22, 1, 0, P32); /* pop_eop */ + AT(22, 2, 0, P32); /* pop_freely */ + default: + return nfp6000_mu_common(cpp_id); + } +} + +static int nfp6000_mu_imu(u32 cpp_id) +{ + return nfp6000_mu_common(cpp_id); +} + +static int nfp6000_mu(u32 cpp_id, u64 address) +{ + int pp; + + if (address < 0x2000000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x8000000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0x9800000000ULL) + pp = nfp6000_mu_ctm(cpp_id); + else if (address < 0x9C00000000ULL) + pp = nfp6000_mu_emu(cpp_id); + else if (address < 0xA000000000ULL) + pp = nfp6000_mu_imu(cpp_id); + else + pp = nfp6000_mu_ctm(cpp_id); + + return pp; +} + +static int nfp6000_ila(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 1, 0, P32); /* read_check_error */ + AT(2, 0, 0, P32); /* read_int */ + AT(3, 0, P32, 0); /* write_int */ + default: + return target_rw(cpp_id, P32, 48, 
4); + } +} + +static int nfp6000_pci(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(2, 0, 0, P32); + AT(3, 0, P32, 0); + default: + return target_rw(cpp_id, P32, 4, 4); + } +} + +static int nfp6000_crypto(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(2, 0, P64, 0); + default: + return target_rw(cpp_id, P64, 12, 4); + } +} + +static int nfp6000_cap_xpb(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 1, 0, P32); /* RingGet */ + AT(0, 2, P32, 0); /* Interthread Signal */ + AT(1, 1, P32, 0); /* RingPut */ + AT(1, 2, P32, 0); /* CTNNWr */ + AT(2, 0, 0, P32); /* ReflectRd, signal none */ + AT(2, 1, 0, P32); /* ReflectRd, signal self */ + AT(2, 2, 0, P32); /* ReflectRd, signal remote */ + AT(2, 3, 0, P32); /* ReflectRd, signal both */ + AT(3, 0, P32, 0); /* ReflectWr, signal none */ + AT(3, 1, P32, 0); /* ReflectWr, signal self */ + AT(3, 2, P32, 0); /* ReflectWr, signal remote */ + AT(3, 3, P32, 0); /* ReflectWr, signal both */ + AT(NFP_CPP_ACTION_RW, 1, P32, P32); + default: + return target_rw(cpp_id, P32, 1, 63); + } +} + +static int nfp6000_cls(u32 cpp_id) +{ + switch (cpp_id & NFP_CPP_ID(0, ~0, ~0)) { + AT(0, 3, P32, 0); /* xor */ + AT(2, 0, P32, 0); /* set */ + AT(2, 1, P32, 0); /* clr */ + AT(4, 0, P32, 0); /* add */ + AT(4, 1, P32, 0); /* add64 */ + AT(6, 0, P32, 0); /* sub */ + AT(6, 1, P32, 0); /* sub64 */ + AT(6, 2, P32, 0); /* subsat */ + AT(8, 2, P32, 0); /* hash_mask */ + AT(8, 3, P32, 0); /* hash_clear */ + AT(9, 0, 0, P32); /* ring_get */ + AT(9, 1, 0, P32); /* ring_pop */ + AT(9, 2, 0, P32); /* ring_get_freely */ + AT(9, 3, 0, P32); /* ring_pop_freely */ + AT(10, 0, P32, 0); /* ring_put */ + AT(10, 2, P32, 0); /* ring_journal */ + AT(14, 0, P32, 0); /* reflect_write_sig_local */ + AT(15, 1, 0, P32); /* reflect_read_sig_local */ + AT(17, 2, P32, 0); /* statisic */ + AT(24, 0, 0, P32); /* ring_read */ + AT(24, 1, P32, 0); /* ring_write */ + AT(25, 0, 0, P32); /* ring_workq_add_thread */ + AT(25, 1, P32, 
0); /* ring_workq_add_work */ + default: + return target_rw(cpp_id, P32, 0, 64); + } +} + +int nfp_target_pushpull(u32 cpp_id, u64 address) +{ + switch (NFP_CPP_ID_TARGET_of(cpp_id)) { + case NFP_CPP_TARGET_NBI: + return nfp6000_nbi(cpp_id, address); + case NFP_CPP_TARGET_QDR: + return target_rw(cpp_id, P32, 24, 4); + case NFP_CPP_TARGET_ILA: + return nfp6000_ila(cpp_id); + case NFP_CPP_TARGET_MU: + return nfp6000_mu(cpp_id, address); + case NFP_CPP_TARGET_PCIE: + return nfp6000_pci(cpp_id); + case NFP_CPP_TARGET_ARM: + if (address < 0x10000) + return target_rw(cpp_id, P64, 1, 1); + else + return target_rw(cpp_id, P32, 1, 1); + case NFP_CPP_TARGET_CRYPTO: + return nfp6000_crypto(cpp_id); + case NFP_CPP_TARGET_CT_XPB: + return nfp6000_cap_xpb(cpp_id); + case NFP_CPP_TARGET_CLS: + return nfp6000_cls(cpp_id); + case 0: + return target_rw(cpp_id, P32, 4, 4); + default: + return -EINVAL; + } +} + +#undef AT +#undef P32 +#undef P64 + +/* All magic NFP-6xxx IMB 'mode' numbers here are from: + * Databook (1 August 2013) + * - System Overview and Connectivity + * -- Internal Connectivity + * --- Distributed Switch Fabric - Command Push/Pull (DSF-CPP) Bus + * ---- CPP addressing + * ----- Table 3.6. CPP Address Translation Mode Commands + */ + +#define _NIC_NFP6000_MU_LOCALITY_DIRECT 2 + +static int nfp_decode_basic(u64 addr, int *dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + + /* This function doesn't handle MU or CTXBP */ + if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB) + return -EINVAL; + + switch (mode) { + case 0: + /* For VQDR, in this mode for 32-bit addressing + * it would be islands 0, 16, 32 and 48 depending on channel + * and upper address bits. + * Since those are not all valid islands, most decode + * cases would result in bad island IDs, but we do them + * anyway since this is decoding an address that is already + * assumed to be used as-is to get to sram. + */ + iid_lsb = addr40 ? 
34 : 26; + *dest_island = (addr >> iid_lsb) & 0x3F; + return 0; + case 1: + /* For VQDR 32-bit, this would decode as: + * Channel 0: island#0 + * Channel 1: island#0 + * Channel 2: island#1 + * Channel 3: island#1 + * That would be valid as long as both islands + * have VQDR. Let's allow this. + */ + idx_lsb = addr40 ? 39 : 31; + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1; + else + *dest_island = isld0; + + return 0; + case 2: + /* For VQDR 32-bit: + * Channel 0: (island#0 | 0) + * Channel 1: (island#0 | 1) + * Channel 2: (island#1 | 0) + * Channel 3: (island#1 | 1) + * + * Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld0 &= ~1; + isld1 &= ~1; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 1; + + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 1); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 1); + + return 0; + case 3: + /* In this mode the data address starts to affect the island ID + * so rather not allow it. In some really specific case + * one could use this to send the upper half of the + * VQDR channel to another MU, but this is getting very + * specific. + * However, as above for mode 0, this is the decoder + * and the caller should validate the resulting IID. + * This blindly does what the silicon would do. + */ + isld0 &= ~3; + isld1 &= ~3; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 2; + + if (addr & BIT_ULL(idx_lsb)) + *dest_island = isld1 | (int)((addr >> iid_lsb) & 3); + else + *dest_island = isld0 | (int)((addr >> iid_lsb) & 3); + + return 0; + default: + return -EINVAL; + } +} + +static int nfp_encode_basic_qdr(u64 addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int v, ret; + + /* Full Island ID and channel bits overlap? */ + ret = nfp_decode_basic(addr, &v, cpp_tgt, mode, addr40, isld1, isld0); + if (ret) + return ret; + + /* The current address won't go where expected? 
*/ + if (dest_island != -1 && dest_island != v) + return -EINVAL; + + /* If dest_island was -1, we don't care where it goes. */ + return 0; +} + +/* Try each option, take first one that fits. + * Not sure if we would want to do some smarter + * searching and prefer 0 or non-0 island IDs. + */ +static int nfp_encode_basic_search(u64 *addr, int dest_island, int *isld, + int iid_lsb, int idx_lsb, int v_max) +{ + int i, v; + + for (i = 0; i < 2; i++) + for (v = 0; v < v_max; v++) { + if (dest_island != (isld[i] | v)) + continue; + + *addr &= ~GENMASK_ULL(idx_lsb, iid_lsb); + *addr |= ((u64)i << idx_lsb); + *addr |= ((u64)v << iid_lsb); + return 0; + } + + return -ENODEV; +} + +/* For VQDR, we may not modify the Channel bits, which might overlap + * with the Index bit. When it does, we need to ensure that isld0 == isld1. + */ +static int nfp_encode_basic(u64 *addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb; + int isld[2]; + u64 v64; + + isld[0] = isld0; + isld[1] = isld1; + + /* This function doesn't handle MU or CTXBP */ + if (cpp_tgt == NFP_CPP_TARGET_MU || cpp_tgt == NFP_CPP_TARGET_CT_XPB) + return -EINVAL; + + switch (mode) { + case 0: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* In this specific mode we'd rather not modify + * the address but we can verify if the existing + * contents will point to a valid island. + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + iid_lsb = addr40 ? 34 : 26; + /* <39:34> or <31:26> */ + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= ((u64)dest_island << iid_lsb) & v64; + return 0; + case 1: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + idx_lsb = addr40 ? 
39 : 31; + if (dest_island == isld0) { + /* Only need to clear the Index bit */ + *addr &= ~BIT_ULL(idx_lsb); + return 0; + } + + if (dest_island == isld1) { + /* Only need to set the Index bit */ + *addr |= BIT_ULL(idx_lsb); + return 0; + } + + return -ENODEV; + case 2: + /* iid<0> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* Special case where we allow channel bits to + * be set before hand and with them select an island. + * So we need to confirm that it's at least plausible. + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + /* Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 1; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 2); + case 3: + if (cpp_tgt == NFP_CPP_TARGET_QDR && !addr40) + /* iid<0> = addr<29> = data + * iid<1> = addr<30> = channel<0> + * channel<1> = addr<31> = Index + */ + return nfp_encode_basic_qdr(*addr, cpp_tgt, dest_island, + mode, addr40, isld1, isld0); + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = addr40 ? 39 : 31; + iid_lsb = idx_lsb - 2; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 4); + default: + return -EINVAL; + } +} + +static int nfp_encode_mu(u64 *addr, int dest_island, int mode, + bool addr40, int isld1, int isld0) +{ + int iid_lsb, idx_lsb, locality_lsb; + int isld[2]; + u64 v64; + int da; + + isld[0] = isld0; + isld[1] = isld1; + locality_lsb = nfp_cppat_mu_locality_lsb(mode, addr40); + + if (((*addr >> locality_lsb) & 3) == _NIC_NFP6000_MU_LOCALITY_DIRECT) + da = 1; + else + da = 0; + + switch (mode) { + case 0: + iid_lsb = addr40 ? 
32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + case 1: + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + idx_lsb = addr40 ? 37 : 29; + if (dest_island == isld0) { + *addr &= ~BIT_ULL(idx_lsb); + return 0; + } + + if (dest_island == isld1) { + *addr |= BIT_ULL(idx_lsb); + return 0; + } + + return -ENODEV; + case 2: + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + /* Make sure we compare against isldN values + * by clearing the LSB. + * This is what the silicon does. + */ + isld[0] &= ~1; + isld[1] &= ~1; + + idx_lsb = addr40 ? 37 : 29; + iid_lsb = idx_lsb - 1; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 2); + case 3: + /* Only the EMU will use 40 bit addressing. Silently + * set the direct locality bit for everyone else. + * The SDK toolchain uses dest_island <= 0 to test + * for atypical address encodings to support access + * to local-island CTM with a 32-but address (high-locality + * is effewctively ignored and just used for + * routing to island #0). + */ + if (dest_island > 0 && (dest_island < 24 || dest_island > 26)) { + *addr |= ((u64)_NIC_NFP6000_MU_LOCALITY_DIRECT) + << locality_lsb; + da = 1; + } + + if (da) { + iid_lsb = addr40 ? 32 : 24; + v64 = GENMASK_ULL(iid_lsb + 5, iid_lsb); + *addr &= ~v64; + *addr |= (((u64)dest_island) << iid_lsb) & v64; + return 0; + } + + isld[0] &= ~3; + isld[1] &= ~3; + + idx_lsb = addr40 ? 
37 : 29; + iid_lsb = idx_lsb - 2; + + return nfp_encode_basic_search(addr, dest_island, isld, + iid_lsb, idx_lsb, 4); + default: + return -EINVAL; + } +} + +static int nfp_cppat_addr_encode(u64 *addr, int dest_island, int cpp_tgt, + int mode, bool addr40, int isld1, int isld0) +{ + switch (cpp_tgt) { + case NFP_CPP_TARGET_NBI: + case NFP_CPP_TARGET_QDR: + case NFP_CPP_TARGET_ILA: + case NFP_CPP_TARGET_PCIE: + case NFP_CPP_TARGET_ARM: + case NFP_CPP_TARGET_CRYPTO: + case NFP_CPP_TARGET_CLS: + return nfp_encode_basic(addr, dest_island, cpp_tgt, mode, + addr40, isld1, isld0); + + case NFP_CPP_TARGET_MU: + return nfp_encode_mu(addr, dest_island, mode, + addr40, isld1, isld0); + + case NFP_CPP_TARGET_CT_XPB: + if (mode != 1 || addr40) + return -EINVAL; + *addr &= ~GENMASK_ULL(29, 24); + *addr |= ((u64)dest_island << 24) & GENMASK_ULL(29, 24); + return 0; + default: + return -EINVAL; + } +} + +int nfp_target_cpp(u32 cpp_island_id, u64 cpp_island_address, + u32 *cpp_target_id, u64 *cpp_target_address, + const u32 *imb_table) +{ + const int island = NFP_CPP_ID_ISLAND_of(cpp_island_id); + const int target = NFP_CPP_ID_TARGET_of(cpp_island_id); + u32 imb; + int err; + + if (target < 0 || target >= 16) { + pr_err("Invalid CPP target: %d\n", target); + return -EINVAL; + } + + if (island == 0) { + /* Already translated */ + *cpp_target_id = cpp_island_id; + *cpp_target_address = cpp_island_address; + return 0; + } + + /* CPP + Island only allowed on systems with IMB tables */ + if (!imb_table) + return -EINVAL; + + imb = imb_table[target]; + + *cpp_target_address = cpp_island_address; + err = nfp_cppat_addr_encode(cpp_target_address, island, target, + ((imb >> 13) & 7), ((imb >> 12) & 1), + ((imb >> 6) & 0x3f), ((imb >> 0) & 0x3f)); + if (err) { + pr_err("Can't encode CPP address: %d\n", err); + return err; + } + + *cpp_target_id = NFP_CPP_ID(target, + NFP_CPP_ID_ACTION_of(cpp_island_id), + NFP_CPP_ID_TOKEN_of(cpp_island_id)); + + return 0; +} |