author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-07 18:49:45 +0000
commit | 2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch) |
tree | 848558de17fb3008cdf4d861b01ac7781903ce39 | /drivers/misc/ocxl
parent | Initial commit. (diff) |
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'drivers/misc/ocxl')
-rw-r--r-- | drivers/misc/ocxl/Kconfig | 31
-rw-r--r-- | drivers/misc/ocxl/Makefile | 12
-rw-r--r-- | drivers/misc/ocxl/afu_irq.c | 212
-rw-r--r-- | drivers/misc/ocxl/config.c | 937
-rw-r--r-- | drivers/misc/ocxl/context.c | 296
-rw-r--r-- | drivers/misc/ocxl/core.c | 569
-rw-r--r-- | drivers/misc/ocxl/file.c | 620
-rw-r--r-- | drivers/misc/ocxl/link.c | 779
-rw-r--r-- | drivers/misc/ocxl/main.c | 37
-rw-r--r-- | drivers/misc/ocxl/mmio.c | 234
-rw-r--r-- | drivers/misc/ocxl/ocxl_internal.h | 154
-rw-r--r-- | drivers/misc/ocxl/pasid.c | 107
-rw-r--r-- | drivers/misc/ocxl/pci.c | 66
-rw-r--r-- | drivers/misc/ocxl/sysfs.c | 186
-rw-r--r-- | drivers/misc/ocxl/trace.c | 6
-rw-r--r-- | drivers/misc/ocxl/trace.h | 242
16 files changed, 4488 insertions, 0 deletions
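The Kconfig help text in the diff below notes that userspace drives these accelerators through character devices under /dev/ocxl/, and file.c implements the corresponding open/ioctl/mmap entry points. As a quick orientation before the diff, here is a minimal userspace sketch of attaching a process to an AFU context; the device path is a hypothetical placeholder (actual names depend on the card), and only the OCXL_IOCTL_ATTACH call handled by afu_ioctl_attach() in file.c is exercised.

/* Hypothetical example, not part of this commit: open an AFU device and
 * attach the current process. The device path below is a placeholder. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <misc/ocxl.h>	/* uapi header: OCXL_IOCTL_ATTACH, struct ocxl_ioctl_attach */

int main(void)
{
	struct ocxl_ioctl_attach attach;
	int fd;

	fd = open("/dev/ocxl/EXAMPLE-AFU", O_RDWR);	/* placeholder device name */
	if (fd < 0) {
		perror("open");
		return 1;
	}

	memset(&attach, 0, sizeof(attach));	/* reserved fields must be zero */
	if (ioctl(fd, OCXL_IOCTL_ATTACH, &attach) == -1) {	/* binds this mm to a PASID */
		perror("OCXL_IOCTL_ATTACH");
		close(fd);
		return 1;
	}

	/* At this point the per-process MMIO area can be mmap()ed at offset 0. */
	close(fd);
	return 0;
}

The mmap offsets (per-process MMIO below irq_base_offset, AFU interrupt trigger pages above it) follow the layout set up in context.c and core.c later in this diff.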
diff --git a/drivers/misc/ocxl/Kconfig b/drivers/misc/ocxl/Kconfig new file mode 100644 index 000000000..c9b0a27ca --- /dev/null +++ b/drivers/misc/ocxl/Kconfig @@ -0,0 +1,31 @@ +# SPDX-License-Identifier: GPL-2.0-only +# +# Open Coherent Accelerator (OCXL) compatible devices +# + +config OCXL_BASE + bool + select PPC_COPRO_BASE + +config OCXL + tristate "OpenCAPI coherent accelerator support" + depends on HOTPLUG_PCI_POWERNV + select OCXL_BASE + default m + help + Select this option to enable the ocxl driver for Open + Coherent Accelerator Processor Interface (OpenCAPI) devices. + + OpenCAPI allows FPGA and ASIC accelerators to be coherently + attached to a CPU over an OpenCAPI link. + + The ocxl driver enables userspace programs to access these + accelerators through devices in /dev/ocxl/. + + For more information, see https://opencapi.org. + + This is not to be confused with the support for IBM CAPI + accelerators (CONFIG_CXL), which are PCI-based instead of a + dedicated OpenCAPI link, and don't follow the same protocol. + + If unsure, say N. diff --git a/drivers/misc/ocxl/Makefile b/drivers/misc/ocxl/Makefile new file mode 100644 index 000000000..d07d1bb8e --- /dev/null +++ b/drivers/misc/ocxl/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0+ +ccflags-$(CONFIG_PPC_WERROR) += -Werror + +ocxl-y += main.o pci.o config.o file.o pasid.o mmio.o +ocxl-y += link.o context.o afu_irq.o sysfs.o trace.o +ocxl-y += core.o +obj-$(CONFIG_OCXL) += ocxl.o + +# For tracepoints to include our trace.h from tracepoint infrastructure: +CFLAGS_trace.o := -I$(src) + +# ccflags-y += -DDEBUG diff --git a/drivers/misc/ocxl/afu_irq.c b/drivers/misc/ocxl/afu_irq.c new file mode 100644 index 000000000..a06920b7e --- /dev/null +++ b/drivers/misc/ocxl/afu_irq.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. 
+#include <linux/interrupt.h> +#include <linux/irqdomain.h> +#include <asm/pnv-ocxl.h> +#include <asm/xive.h> +#include "ocxl_internal.h" +#include "trace.h" + +struct afu_irq { + int id; + int hw_irq; + unsigned int virq; + char *name; + irqreturn_t (*handler)(void *private); + void (*free_private)(void *private); + void *private; +}; + +int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset) +{ + return (offset - ctx->afu->irq_base_offset) >> PAGE_SHIFT; +} + +u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id) +{ + return ctx->afu->irq_base_offset + (irq_id << PAGE_SHIFT); +} + +int ocxl_irq_set_handler(struct ocxl_context *ctx, int irq_id, + irqreturn_t (*handler)(void *private), + void (*free_private)(void *private), + void *private) +{ + struct afu_irq *irq; + int rc; + + mutex_lock(&ctx->irq_lock); + irq = idr_find(&ctx->irq_idr, irq_id); + if (!irq) { + rc = -EINVAL; + goto unlock; + } + + irq->handler = handler; + irq->private = private; + irq->free_private = free_private; + + rc = 0; + // Fall through to unlock + +unlock: + mutex_unlock(&ctx->irq_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_irq_set_handler); + +static irqreturn_t afu_irq_handler(int virq, void *data) +{ + struct afu_irq *irq = (struct afu_irq *) data; + + trace_ocxl_afu_irq_receive(virq); + + if (irq->handler) + return irq->handler(irq->private); + + return IRQ_HANDLED; // Just drop it on the ground +} + +static int setup_afu_irq(struct ocxl_context *ctx, struct afu_irq *irq) +{ + int rc; + + irq->virq = irq_create_mapping(NULL, irq->hw_irq); + if (!irq->virq) { + pr_err("irq_create_mapping failed\n"); + return -ENOMEM; + } + pr_debug("hw_irq %d mapped to virq %u\n", irq->hw_irq, irq->virq); + + irq->name = kasprintf(GFP_KERNEL, "ocxl-afu-%u", irq->virq); + if (!irq->name) { + irq_dispose_mapping(irq->virq); + return -ENOMEM; + } + + rc = request_irq(irq->virq, afu_irq_handler, 0, irq->name, irq); + if (rc) { + kfree(irq->name); + irq->name = NULL; + irq_dispose_mapping(irq->virq); + pr_err("request_irq failed: %d\n", rc); + return rc; + } + return 0; +} + +static void release_afu_irq(struct afu_irq *irq) +{ + free_irq(irq->virq, irq); + irq_dispose_mapping(irq->virq); + kfree(irq->name); +} + +int ocxl_afu_irq_alloc(struct ocxl_context *ctx, int *irq_id) +{ + struct afu_irq *irq; + int rc; + + irq = kzalloc(sizeof(struct afu_irq), GFP_KERNEL); + if (!irq) + return -ENOMEM; + + /* + * We limit the number of afu irqs per context and per link to + * avoid a single process or user depleting the pool of IPIs + */ + + mutex_lock(&ctx->irq_lock); + + irq->id = idr_alloc(&ctx->irq_idr, irq, 0, MAX_IRQ_PER_CONTEXT, + GFP_KERNEL); + if (irq->id < 0) { + rc = -ENOSPC; + goto err_unlock; + } + + rc = ocxl_link_irq_alloc(ctx->afu->fn->link, &irq->hw_irq); + if (rc) + goto err_idr; + + rc = setup_afu_irq(ctx, irq); + if (rc) + goto err_alloc; + + trace_ocxl_afu_irq_alloc(ctx->pasid, irq->id, irq->virq, irq->hw_irq); + mutex_unlock(&ctx->irq_lock); + + *irq_id = irq->id; + + return 0; + +err_alloc: + ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq); +err_idr: + idr_remove(&ctx->irq_idr, irq->id); +err_unlock: + mutex_unlock(&ctx->irq_lock); + kfree(irq); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_afu_irq_alloc); + +static void afu_irq_free(struct afu_irq *irq, struct ocxl_context *ctx) +{ + trace_ocxl_afu_irq_free(ctx->pasid, irq->id); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, + ocxl_irq_id_to_offset(ctx, irq->id), + 1 << PAGE_SHIFT, 1); + release_afu_irq(irq); + if (irq->free_private) + 
irq->free_private(irq->private); + ocxl_link_free_irq(ctx->afu->fn->link, irq->hw_irq); + kfree(irq); +} + +int ocxl_afu_irq_free(struct ocxl_context *ctx, int irq_id) +{ + struct afu_irq *irq; + + mutex_lock(&ctx->irq_lock); + + irq = idr_find(&ctx->irq_idr, irq_id); + if (!irq) { + mutex_unlock(&ctx->irq_lock); + return -EINVAL; + } + idr_remove(&ctx->irq_idr, irq->id); + afu_irq_free(irq, ctx); + mutex_unlock(&ctx->irq_lock); + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_afu_irq_free); + +void ocxl_afu_irq_free_all(struct ocxl_context *ctx) +{ + struct afu_irq *irq; + int id; + + mutex_lock(&ctx->irq_lock); + idr_for_each_entry(&ctx->irq_idr, irq, id) + afu_irq_free(irq, ctx); + mutex_unlock(&ctx->irq_lock); +} + +u64 ocxl_afu_irq_get_addr(struct ocxl_context *ctx, int irq_id) +{ + struct xive_irq_data *xd; + struct afu_irq *irq; + u64 addr = 0; + + mutex_lock(&ctx->irq_lock); + irq = idr_find(&ctx->irq_idr, irq_id); + if (irq) { + xd = irq_get_handler_data(irq->virq); + addr = xd ? xd->trig_page : 0; + } + mutex_unlock(&ctx->irq_lock); + return addr; +} +EXPORT_SYMBOL_GPL(ocxl_afu_irq_get_addr); diff --git a/drivers/misc/ocxl/config.c b/drivers/misc/ocxl/config.c new file mode 100644 index 000000000..92ab49705 --- /dev/null +++ b/drivers/misc/ocxl/config.c @@ -0,0 +1,937 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/pci.h> +#include <asm/pnv-ocxl.h> +#include <misc/ocxl-config.h> +#include "ocxl_internal.h" + +#define EXTRACT_BIT(val, bit) (!!(val & BIT(bit))) +#define EXTRACT_BITS(val, s, e) ((val & GENMASK(e, s)) >> s) + +#define OCXL_DVSEC_AFU_IDX_MASK GENMASK(5, 0) +#define OCXL_DVSEC_ACTAG_MASK GENMASK(11, 0) +#define OCXL_DVSEC_PASID_MASK GENMASK(19, 0) +#define OCXL_DVSEC_PASID_LOG_MASK GENMASK(4, 0) + +#define OCXL_DVSEC_TEMPL_VERSION 0x0 +#define OCXL_DVSEC_TEMPL_NAME 0x4 +#define OCXL_DVSEC_TEMPL_AFU_VERSION 0x1C +#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL 0x20 +#define OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ 0x28 +#define OCXL_DVSEC_TEMPL_MMIO_PP 0x30 +#define OCXL_DVSEC_TEMPL_MMIO_PP_SZ 0x38 +#define OCXL_DVSEC_TEMPL_ALL_MEM_SZ 0x3C +#define OCXL_DVSEC_TEMPL_LPC_MEM_START 0x40 +#define OCXL_DVSEC_TEMPL_WWID 0x48 +#define OCXL_DVSEC_TEMPL_LPC_MEM_SZ 0x58 + +#define OCXL_MAX_AFU_PER_FUNCTION 64 +#define OCXL_TEMPL_LEN_1_0 0x58 +#define OCXL_TEMPL_LEN_1_1 0x60 +#define OCXL_TEMPL_NAME_LEN 24 +#define OCXL_CFG_TIMEOUT 3 + +static int find_dvsec(struct pci_dev *dev, int dvsec_id) +{ + return pci_find_dvsec_capability(dev, PCI_VENDOR_ID_IBM, dvsec_id); +} + +static int find_dvsec_afu_ctrl(struct pci_dev *dev, u8 afu_idx) +{ + int vsec = 0; + u16 vendor, id; + u8 idx; + + while ((vsec = pci_find_next_ext_capability(dev, vsec, + OCXL_EXT_CAP_ID_DVSEC))) { + pci_read_config_word(dev, vsec + OCXL_DVSEC_VENDOR_OFFSET, + &vendor); + pci_read_config_word(dev, vsec + OCXL_DVSEC_ID_OFFSET, &id); + + if (vendor == PCI_VENDOR_ID_IBM && + id == OCXL_DVSEC_AFU_CTRL_ID) { + pci_read_config_byte(dev, + vsec + OCXL_DVSEC_AFU_CTRL_AFU_IDX, + &idx); + if (idx == afu_idx) + return vsec; + } + } + return 0; +} + +/** + * get_function_0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * + * Returns a pointer to the related device, or null if not found + */ +static struct pci_dev *get_function_0(struct pci_dev *dev) +{ + unsigned int devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); + + return pci_get_domain_bus_and_slot(pci_domain_nr(dev->bus), + dev->bus->number, devfn); +} + +static void read_pasid(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + u16 val; 
+ int pos; + + pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_PASID); + if (!pos) { + /* + * PASID capability is not mandatory, but there + * shouldn't be any AFU + */ + dev_dbg(&dev->dev, "Function doesn't require any PASID\n"); + fn->max_pasid_log = -1; + goto out; + } + pci_read_config_word(dev, pos + PCI_PASID_CAP, &val); + fn->max_pasid_log = EXTRACT_BITS(val, 8, 12); + +out: + dev_dbg(&dev->dev, "PASID capability:\n"); + dev_dbg(&dev->dev, " Max PASID log = %d\n", fn->max_pasid_log); +} + +static int read_dvsec_tl(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int pos; + + pos = find_dvsec(dev, OCXL_DVSEC_TL_ID); + if (!pos && PCI_FUNC(dev->devfn) == 0) { + dev_err(&dev->dev, "Can't find TL DVSEC\n"); + return -ENODEV; + } + if (pos && PCI_FUNC(dev->devfn) != 0) { + dev_err(&dev->dev, "TL DVSEC is only allowed on function 0\n"); + return -ENODEV; + } + fn->dvsec_tl_pos = pos; + return 0; +} + +static int read_dvsec_function(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int pos, afu_present; + u32 val; + + pos = find_dvsec(dev, OCXL_DVSEC_FUNC_ID); + if (!pos) { + dev_err(&dev->dev, "Can't find function DVSEC\n"); + return -ENODEV; + } + fn->dvsec_function_pos = pos; + + pci_read_config_dword(dev, pos + OCXL_DVSEC_FUNC_OFF_INDEX, &val); + afu_present = EXTRACT_BIT(val, 31); + if (!afu_present) { + fn->max_afu_index = -1; + dev_dbg(&dev->dev, "Function doesn't define any AFU\n"); + goto out; + } + fn->max_afu_index = EXTRACT_BITS(val, 24, 29); + +out: + dev_dbg(&dev->dev, "Function DVSEC:\n"); + dev_dbg(&dev->dev, " Max AFU index = %d\n", fn->max_afu_index); + return 0; +} + +static int read_dvsec_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int pos; + + if (fn->max_afu_index < 0) { + fn->dvsec_afu_info_pos = -1; + return 0; + } + + pos = find_dvsec(dev, OCXL_DVSEC_AFU_INFO_ID); + if (!pos) { + dev_err(&dev->dev, "Can't find AFU information DVSEC\n"); + return -ENODEV; + } + fn->dvsec_afu_info_pos = pos; + return 0; +} + +static int read_dvsec_vendor(struct pci_dev *dev) +{ + int pos; + u32 cfg, tlx, dlx, reset_reload; + + /* + * vendor specific DVSEC, for IBM images only. Some older + * images may not have it + * + * It's only used on function 0 to specify the version of some + * logic blocks and to give access to special registers to + * enable host-based flashing. + */ + if (PCI_FUNC(dev->devfn) != 0) + return 0; + + pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); + if (!pos) + return 0; + + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_CFG_VERS, &cfg); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_TLX_VERS, &tlx); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_DLX_VERS, &dlx); + pci_read_config_dword(dev, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + + dev_dbg(&dev->dev, "Vendor specific DVSEC:\n"); + dev_dbg(&dev->dev, " CFG version = 0x%x\n", cfg); + dev_dbg(&dev->dev, " TLX version = 0x%x\n", tlx); + dev_dbg(&dev->dev, " DLX version = 0x%x\n", dlx); + dev_dbg(&dev->dev, " ResetReload = 0x%x\n", reset_reload); + return 0; +} + +/** + * get_dvsec_vendor0() - Find a related PCI device (function 0) + * @dev: PCI device to match + * @dev0: The PCI device (function 0) found + * @out_pos: The position of PCI device (function 0) + * + * Returns 0 on success, negative on failure. + * + * NOTE: If it's successful, the reference of dev0 is increased, + * so after using it, the callers must call pci_dev_put() to give + * up the reference. 
+ */ +static int get_dvsec_vendor0(struct pci_dev *dev, struct pci_dev **dev0, + int *out_pos) +{ + int pos; + + if (PCI_FUNC(dev->devfn) != 0) { + dev = get_function_0(dev); + if (!dev) + return -1; + } else { + dev = pci_dev_get(dev); + } + pos = find_dvsec(dev, OCXL_DVSEC_VENDOR_ID); + if (!pos) { + pci_dev_put(dev); + return -1; + } + *dev0 = dev; + *out_pos = pos; + return 0; +} + +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + pci_dev_put(dev0); + *val = !!(reset_reload & BIT(0)); + return 0; +} + +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val) +{ + struct pci_dev *dev0; + u32 reset_reload; + int pos; + + if (get_dvsec_vendor0(dev, &dev0, &pos)) + return -1; + + pci_read_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + &reset_reload); + if (val) + reset_reload |= BIT(0); + else + reset_reload &= ~BIT(0); + pci_write_config_dword(dev0, pos + OCXL_DVSEC_VENDOR_RESET_RELOAD, + reset_reload); + pci_dev_put(dev0); + return 0; +} + +static int validate_function(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + if (fn->max_pasid_log == -1 && fn->max_afu_index >= 0) { + dev_err(&dev->dev, + "AFUs are defined but no PASIDs are requested\n"); + return -EINVAL; + } + + if (fn->max_afu_index > OCXL_MAX_AFU_PER_FUNCTION) { + dev_err(&dev->dev, + "Max AFU index out of architectural limit (%d vs %d)\n", + fn->max_afu_index, OCXL_MAX_AFU_PER_FUNCTION); + return -EINVAL; + } + return 0; +} + +int ocxl_config_read_function(struct pci_dev *dev, struct ocxl_fn_config *fn) +{ + int rc; + + read_pasid(dev, fn); + + rc = read_dvsec_tl(dev, fn); + if (rc) { + dev_err(&dev->dev, + "Invalid Transaction Layer DVSEC configuration: %d\n", + rc); + return -ENODEV; + } + + rc = read_dvsec_function(dev, fn); + if (rc) { + dev_err(&dev->dev, + "Invalid Function DVSEC configuration: %d\n", rc); + return -ENODEV; + } + + rc = read_dvsec_afu_info(dev, fn); + if (rc) { + dev_err(&dev->dev, "Invalid AFU configuration: %d\n", rc); + return -ENODEV; + } + + rc = read_dvsec_vendor(dev); + if (rc) { + dev_err(&dev->dev, + "Invalid vendor specific DVSEC configuration: %d\n", + rc); + return -ENODEV; + } + + rc = validate_function(dev, fn); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_config_read_function); + +static int read_afu_info(struct pci_dev *dev, struct ocxl_fn_config *fn, + int offset, u32 *data) +{ + u32 val; + unsigned long timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT); + int pos = fn->dvsec_afu_info_pos; + + /* Protect 'data valid' bit */ + if (EXTRACT_BIT(offset, 31)) { + dev_err(&dev->dev, "Invalid offset in AFU info DVSEC\n"); + return -EINVAL; + } + + pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, offset); + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val); + while (!EXTRACT_BIT(val, 31)) { + if (time_after_eq(jiffies, timeout)) { + dev_err(&dev->dev, + "Timeout while reading AFU info DVSEC (offset=%d)\n", + offset); + return -EBUSY; + } + cpu_relax(); + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_OFF, &val); + } + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_INFO_DATA, data); + return 0; +} + +/** + * read_template_version() - Read the template version from the AFU + * @dev: the device for the AFU + * @fn: the AFU offsets + * @len: outputs the template length + * @version: outputs the major<<8,minor version + * + * Returns 0 on 
success, negative on failure + */ +static int read_template_version(struct pci_dev *dev, struct ocxl_fn_config *fn, + u16 *len, u16 *version) +{ + u32 val32; + u8 major, minor; + int rc; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_VERSION, &val32); + if (rc) + return rc; + + *len = EXTRACT_BITS(val32, 16, 31); + major = EXTRACT_BITS(val32, 8, 15); + minor = EXTRACT_BITS(val32, 0, 7); + *version = (major << 8) + minor; + return 0; +} + +int ocxl_config_check_afu_index(struct pci_dev *dev, + struct ocxl_fn_config *fn, int afu_idx) +{ + int rc; + u16 templ_version; + u16 len, expected_len; + + pci_write_config_byte(dev, + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX, + afu_idx); + + rc = read_template_version(dev, fn, &len, &templ_version); + if (rc) + return rc; + + /* AFU index map can have holes, in which case we read all 0's */ + if (!templ_version && !len) + return 0; + + dev_dbg(&dev->dev, "AFU descriptor template version %d.%d\n", + templ_version >> 8, templ_version & 0xFF); + + switch (templ_version) { + case 0x0005: // v0.5 was used prior to the spec approval + case 0x0100: + expected_len = OCXL_TEMPL_LEN_1_0; + break; + case 0x0101: + expected_len = OCXL_TEMPL_LEN_1_1; + break; + default: + dev_warn(&dev->dev, "Unknown AFU template version %#x\n", + templ_version); + expected_len = len; + } + if (len != expected_len) + dev_warn(&dev->dev, + "Unexpected template length %#x in AFU information, expected %#x for version %#x\n", + len, expected_len, templ_version); + return 1; +} + +static int read_afu_name(struct pci_dev *dev, struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) +{ + int i, rc; + u32 val, *ptr; + + BUILD_BUG_ON(OCXL_AFU_NAME_SZ < OCXL_TEMPL_NAME_LEN); + for (i = 0; i < OCXL_TEMPL_NAME_LEN; i += 4) { + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_NAME + i, &val); + if (rc) + return rc; + ptr = (u32 *) &afu->name[i]; + *ptr = le32_to_cpu((__force __le32) val); + } + afu->name[OCXL_AFU_NAME_SZ - 1] = '\0'; /* play safe */ + return 0; +} + +static int read_afu_mmio(struct pci_dev *dev, struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) +{ + int rc; + u32 val; + + /* + * Global MMIO + */ + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL, &val); + if (rc) + return rc; + afu->global_mmio_bar = EXTRACT_BITS(val, 0, 2); + afu->global_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL + 4, &val); + if (rc) + return rc; + afu->global_mmio_offset += (u64) val << 32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_GLOBAL_SZ, &val); + if (rc) + return rc; + afu->global_mmio_size = val; + + /* + * Per-process MMIO + */ + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP, &val); + if (rc) + return rc; + afu->pp_mmio_bar = EXTRACT_BITS(val, 0, 2); + afu->pp_mmio_offset = EXTRACT_BITS(val, 16, 31) << 16; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP + 4, &val); + if (rc) + return rc; + afu->pp_mmio_offset += (u64) val << 32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_MMIO_PP_SZ, &val); + if (rc) + return rc; + afu->pp_mmio_stride = val; + + return 0; +} + +static int read_afu_control(struct pci_dev *dev, struct ocxl_afu_config *afu) +{ + int pos; + u8 val8; + u16 val16; + + pos = find_dvsec_afu_ctrl(dev, afu->idx); + if (!pos) { + dev_err(&dev->dev, "Can't find AFU control DVSEC for AFU %d\n", + afu->idx); + return -ENODEV; + } + afu->dvsec_afu_control_pos = pos; + + pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_SUP, &val8); + afu->pasid_supported_log = 
EXTRACT_BITS(val8, 0, 4); + + pci_read_config_word(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_SUP, &val16); + afu->actag_supported = EXTRACT_BITS(val16, 0, 11); + return 0; +} + +static bool char_allowed(int c) +{ + /* + * Permitted Characters : Alphanumeric, hyphen, underscore, comma + */ + if ((c >= 0x30 && c <= 0x39) /* digits */ || + (c >= 0x41 && c <= 0x5A) /* upper case */ || + (c >= 0x61 && c <= 0x7A) /* lower case */ || + c == 0 /* NULL */ || + c == 0x2D /* - */ || + c == 0x5F /* _ */ || + c == 0x2C /* , */) + return true; + return false; +} + +static int validate_afu(struct pci_dev *dev, struct ocxl_afu_config *afu) +{ + int i; + + if (!afu->name[0]) { + dev_err(&dev->dev, "Empty AFU name\n"); + return -EINVAL; + } + for (i = 0; i < OCXL_TEMPL_NAME_LEN; i++) { + if (!char_allowed(afu->name[i])) { + dev_err(&dev->dev, + "Invalid character in AFU name\n"); + return -EINVAL; + } + } + + if (afu->global_mmio_bar != 0 && + afu->global_mmio_bar != 2 && + afu->global_mmio_bar != 4) { + dev_err(&dev->dev, "Invalid global MMIO bar number\n"); + return -EINVAL; + } + if (afu->pp_mmio_bar != 0 && + afu->pp_mmio_bar != 2 && + afu->pp_mmio_bar != 4) { + dev_err(&dev->dev, "Invalid per-process MMIO bar number\n"); + return -EINVAL; + } + return 0; +} + +/** + * read_afu_lpc_memory_info() - Populate AFU metadata regarding LPC memory + * @dev: the device for the AFU + * @fn: the AFU offsets + * @afu: the AFU struct to populate the LPC metadata into + * + * Returns 0 on success, negative on failure + */ +static int read_afu_lpc_memory_info(struct pci_dev *dev, + struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu) +{ + int rc; + u32 val32; + u16 templ_version; + u16 templ_len; + u64 total_mem_size = 0; + u64 lpc_mem_size = 0; + + afu->lpc_mem_offset = 0; + afu->lpc_mem_size = 0; + afu->special_purpose_mem_offset = 0; + afu->special_purpose_mem_size = 0; + /* + * For AFUs following template v1.0, the LPC memory covers the + * total memory. Its size is a power of 2. + * + * For AFUs with template >= v1.01, the total memory size is + * still a power of 2, but it is split in 2 parts: + * - the LPC memory, whose size can now be anything + * - the remainder memory is a special purpose memory, whose + * definition is AFU-dependent. It is not accessible through + * the usual commands for LPC memory + */ + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_ALL_MEM_SZ, &val32); + if (rc) + return rc; + + val32 = EXTRACT_BITS(val32, 0, 7); + if (!val32) + return 0; /* No LPC memory */ + + /* + * The configuration space spec allows for a memory size of up + * to 2^255 bytes. + * + * Current generation hardware uses 56-bit physical addresses, + * but we won't be able to get near close to that, as we won't + * have a hole big enough in the memory map. Let it pass in + * the driver for now. We'll get an error from the firmware + * when trying to configure something too big. 
+ */ + total_mem_size = 1ull << val32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START, &val32); + if (rc) + return rc; + + afu->lpc_mem_offset = val32; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_LPC_MEM_START + 4, &val32); + if (rc) + return rc; + + afu->lpc_mem_offset |= (u64) val32 << 32; + + rc = read_template_version(dev, fn, &templ_len, &templ_version); + if (rc) + return rc; + + if (templ_version >= 0x0101) { + rc = read_afu_info(dev, fn, + OCXL_DVSEC_TEMPL_LPC_MEM_SZ, &val32); + if (rc) + return rc; + lpc_mem_size = val32; + + rc = read_afu_info(dev, fn, + OCXL_DVSEC_TEMPL_LPC_MEM_SZ + 4, &val32); + if (rc) + return rc; + lpc_mem_size |= (u64) val32 << 32; + } else { + lpc_mem_size = total_mem_size; + } + afu->lpc_mem_size = lpc_mem_size; + + if (lpc_mem_size < total_mem_size) { + afu->special_purpose_mem_offset = + afu->lpc_mem_offset + lpc_mem_size; + afu->special_purpose_mem_size = + total_mem_size - lpc_mem_size; + } + return 0; +} + +int ocxl_config_read_afu(struct pci_dev *dev, struct ocxl_fn_config *fn, + struct ocxl_afu_config *afu, u8 afu_idx) +{ + int rc; + u32 val32; + + /* + * First, we need to write the AFU idx for the AFU we want to + * access. + */ + WARN_ON((afu_idx & OCXL_DVSEC_AFU_IDX_MASK) != afu_idx); + afu->idx = afu_idx; + pci_write_config_byte(dev, + fn->dvsec_afu_info_pos + OCXL_DVSEC_AFU_INFO_AFU_IDX, + afu->idx); + + rc = read_afu_name(dev, fn, afu); + if (rc) + return rc; + + rc = read_afu_info(dev, fn, OCXL_DVSEC_TEMPL_AFU_VERSION, &val32); + if (rc) + return rc; + afu->version_major = EXTRACT_BITS(val32, 24, 31); + afu->version_minor = EXTRACT_BITS(val32, 16, 23); + afu->afuc_type = EXTRACT_BITS(val32, 14, 15); + afu->afum_type = EXTRACT_BITS(val32, 12, 13); + afu->profile = EXTRACT_BITS(val32, 0, 7); + + rc = read_afu_mmio(dev, fn, afu); + if (rc) + return rc; + + rc = read_afu_lpc_memory_info(dev, fn, afu); + if (rc) + return rc; + + rc = read_afu_control(dev, afu); + if (rc) + return rc; + + dev_dbg(&dev->dev, "AFU configuration:\n"); + dev_dbg(&dev->dev, " name = %s\n", afu->name); + dev_dbg(&dev->dev, " version = %d.%d\n", afu->version_major, + afu->version_minor); + dev_dbg(&dev->dev, " global mmio bar = %hhu\n", afu->global_mmio_bar); + dev_dbg(&dev->dev, " global mmio offset = %#llx\n", + afu->global_mmio_offset); + dev_dbg(&dev->dev, " global mmio size = %#x\n", afu->global_mmio_size); + dev_dbg(&dev->dev, " pp mmio bar = %hhu\n", afu->pp_mmio_bar); + dev_dbg(&dev->dev, " pp mmio offset = %#llx\n", afu->pp_mmio_offset); + dev_dbg(&dev->dev, " pp mmio stride = %#x\n", afu->pp_mmio_stride); + dev_dbg(&dev->dev, " lpc_mem offset = %#llx\n", afu->lpc_mem_offset); + dev_dbg(&dev->dev, " lpc_mem size = %#llx\n", afu->lpc_mem_size); + dev_dbg(&dev->dev, " special purpose mem offset = %#llx\n", + afu->special_purpose_mem_offset); + dev_dbg(&dev->dev, " special purpose mem size = %#llx\n", + afu->special_purpose_mem_size); + dev_dbg(&dev->dev, " pasid supported (log) = %u\n", + afu->pasid_supported_log); + dev_dbg(&dev->dev, " actag supported = %u\n", + afu->actag_supported); + + rc = validate_afu(dev, afu); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_config_read_afu); + +int ocxl_config_get_actag_info(struct pci_dev *dev, u16 *base, u16 *enabled, + u16 *supported) +{ + int rc; + + /* + * This is really a simple wrapper for the kernel API, to + * avoid an external driver using ocxl as a library to call + * platform-dependent code + */ + rc = pnv_ocxl_get_actag(dev, base, enabled, supported); + if (rc) { + dev_err(&dev->dev, "Can't get 
actag for device: %d\n", rc); + return rc; + } + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_config_get_actag_info); + +void ocxl_config_set_afu_actag(struct pci_dev *dev, int pos, int actag_base, + int actag_count) +{ + u16 val; + + val = actag_count & OCXL_DVSEC_ACTAG_MASK; + pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_EN, val); + + val = actag_base & OCXL_DVSEC_ACTAG_MASK; + pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_ACTAG_BASE, val); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_afu_actag); + +int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count) +{ + return pnv_ocxl_get_pasid_count(dev, count); +} + +void ocxl_config_set_afu_pasid(struct pci_dev *dev, int pos, int pasid_base, + u32 pasid_count_log) +{ + u8 val8; + u32 val32; + + val8 = pasid_count_log & OCXL_DVSEC_PASID_LOG_MASK; + pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_EN, val8); + + pci_read_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE, + &val32); + val32 &= ~OCXL_DVSEC_PASID_MASK; + val32 |= pasid_base & OCXL_DVSEC_PASID_MASK; + pci_write_config_dword(dev, pos + OCXL_DVSEC_AFU_CTRL_PASID_BASE, + val32); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_afu_pasid); + +void ocxl_config_set_afu_state(struct pci_dev *dev, int pos, int enable) +{ + u8 val; + + pci_read_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, &val); + if (enable) + val |= 1; + else + val &= 0xFE; + pci_write_config_byte(dev, pos + OCXL_DVSEC_AFU_CTRL_ENABLE, val); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_afu_state); + +int ocxl_config_set_TL(struct pci_dev *dev, int tl_dvsec) +{ + u32 val; + __be32 *be32ptr; + u8 timers; + int i, rc; + long recv_cap; + char *recv_rate; + + /* + * Skip on function != 0, as the TL can only be defined on 0 + */ + if (PCI_FUNC(dev->devfn) != 0) + return 0; + + recv_rate = kzalloc(PNV_OCXL_TL_RATE_BUF_SIZE, GFP_KERNEL); + if (!recv_rate) + return -ENOMEM; + /* + * The spec defines 64 templates for messages in the + * Transaction Layer (TL). + * + * The host and device each support a subset, so we need to + * configure the transmitters on each side to send only + * templates the receiver understands, at a rate the receiver + * can process. Per the spec, template 0 must be supported by + * everybody. That's the template which has been used by the + * host and device so far. + * + * The sending rate limit must be set before the template is + * enabled. 
+ */ + + /* + * Device -> host + */ + rc = pnv_ocxl_get_tl_cap(dev, &recv_cap, recv_rate, + PNV_OCXL_TL_RATE_BUF_SIZE); + if (rc) + goto out; + + for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) { + be32ptr = (__be32 *) &recv_rate[i]; + pci_write_config_dword(dev, + tl_dvsec + OCXL_DVSEC_TL_SEND_RATE + i, + be32_to_cpu(*be32ptr)); + } + val = recv_cap >> 32; + pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP, val); + val = recv_cap & GENMASK(31, 0); + pci_write_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_SEND_CAP + 4, val); + + /* + * Host -> device + */ + for (i = 0; i < PNV_OCXL_TL_RATE_BUF_SIZE; i += 4) { + pci_read_config_dword(dev, + tl_dvsec + OCXL_DVSEC_TL_RECV_RATE + i, + &val); + be32ptr = (__be32 *) &recv_rate[i]; + *be32ptr = cpu_to_be32(val); + } + pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP, &val); + recv_cap = (long) val << 32; + pci_read_config_dword(dev, tl_dvsec + OCXL_DVSEC_TL_RECV_CAP + 4, &val); + recv_cap |= val; + + rc = pnv_ocxl_set_tl_conf(dev, recv_cap, __pa(recv_rate), + PNV_OCXL_TL_RATE_BUF_SIZE); + if (rc) + goto out; + + /* + * Opencapi commands needing to be retried are classified per + * the TL in 2 groups: short and long commands. + * + * The short back off timer it not used for now. It will be + * for opencapi 4.0. + * + * The long back off timer is typically used when an AFU hits + * a page fault but the NPU is already processing one. So the + * AFU needs to wait before it can resubmit. Having a value + * too low doesn't break anything, but can generate extra + * traffic on the link. + * We set it to 1.6 us for now. It's shorter than, but in the + * same order of magnitude as the time spent to process a page + * fault. + */ + timers = 0x2 << 4; /* long timer = 1.6 us */ + pci_write_config_byte(dev, tl_dvsec + OCXL_DVSEC_TL_BACKOFF_TIMERS, + timers); + + rc = 0; +out: + kfree(recv_rate); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_config_set_TL); + +int ocxl_config_terminate_pasid(struct pci_dev *dev, int afu_control, int pasid) +{ + u32 val; + unsigned long timeout; + + pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + &val); + if (EXTRACT_BIT(val, 20)) { + dev_err(&dev->dev, + "Can't terminate PASID %#x, previous termination didn't complete\n", + pasid); + return -EBUSY; + } + + val &= ~OCXL_DVSEC_PASID_MASK; + val |= pasid & OCXL_DVSEC_PASID_MASK; + val |= BIT(20); + pci_write_config_dword(dev, + afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + val); + + timeout = jiffies + (HZ * OCXL_CFG_TIMEOUT); + pci_read_config_dword(dev, afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + &val); + while (EXTRACT_BIT(val, 20)) { + if (time_after_eq(jiffies, timeout)) { + dev_err(&dev->dev, + "Timeout while waiting for AFU to terminate PASID %#x\n", + pasid); + return -EBUSY; + } + cpu_relax(); + pci_read_config_dword(dev, + afu_control + OCXL_DVSEC_AFU_CTRL_TERM_PASID, + &val); + } + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_config_terminate_pasid); + +void ocxl_config_set_actag(struct pci_dev *dev, int func_dvsec, u32 tag_first, + u32 tag_count) +{ + u32 val; + + val = (tag_first & OCXL_DVSEC_ACTAG_MASK) << 16; + val |= tag_count & OCXL_DVSEC_ACTAG_MASK; + pci_write_config_dword(dev, func_dvsec + OCXL_DVSEC_FUNC_OFF_ACTAG, + val); +} +EXPORT_SYMBOL_GPL(ocxl_config_set_actag); diff --git a/drivers/misc/ocxl/context.c b/drivers/misc/ocxl/context.c new file mode 100644 index 000000000..9eb0d93b0 --- /dev/null +++ b/drivers/misc/ocxl/context.c @@ -0,0 +1,296 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. 
+#include <linux/sched/mm.h> +#include "trace.h" +#include "ocxl_internal.h" + +int ocxl_context_alloc(struct ocxl_context **context, struct ocxl_afu *afu, + struct address_space *mapping) +{ + int pasid; + struct ocxl_context *ctx; + + ctx = kzalloc(sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return -ENOMEM; + + ctx->afu = afu; + mutex_lock(&afu->contexts_lock); + pasid = idr_alloc(&afu->contexts_idr, ctx, afu->pasid_base, + afu->pasid_base + afu->pasid_max, GFP_KERNEL); + if (pasid < 0) { + mutex_unlock(&afu->contexts_lock); + kfree(ctx); + return pasid; + } + afu->pasid_count++; + mutex_unlock(&afu->contexts_lock); + + ctx->pasid = pasid; + ctx->status = OPENED; + mutex_init(&ctx->status_mutex); + ctx->mapping = mapping; + mutex_init(&ctx->mapping_lock); + init_waitqueue_head(&ctx->events_wq); + mutex_init(&ctx->xsl_error_lock); + mutex_init(&ctx->irq_lock); + idr_init(&ctx->irq_idr); + ctx->tidr = 0; + + /* + * Keep a reference on the AFU to make sure it's valid for the + * duration of the life of the context + */ + ocxl_afu_get(afu); + *context = ctx; + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_context_alloc); + +/* + * Callback for when a translation fault triggers an error + * data: a pointer to the context which triggered the fault + * addr: the address that triggered the error + * dsisr: the value of the PPC64 dsisr register + */ +static void xsl_fault_error(void *data, u64 addr, u64 dsisr) +{ + struct ocxl_context *ctx = (struct ocxl_context *) data; + + mutex_lock(&ctx->xsl_error_lock); + ctx->xsl_error.addr = addr; + ctx->xsl_error.dsisr = dsisr; + ctx->xsl_error.count++; + mutex_unlock(&ctx->xsl_error_lock); + + wake_up_all(&ctx->events_wq); +} + +int ocxl_context_attach(struct ocxl_context *ctx, u64 amr, struct mm_struct *mm) +{ + int rc; + unsigned long pidr = 0; + struct pci_dev *dev; + + // Locks both status & tidr + mutex_lock(&ctx->status_mutex); + if (ctx->status != OPENED) { + rc = -EIO; + goto out; + } + + if (mm) + pidr = mm->context.id; + + dev = to_pci_dev(ctx->afu->fn->dev.parent); + rc = ocxl_link_add_pe(ctx->afu->fn->link, ctx->pasid, pidr, ctx->tidr, + amr, pci_dev_id(dev), mm, xsl_fault_error, ctx); + if (rc) + goto out; + + ctx->status = ATTACHED; +out: + mutex_unlock(&ctx->status_mutex); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_context_attach); + +static vm_fault_t map_afu_irq(struct vm_area_struct *vma, unsigned long address, + u64 offset, struct ocxl_context *ctx) +{ + u64 trigger_addr; + int irq_id = ocxl_irq_offset_to_id(ctx, offset); + + trigger_addr = ocxl_afu_irq_get_addr(ctx, irq_id); + if (!trigger_addr) + return VM_FAULT_SIGBUS; + + return vmf_insert_pfn(vma, address, trigger_addr >> PAGE_SHIFT); +} + +static vm_fault_t map_pp_mmio(struct vm_area_struct *vma, unsigned long address, + u64 offset, struct ocxl_context *ctx) +{ + u64 pp_mmio_addr; + int pasid_off; + vm_fault_t ret; + + if (offset >= ctx->afu->config.pp_mmio_stride) + return VM_FAULT_SIGBUS; + + mutex_lock(&ctx->status_mutex); + if (ctx->status != ATTACHED) { + mutex_unlock(&ctx->status_mutex); + pr_debug("%s: Context not attached, failing mmio mmap\n", + __func__); + return VM_FAULT_SIGBUS; + } + + pasid_off = ctx->pasid - ctx->afu->pasid_base; + pp_mmio_addr = ctx->afu->pp_mmio_start + + pasid_off * ctx->afu->config.pp_mmio_stride + + offset; + + ret = vmf_insert_pfn(vma, address, pp_mmio_addr >> PAGE_SHIFT); + mutex_unlock(&ctx->status_mutex); + return ret; +} + +static vm_fault_t ocxl_mmap_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ocxl_context *ctx = 
vma->vm_file->private_data; + u64 offset; + vm_fault_t ret; + + offset = vmf->pgoff << PAGE_SHIFT; + pr_debug("%s: pasid %d address 0x%lx offset 0x%llx\n", __func__, + ctx->pasid, vmf->address, offset); + + if (offset < ctx->afu->irq_base_offset) + ret = map_pp_mmio(vma, vmf->address, offset, ctx); + else + ret = map_afu_irq(vma, vmf->address, offset, ctx); + return ret; +} + +static const struct vm_operations_struct ocxl_vmops = { + .fault = ocxl_mmap_fault, +}; + +static int check_mmap_afu_irq(struct ocxl_context *ctx, + struct vm_area_struct *vma) +{ + int irq_id = ocxl_irq_offset_to_id(ctx, vma->vm_pgoff << PAGE_SHIFT); + + /* only one page */ + if (vma_pages(vma) != 1) + return -EINVAL; + + /* check offset validty */ + if (!ocxl_afu_irq_get_addr(ctx, irq_id)) + return -EINVAL; + + /* + * trigger page should only be accessible in write mode. + * + * It's a bit theoretical, as a page mmaped with only + * PROT_WRITE is currently readable, but it doesn't hurt. + */ + if ((vma->vm_flags & VM_READ) || (vma->vm_flags & VM_EXEC) || + !(vma->vm_flags & VM_WRITE)) + return -EINVAL; + vma->vm_flags &= ~(VM_MAYREAD | VM_MAYEXEC); + return 0; +} + +static int check_mmap_mmio(struct ocxl_context *ctx, + struct vm_area_struct *vma) +{ + if ((vma_pages(vma) + vma->vm_pgoff) > + (ctx->afu->config.pp_mmio_stride >> PAGE_SHIFT)) + return -EINVAL; + return 0; +} + +int ocxl_context_mmap(struct ocxl_context *ctx, struct vm_area_struct *vma) +{ + int rc; + + if ((vma->vm_pgoff << PAGE_SHIFT) < ctx->afu->irq_base_offset) + rc = check_mmap_mmio(ctx, vma); + else + rc = check_mmap_afu_irq(ctx, vma); + if (rc) + return rc; + + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &ocxl_vmops; + return 0; +} + +int ocxl_context_detach(struct ocxl_context *ctx) +{ + struct pci_dev *dev; + int afu_control_pos; + enum ocxl_context_status status; + int rc; + + mutex_lock(&ctx->status_mutex); + status = ctx->status; + ctx->status = CLOSED; + mutex_unlock(&ctx->status_mutex); + if (status != ATTACHED) + return 0; + + dev = to_pci_dev(ctx->afu->fn->dev.parent); + afu_control_pos = ctx->afu->config.dvsec_afu_control_pos; + + mutex_lock(&ctx->afu->afu_control_lock); + rc = ocxl_config_terminate_pasid(dev, afu_control_pos, ctx->pasid); + mutex_unlock(&ctx->afu->afu_control_lock); + trace_ocxl_terminate_pasid(ctx->pasid, rc); + if (rc) { + /* + * If we timeout waiting for the AFU to terminate the + * pasid, then it's dangerous to clean up the Process + * Element entry in the SPA, as it may be referenced + * in the future by the AFU. In which case, we would + * checkstop because of an invalid PE access (FIR + * register 2, bit 42). So leave the PE + * defined. Caller shouldn't free the context so that + * PASID remains allocated. + * + * A link reset will be required to cleanup the AFU + * and the SPA. + */ + if (rc == -EBUSY) + return rc; + } + rc = ocxl_link_remove_pe(ctx->afu->fn->link, ctx->pasid); + if (rc) { + dev_warn(&dev->dev, + "Couldn't remove PE entry cleanly: %d\n", rc); + } + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_context_detach); + +void ocxl_context_detach_all(struct ocxl_afu *afu) +{ + struct ocxl_context *ctx; + int tmp; + + mutex_lock(&afu->contexts_lock); + idr_for_each_entry(&afu->contexts_idr, ctx, tmp) { + ocxl_context_detach(ctx); + /* + * We are force detaching - remove any active mmio + * mappings so userspace cannot interfere with the + * card if it comes back. 
Easiest way to exercise + * this is to unbind and rebind the driver via sysfs + * while it is in use. + */ + mutex_lock(&ctx->mapping_lock); + if (ctx->mapping) + unmap_mapping_range(ctx->mapping, 0, 0, 1); + mutex_unlock(&ctx->mapping_lock); + } + mutex_unlock(&afu->contexts_lock); +} + +void ocxl_context_free(struct ocxl_context *ctx) +{ + mutex_lock(&ctx->afu->contexts_lock); + ctx->afu->pasid_count--; + idr_remove(&ctx->afu->contexts_idr, ctx->pasid); + mutex_unlock(&ctx->afu->contexts_lock); + + ocxl_afu_irq_free_all(ctx); + idr_destroy(&ctx->irq_idr); + /* reference to the AFU taken in ocxl_context_alloc() */ + ocxl_afu_put(ctx->afu); + kfree(ctx); +} +EXPORT_SYMBOL_GPL(ocxl_context_free); diff --git a/drivers/misc/ocxl/core.c b/drivers/misc/ocxl/core.c new file mode 100644 index 000000000..aebfc53a2 --- /dev/null +++ b/drivers/misc/ocxl/core.c @@ -0,0 +1,569 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2019 IBM Corp. +#include <linux/idr.h> +#include "ocxl_internal.h" + +static struct ocxl_fn *ocxl_fn_get(struct ocxl_fn *fn) +{ + return (get_device(&fn->dev) == NULL) ? NULL : fn; +} + +static void ocxl_fn_put(struct ocxl_fn *fn) +{ + put_device(&fn->dev); +} + +static struct ocxl_afu *alloc_afu(struct ocxl_fn *fn) +{ + struct ocxl_afu *afu; + + afu = kzalloc(sizeof(struct ocxl_afu), GFP_KERNEL); + if (!afu) + return NULL; + + kref_init(&afu->kref); + mutex_init(&afu->contexts_lock); + mutex_init(&afu->afu_control_lock); + idr_init(&afu->contexts_idr); + afu->fn = fn; + ocxl_fn_get(fn); + return afu; +} + +static void free_afu(struct kref *kref) +{ + struct ocxl_afu *afu = container_of(kref, struct ocxl_afu, kref); + + idr_destroy(&afu->contexts_idr); + ocxl_fn_put(afu->fn); + kfree(afu); +} + +void ocxl_afu_get(struct ocxl_afu *afu) +{ + kref_get(&afu->kref); +} +EXPORT_SYMBOL_GPL(ocxl_afu_get); + +void ocxl_afu_put(struct ocxl_afu *afu) +{ + kref_put(&afu->kref, free_afu); +} +EXPORT_SYMBOL_GPL(ocxl_afu_put); + +static int assign_afu_actag(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int actag_count, actag_offset; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + + /* + * if there were not enough actags for the function, each afu + * reduces its count as well + */ + actag_count = afu->config.actag_supported * + fn->actag_enabled / fn->actag_supported; + actag_offset = ocxl_actag_afu_alloc(fn, actag_count); + if (actag_offset < 0) { + dev_err(&pci_dev->dev, "Can't allocate %d actags for AFU: %d\n", + actag_count, actag_offset); + return actag_offset; + } + afu->actag_base = fn->actag_base + actag_offset; + afu->actag_enabled = actag_count; + + ocxl_config_set_afu_actag(pci_dev, afu->config.dvsec_afu_control_pos, + afu->actag_base, afu->actag_enabled); + dev_dbg(&pci_dev->dev, "actag base=%d enabled=%d\n", + afu->actag_base, afu->actag_enabled); + return 0; +} + +static void reclaim_afu_actag(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int start_offset, size; + + start_offset = afu->actag_base - fn->actag_base; + size = afu->actag_enabled; + ocxl_actag_afu_free(afu->fn, start_offset, size); +} + +static int assign_afu_pasid(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int pasid_count, pasid_offset; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + + /* + * We only support the case where the function configuration + * requested enough PASIDs to cover all AFUs. 
+ */ + pasid_count = 1 << afu->config.pasid_supported_log; + pasid_offset = ocxl_pasid_afu_alloc(fn, pasid_count); + if (pasid_offset < 0) { + dev_err(&pci_dev->dev, "Can't allocate %d PASIDs for AFU: %d\n", + pasid_count, pasid_offset); + return pasid_offset; + } + afu->pasid_base = fn->pasid_base + pasid_offset; + afu->pasid_count = 0; + afu->pasid_max = pasid_count; + + ocxl_config_set_afu_pasid(pci_dev, afu->config.dvsec_afu_control_pos, + afu->pasid_base, + afu->config.pasid_supported_log); + dev_dbg(&pci_dev->dev, "PASID base=%d, enabled=%d\n", + afu->pasid_base, pasid_count); + return 0; +} + +static void reclaim_afu_pasid(struct ocxl_afu *afu) +{ + struct ocxl_fn *fn = afu->fn; + int start_offset, size; + + start_offset = afu->pasid_base - fn->pasid_base; + size = 1 << afu->config.pasid_supported_log; + ocxl_pasid_afu_free(afu->fn, start_offset, size); +} + +static int reserve_fn_bar(struct ocxl_fn *fn, int bar) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + int rc, idx; + + if (bar != 0 && bar != 2 && bar != 4) + return -EINVAL; + + idx = bar >> 1; + if (fn->bar_used[idx]++ == 0) { + rc = pci_request_region(dev, bar, "ocxl"); + if (rc) + return rc; + } + return 0; +} + +static void release_fn_bar(struct ocxl_fn *fn, int bar) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + int idx; + + if (bar != 0 && bar != 2 && bar != 4) + return; + + idx = bar >> 1; + if (--fn->bar_used[idx] == 0) + pci_release_region(dev, bar); + WARN_ON(fn->bar_used[idx] < 0); +} + +static int map_mmio_areas(struct ocxl_afu *afu) +{ + int rc; + struct pci_dev *pci_dev = to_pci_dev(afu->fn->dev.parent); + + rc = reserve_fn_bar(afu->fn, afu->config.global_mmio_bar); + if (rc) + return rc; + + rc = reserve_fn_bar(afu->fn, afu->config.pp_mmio_bar); + if (rc) { + release_fn_bar(afu->fn, afu->config.global_mmio_bar); + return rc; + } + + afu->global_mmio_start = + pci_resource_start(pci_dev, afu->config.global_mmio_bar) + + afu->config.global_mmio_offset; + afu->pp_mmio_start = + pci_resource_start(pci_dev, afu->config.pp_mmio_bar) + + afu->config.pp_mmio_offset; + + afu->global_mmio_ptr = ioremap(afu->global_mmio_start, + afu->config.global_mmio_size); + if (!afu->global_mmio_ptr) { + release_fn_bar(afu->fn, afu->config.pp_mmio_bar); + release_fn_bar(afu->fn, afu->config.global_mmio_bar); + dev_err(&pci_dev->dev, "Error mapping global mmio area\n"); + return -ENOMEM; + } + + /* + * Leave an empty page between the per-process mmio area and + * the AFU interrupt mappings + */ + afu->irq_base_offset = afu->config.pp_mmio_stride + PAGE_SIZE; + return 0; +} + +static void unmap_mmio_areas(struct ocxl_afu *afu) +{ + if (afu->global_mmio_ptr) { + iounmap(afu->global_mmio_ptr); + afu->global_mmio_ptr = NULL; + } + afu->global_mmio_start = 0; + afu->pp_mmio_start = 0; + release_fn_bar(afu->fn, afu->config.pp_mmio_bar); + release_fn_bar(afu->fn, afu->config.global_mmio_bar); +} + +static int configure_afu(struct ocxl_afu *afu, u8 afu_idx, struct pci_dev *dev) +{ + int rc; + + rc = ocxl_config_read_afu(dev, &afu->fn->config, &afu->config, afu_idx); + if (rc) + return rc; + + rc = assign_afu_actag(afu); + if (rc) + return rc; + + rc = assign_afu_pasid(afu); + if (rc) + goto err_free_actag; + + rc = map_mmio_areas(afu); + if (rc) + goto err_free_pasid; + + return 0; + +err_free_pasid: + reclaim_afu_pasid(afu); +err_free_actag: + reclaim_afu_actag(afu); + return rc; +} + +static void deconfigure_afu(struct ocxl_afu *afu) +{ + unmap_mmio_areas(afu); + reclaim_afu_pasid(afu); + reclaim_afu_actag(afu); +} + 
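/*
 * Worked example of the resource partitioning above (hypothetical numbers,
 * illustration only): if the platform enabled 32 of the 64 actags the
 * function advertised (fn->actag_enabled = 32, fn->actag_supported = 64),
 * an AFU with config.actag_supported = 16 is scaled down by
 * assign_afu_actag() to 16 * 32 / 64 = 8 actags. For PASIDs, an AFU with
 * config.pasid_supported_log = 4 gets 1 << 4 = 16 contiguous PASIDs from
 * assign_afu_pasid(), starting at fn->pasid_base + pasid_offset; contexts
 * later pick an individual PASID from that range in ocxl_context_alloc().
 */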
+static int activate_afu(struct pci_dev *dev, struct ocxl_afu *afu) +{ + ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 1); + + return 0; +} + +static void deactivate_afu(struct ocxl_afu *afu) +{ + struct pci_dev *dev = to_pci_dev(afu->fn->dev.parent); + + ocxl_config_set_afu_state(dev, afu->config.dvsec_afu_control_pos, 0); +} + +static int init_afu(struct pci_dev *dev, struct ocxl_fn *fn, u8 afu_idx) +{ + int rc; + struct ocxl_afu *afu; + + afu = alloc_afu(fn); + if (!afu) + return -ENOMEM; + + rc = configure_afu(afu, afu_idx, dev); + if (rc) { + ocxl_afu_put(afu); + return rc; + } + + rc = activate_afu(dev, afu); + if (rc) { + deconfigure_afu(afu); + ocxl_afu_put(afu); + return rc; + } + + list_add_tail(&afu->list, &fn->afu_list); + + return 0; +} + +static void remove_afu(struct ocxl_afu *afu) +{ + list_del(&afu->list); + ocxl_context_detach_all(afu); + deactivate_afu(afu); + deconfigure_afu(afu); + ocxl_afu_put(afu); // matches the implicit get in alloc_afu +} + +static struct ocxl_fn *alloc_function(void) +{ + struct ocxl_fn *fn; + + fn = kzalloc(sizeof(struct ocxl_fn), GFP_KERNEL); + if (!fn) + return NULL; + + INIT_LIST_HEAD(&fn->afu_list); + INIT_LIST_HEAD(&fn->pasid_list); + INIT_LIST_HEAD(&fn->actag_list); + + return fn; +} + +static void free_function(struct ocxl_fn *fn) +{ + WARN_ON(!list_empty(&fn->afu_list)); + WARN_ON(!list_empty(&fn->pasid_list)); + kfree(fn); +} + +static void free_function_dev(struct device *dev) +{ + struct ocxl_fn *fn = container_of(dev, struct ocxl_fn, dev); + + free_function(fn); +} + +static int set_function_device(struct ocxl_fn *fn, struct pci_dev *dev) +{ + fn->dev.parent = &dev->dev; + fn->dev.release = free_function_dev; + return dev_set_name(&fn->dev, "ocxlfn.%s", dev_name(&dev->dev)); +} + +static int assign_function_actag(struct ocxl_fn *fn) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + u16 base, enabled, supported; + int rc; + + rc = ocxl_config_get_actag_info(dev, &base, &enabled, &supported); + if (rc) + return rc; + + fn->actag_base = base; + fn->actag_enabled = enabled; + fn->actag_supported = supported; + + ocxl_config_set_actag(dev, fn->config.dvsec_function_pos, + fn->actag_base, fn->actag_enabled); + dev_dbg(&fn->dev, "actag range starting at %d, enabled %d\n", + fn->actag_base, fn->actag_enabled); + return 0; +} + +static int set_function_pasid(struct ocxl_fn *fn) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + int rc, desired_count, max_count; + + /* A function may not require any PASID */ + if (fn->config.max_pasid_log < 0) + return 0; + + rc = ocxl_config_get_pasid_info(dev, &max_count); + if (rc) + return rc; + + desired_count = 1 << fn->config.max_pasid_log; + + if (desired_count > max_count) { + dev_err(&fn->dev, + "Function requires more PASIDs than is available (%d vs. %d)\n", + desired_count, max_count); + return -ENOSPC; + } + + fn->pasid_base = 0; + return 0; +} + +static int configure_function(struct ocxl_fn *fn, struct pci_dev *dev) +{ + int rc; + + rc = pci_enable_device(dev); + if (rc) { + dev_err(&dev->dev, "pci_enable_device failed: %d\n", rc); + return rc; + } + + /* + * Once it has been confirmed to work on our hardware, we + * should reset the function, to force the adapter to restart + * from scratch. + * A function reset would also reset all its AFUs. + * + * Some hints for implementation: + * + * - there's not status bit to know when the reset is done. We + * should try reading the config space to know when it's + * done. 
+ * - probably something like: + * Reset + * wait 100ms + * issue config read + * allow device up to 1 sec to return success on config + * read before declaring it broken + * + * Some shared logic on the card (CFG, TLX) won't be reset, so + * there's no guarantee that it will be enough. + */ + rc = ocxl_config_read_function(dev, &fn->config); + if (rc) + return rc; + + rc = set_function_device(fn, dev); + if (rc) + return rc; + + rc = assign_function_actag(fn); + if (rc) + return rc; + + rc = set_function_pasid(fn); + if (rc) + return rc; + + rc = ocxl_link_setup(dev, 0, &fn->link); + if (rc) + return rc; + + rc = ocxl_config_set_TL(dev, fn->config.dvsec_tl_pos); + if (rc) { + ocxl_link_release(dev, fn->link); + return rc; + } + return 0; +} + +static void deconfigure_function(struct ocxl_fn *fn) +{ + struct pci_dev *dev = to_pci_dev(fn->dev.parent); + + ocxl_link_release(dev, fn->link); + pci_disable_device(dev); +} + +static struct ocxl_fn *init_function(struct pci_dev *dev) +{ + struct ocxl_fn *fn; + int rc; + + fn = alloc_function(); + if (!fn) + return ERR_PTR(-ENOMEM); + + rc = configure_function(fn, dev); + if (rc) { + free_function(fn); + return ERR_PTR(rc); + } + + rc = device_register(&fn->dev); + if (rc) { + deconfigure_function(fn); + put_device(&fn->dev); + return ERR_PTR(rc); + } + return fn; +} + +// Device detection & initialisation + +struct ocxl_fn *ocxl_function_open(struct pci_dev *dev) +{ + int rc, afu_count = 0; + u8 afu; + struct ocxl_fn *fn; + + if (!radix_enabled()) { + dev_err(&dev->dev, "Unsupported memory model (hash)\n"); + return ERR_PTR(-ENODEV); + } + + fn = init_function(dev); + if (IS_ERR(fn)) { + dev_err(&dev->dev, "function init failed: %li\n", + PTR_ERR(fn)); + return fn; + } + + for (afu = 0; afu <= fn->config.max_afu_index; afu++) { + rc = ocxl_config_check_afu_index(dev, &fn->config, afu); + if (rc > 0) { + rc = init_afu(dev, fn, afu); + if (rc) { + dev_err(&dev->dev, + "Can't initialize AFU index %d\n", afu); + continue; + } + afu_count++; + } + } + dev_info(&dev->dev, "%d AFU(s) configured\n", afu_count); + return fn; +} +EXPORT_SYMBOL_GPL(ocxl_function_open); + +struct list_head *ocxl_function_afu_list(struct ocxl_fn *fn) +{ + return &fn->afu_list; +} +EXPORT_SYMBOL_GPL(ocxl_function_afu_list); + +struct ocxl_afu *ocxl_function_fetch_afu(struct ocxl_fn *fn, u8 afu_idx) +{ + struct ocxl_afu *afu; + + list_for_each_entry(afu, &fn->afu_list, list) { + if (afu->config.idx == afu_idx) + return afu; + } + + return NULL; +} +EXPORT_SYMBOL_GPL(ocxl_function_fetch_afu); + +const struct ocxl_fn_config *ocxl_function_config(struct ocxl_fn *fn) +{ + return &fn->config; +} +EXPORT_SYMBOL_GPL(ocxl_function_config); + +void ocxl_function_close(struct ocxl_fn *fn) +{ + struct ocxl_afu *afu, *tmp; + + list_for_each_entry_safe(afu, tmp, &fn->afu_list, list) { + remove_afu(afu); + } + + deconfigure_function(fn); + device_unregister(&fn->dev); +} +EXPORT_SYMBOL_GPL(ocxl_function_close); + +// AFU Metadata + +struct ocxl_afu_config *ocxl_afu_config(struct ocxl_afu *afu) +{ + return &afu->config; +} +EXPORT_SYMBOL_GPL(ocxl_afu_config); + +void ocxl_afu_set_private(struct ocxl_afu *afu, void *private) +{ + afu->private = private; +} +EXPORT_SYMBOL_GPL(ocxl_afu_set_private); + +void *ocxl_afu_get_private(struct ocxl_afu *afu) +{ + if (afu) + return afu->private; + + return NULL; +} +EXPORT_SYMBOL_GPL(ocxl_afu_get_private); diff --git a/drivers/misc/ocxl/file.c b/drivers/misc/ocxl/file.c new file mode 100644 index 000000000..452d5777a --- /dev/null +++ 
b/drivers/misc/ocxl/file.c @@ -0,0 +1,620 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/fs.h> +#include <linux/poll.h> +#include <linux/sched/signal.h> +#include <linux/eventfd.h> +#include <linux/uaccess.h> +#include <uapi/misc/ocxl.h> +#include <asm/reg.h> +#include <asm/switch_to.h> +#include "ocxl_internal.h" + + +#define OCXL_NUM_MINORS 256 /* Total to reserve */ + +static dev_t ocxl_dev; +static struct class *ocxl_class; +static DEFINE_MUTEX(minors_idr_lock); +static struct idr minors_idr; + +static struct ocxl_file_info *find_and_get_file_info(dev_t devno) +{ + struct ocxl_file_info *info; + + mutex_lock(&minors_idr_lock); + info = idr_find(&minors_idr, MINOR(devno)); + if (info) + get_device(&info->dev); + mutex_unlock(&minors_idr_lock); + return info; +} + +static int allocate_minor(struct ocxl_file_info *info) +{ + int minor; + + mutex_lock(&minors_idr_lock); + minor = idr_alloc(&minors_idr, info, 0, OCXL_NUM_MINORS, GFP_KERNEL); + mutex_unlock(&minors_idr_lock); + return minor; +} + +static void free_minor(struct ocxl_file_info *info) +{ + mutex_lock(&minors_idr_lock); + idr_remove(&minors_idr, MINOR(info->dev.devt)); + mutex_unlock(&minors_idr_lock); +} + +static int afu_open(struct inode *inode, struct file *file) +{ + struct ocxl_file_info *info; + struct ocxl_context *ctx; + int rc; + + pr_debug("%s for device %x\n", __func__, inode->i_rdev); + + info = find_and_get_file_info(inode->i_rdev); + if (!info) + return -ENODEV; + + rc = ocxl_context_alloc(&ctx, info->afu, inode->i_mapping); + if (rc) { + put_device(&info->dev); + return rc; + } + put_device(&info->dev); + file->private_data = ctx; + return 0; +} + +static long afu_ioctl_attach(struct ocxl_context *ctx, + struct ocxl_ioctl_attach __user *uarg) +{ + struct ocxl_ioctl_attach arg; + u64 amr = 0; + + pr_debug("%s for context %d\n", __func__, ctx->pasid); + + if (copy_from_user(&arg, uarg, sizeof(arg))) + return -EFAULT; + + /* Make sure reserved fields are not set for forward compatibility */ + if (arg.reserved1 || arg.reserved2 || arg.reserved3) + return -EINVAL; + + amr = arg.amr & mfspr(SPRN_UAMOR); + return ocxl_context_attach(ctx, amr, current->mm); +} + +static long afu_ioctl_get_metadata(struct ocxl_context *ctx, + struct ocxl_ioctl_metadata __user *uarg) +{ + struct ocxl_ioctl_metadata arg; + + memset(&arg, 0, sizeof(arg)); + + arg.version = 0; + + arg.afu_version_major = ctx->afu->config.version_major; + arg.afu_version_minor = ctx->afu->config.version_minor; + arg.pasid = ctx->pasid; + arg.pp_mmio_size = ctx->afu->config.pp_mmio_stride; + arg.global_mmio_size = ctx->afu->config.global_mmio_size; + + if (copy_to_user(uarg, &arg, sizeof(arg))) + return -EFAULT; + + return 0; +} + +#ifdef CONFIG_PPC64 +static long afu_ioctl_enable_p9_wait(struct ocxl_context *ctx, + struct ocxl_ioctl_p9_wait __user *uarg) +{ + struct ocxl_ioctl_p9_wait arg; + + memset(&arg, 0, sizeof(arg)); + + if (cpu_has_feature(CPU_FTR_P9_TIDR)) { + enum ocxl_context_status status; + + // Locks both status & tidr + mutex_lock(&ctx->status_mutex); + if (!ctx->tidr) { + if (set_thread_tidr(current)) { + mutex_unlock(&ctx->status_mutex); + return -ENOENT; + } + + ctx->tidr = current->thread.tidr; + } + + status = ctx->status; + mutex_unlock(&ctx->status_mutex); + + if (status == ATTACHED) { + int rc = ocxl_link_update_pe(ctx->afu->fn->link, + ctx->pasid, ctx->tidr); + + if (rc) + return rc; + } + + arg.thread_id = ctx->tidr; + } else + return -ENOENT; + + if (copy_to_user(uarg, &arg, sizeof(arg))) + 
return -EFAULT; + + return 0; +} +#endif + + +static long afu_ioctl_get_features(struct ocxl_context *ctx, + struct ocxl_ioctl_features __user *uarg) +{ + struct ocxl_ioctl_features arg; + + memset(&arg, 0, sizeof(arg)); + +#ifdef CONFIG_PPC64 + if (cpu_has_feature(CPU_FTR_P9_TIDR)) + arg.flags[0] |= OCXL_IOCTL_FEATURES_FLAGS0_P9_WAIT; +#endif + + if (copy_to_user(uarg, &arg, sizeof(arg))) + return -EFAULT; + + return 0; +} + +#define CMD_STR(x) (x == OCXL_IOCTL_ATTACH ? "ATTACH" : \ + x == OCXL_IOCTL_IRQ_ALLOC ? "IRQ_ALLOC" : \ + x == OCXL_IOCTL_IRQ_FREE ? "IRQ_FREE" : \ + x == OCXL_IOCTL_IRQ_SET_FD ? "IRQ_SET_FD" : \ + x == OCXL_IOCTL_GET_METADATA ? "GET_METADATA" : \ + x == OCXL_IOCTL_ENABLE_P9_WAIT ? "ENABLE_P9_WAIT" : \ + x == OCXL_IOCTL_GET_FEATURES ? "GET_FEATURES" : \ + "UNKNOWN") + +static irqreturn_t irq_handler(void *private) +{ + struct eventfd_ctx *ev_ctx = private; + + eventfd_signal(ev_ctx, 1); + return IRQ_HANDLED; +} + +static void irq_free(void *private) +{ + struct eventfd_ctx *ev_ctx = private; + + eventfd_ctx_put(ev_ctx); +} + +static long afu_ioctl(struct file *file, unsigned int cmd, + unsigned long args) +{ + struct ocxl_context *ctx = file->private_data; + struct ocxl_ioctl_irq_fd irq_fd; + struct eventfd_ctx *ev_ctx; + int irq_id; + u64 irq_offset; + long rc; + bool closed; + + pr_debug("%s for context %d, command %s\n", __func__, ctx->pasid, + CMD_STR(cmd)); + + mutex_lock(&ctx->status_mutex); + closed = (ctx->status == CLOSED); + mutex_unlock(&ctx->status_mutex); + + if (closed) + return -EIO; + + switch (cmd) { + case OCXL_IOCTL_ATTACH: + rc = afu_ioctl_attach(ctx, + (struct ocxl_ioctl_attach __user *) args); + break; + + case OCXL_IOCTL_IRQ_ALLOC: + rc = ocxl_afu_irq_alloc(ctx, &irq_id); + if (!rc) { + irq_offset = ocxl_irq_id_to_offset(ctx, irq_id); + rc = copy_to_user((u64 __user *) args, &irq_offset, + sizeof(irq_offset)); + if (rc) { + ocxl_afu_irq_free(ctx, irq_id); + return -EFAULT; + } + } + break; + + case OCXL_IOCTL_IRQ_FREE: + rc = copy_from_user(&irq_offset, (u64 __user *) args, + sizeof(irq_offset)); + if (rc) + return -EFAULT; + irq_id = ocxl_irq_offset_to_id(ctx, irq_offset); + rc = ocxl_afu_irq_free(ctx, irq_id); + break; + + case OCXL_IOCTL_IRQ_SET_FD: + rc = copy_from_user(&irq_fd, (u64 __user *) args, + sizeof(irq_fd)); + if (rc) + return -EFAULT; + if (irq_fd.reserved) + return -EINVAL; + irq_id = ocxl_irq_offset_to_id(ctx, irq_fd.irq_offset); + ev_ctx = eventfd_ctx_fdget(irq_fd.eventfd); + if (IS_ERR(ev_ctx)) + return PTR_ERR(ev_ctx); + rc = ocxl_irq_set_handler(ctx, irq_id, irq_handler, irq_free, ev_ctx); + if (rc) + eventfd_ctx_put(ev_ctx); + break; + + case OCXL_IOCTL_GET_METADATA: + rc = afu_ioctl_get_metadata(ctx, + (struct ocxl_ioctl_metadata __user *) args); + break; + +#ifdef CONFIG_PPC64 + case OCXL_IOCTL_ENABLE_P9_WAIT: + rc = afu_ioctl_enable_p9_wait(ctx, + (struct ocxl_ioctl_p9_wait __user *) args); + break; +#endif + + case OCXL_IOCTL_GET_FEATURES: + rc = afu_ioctl_get_features(ctx, + (struct ocxl_ioctl_features __user *) args); + break; + + default: + rc = -EINVAL; + } + return rc; +} + +static long afu_compat_ioctl(struct file *file, unsigned int cmd, + unsigned long args) +{ + return afu_ioctl(file, cmd, args); +} + +static int afu_mmap(struct file *file, struct vm_area_struct *vma) +{ + struct ocxl_context *ctx = file->private_data; + + pr_debug("%s for context %d\n", __func__, ctx->pasid); + return ocxl_context_mmap(ctx, vma); +} + +static bool has_xsl_error(struct ocxl_context *ctx) +{ + bool ret; + + 
mutex_lock(&ctx->xsl_error_lock); + ret = !!ctx->xsl_error.addr; + mutex_unlock(&ctx->xsl_error_lock); + + return ret; +} + +/* + * Are there any events pending on the AFU + * ctx: The AFU context + * Returns: true if there are events pending + */ +static bool afu_events_pending(struct ocxl_context *ctx) +{ + if (has_xsl_error(ctx)) + return true; + return false; +} + +static unsigned int afu_poll(struct file *file, struct poll_table_struct *wait) +{ + struct ocxl_context *ctx = file->private_data; + unsigned int mask = 0; + bool closed; + + pr_debug("%s for context %d\n", __func__, ctx->pasid); + + poll_wait(file, &ctx->events_wq, wait); + + mutex_lock(&ctx->status_mutex); + closed = (ctx->status == CLOSED); + mutex_unlock(&ctx->status_mutex); + + if (afu_events_pending(ctx)) + mask = EPOLLIN | EPOLLRDNORM; + else if (closed) + mask = EPOLLERR; + + return mask; +} + +/* + * Populate the supplied buffer with a single XSL error + * ctx: The AFU context to report the error from + * header: the event header to populate + * buf: The buffer to write the body into (should be at least + * AFU_EVENT_BODY_XSL_ERROR_SIZE) + * Return: the amount of buffer that was populated + */ +static ssize_t append_xsl_error(struct ocxl_context *ctx, + struct ocxl_kernel_event_header *header, + char __user *buf) +{ + struct ocxl_kernel_event_xsl_fault_error body; + + memset(&body, 0, sizeof(body)); + + mutex_lock(&ctx->xsl_error_lock); + if (!ctx->xsl_error.addr) { + mutex_unlock(&ctx->xsl_error_lock); + return 0; + } + + body.addr = ctx->xsl_error.addr; + body.dsisr = ctx->xsl_error.dsisr; + body.count = ctx->xsl_error.count; + + ctx->xsl_error.addr = 0; + ctx->xsl_error.dsisr = 0; + ctx->xsl_error.count = 0; + + mutex_unlock(&ctx->xsl_error_lock); + + header->type = OCXL_AFU_EVENT_XSL_FAULT_ERROR; + + if (copy_to_user(buf, &body, sizeof(body))) + return -EFAULT; + + return sizeof(body); +} + +#define AFU_EVENT_BODY_MAX_SIZE sizeof(struct ocxl_kernel_event_xsl_fault_error) + +/* + * Reports events on the AFU + * Format: + * Header (struct ocxl_kernel_event_header) + * Body (struct ocxl_kernel_event_*) + * Header... 
+ */ +static ssize_t afu_read(struct file *file, char __user *buf, size_t count, + loff_t *off) +{ + struct ocxl_context *ctx = file->private_data; + struct ocxl_kernel_event_header header; + ssize_t rc; + ssize_t used = 0; + DEFINE_WAIT(event_wait); + + memset(&header, 0, sizeof(header)); + + /* Require offset to be 0 */ + if (*off != 0) + return -EINVAL; + + if (count < (sizeof(struct ocxl_kernel_event_header) + + AFU_EVENT_BODY_MAX_SIZE)) + return -EINVAL; + + for (;;) { + prepare_to_wait(&ctx->events_wq, &event_wait, + TASK_INTERRUPTIBLE); + + if (afu_events_pending(ctx)) + break; + + if (ctx->status == CLOSED) + break; + + if (file->f_flags & O_NONBLOCK) { + finish_wait(&ctx->events_wq, &event_wait); + return -EAGAIN; + } + + if (signal_pending(current)) { + finish_wait(&ctx->events_wq, &event_wait); + return -ERESTARTSYS; + } + + schedule(); + } + + finish_wait(&ctx->events_wq, &event_wait); + + if (has_xsl_error(ctx)) { + used = append_xsl_error(ctx, &header, buf + sizeof(header)); + if (used < 0) + return used; + } + + if (!afu_events_pending(ctx)) + header.flags |= OCXL_KERNEL_EVENT_FLAG_LAST; + + if (copy_to_user(buf, &header, sizeof(header))) + return -EFAULT; + + used += sizeof(header); + + rc = used; + return rc; +} + +static int afu_release(struct inode *inode, struct file *file) +{ + struct ocxl_context *ctx = file->private_data; + int rc; + + pr_debug("%s for device %x\n", __func__, inode->i_rdev); + rc = ocxl_context_detach(ctx); + mutex_lock(&ctx->mapping_lock); + ctx->mapping = NULL; + mutex_unlock(&ctx->mapping_lock); + wake_up_all(&ctx->events_wq); + if (rc != -EBUSY) + ocxl_context_free(ctx); + return 0; +} + +static const struct file_operations ocxl_afu_fops = { + .owner = THIS_MODULE, + .open = afu_open, + .unlocked_ioctl = afu_ioctl, + .compat_ioctl = afu_compat_ioctl, + .mmap = afu_mmap, + .poll = afu_poll, + .read = afu_read, + .release = afu_release, +}; + +// Free the info struct +static void info_release(struct device *dev) +{ + struct ocxl_file_info *info = container_of(dev, struct ocxl_file_info, dev); + + ocxl_afu_put(info->afu); + kfree(info); +} + +static int ocxl_file_make_visible(struct ocxl_file_info *info) +{ + int rc; + + cdev_init(&info->cdev, &ocxl_afu_fops); + rc = cdev_add(&info->cdev, info->dev.devt, 1); + if (rc) { + dev_err(&info->dev, "Unable to add afu char device: %d\n", rc); + return rc; + } + + return 0; +} + +static void ocxl_file_make_invisible(struct ocxl_file_info *info) +{ + cdev_del(&info->cdev); +} + +int ocxl_file_register_afu(struct ocxl_afu *afu) +{ + int minor; + int rc; + struct ocxl_file_info *info; + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + + info = kzalloc(sizeof(*info), GFP_KERNEL); + if (info == NULL) + return -ENOMEM; + + minor = allocate_minor(info); + if (minor < 0) { + kfree(info); + return minor; + } + + info->dev.parent = &fn->dev; + info->dev.devt = MKDEV(MAJOR(ocxl_dev), minor); + info->dev.class = ocxl_class; + info->dev.release = info_release; + + info->afu = afu; + ocxl_afu_get(afu); + + rc = dev_set_name(&info->dev, "%s.%s.%hhu", + afu->config.name, dev_name(&pci_dev->dev), afu->config.idx); + if (rc) + goto err_put; + + rc = device_register(&info->dev); + if (rc) { + free_minor(info); + put_device(&info->dev); + return rc; + } + + rc = ocxl_sysfs_register_afu(info); + if (rc) + goto err_unregister; + + rc = ocxl_file_make_visible(info); + if (rc) + goto err_unregister; + + ocxl_afu_set_private(afu, info); + + return 0; + +err_unregister: + 
ocxl_sysfs_unregister_afu(info); // safe to call even if register failed + free_minor(info); + device_unregister(&info->dev); + return rc; +err_put: + ocxl_afu_put(afu); + free_minor(info); + kfree(info); + return rc; +} + +void ocxl_file_unregister_afu(struct ocxl_afu *afu) +{ + struct ocxl_file_info *info = ocxl_afu_get_private(afu); + + if (!info) + return; + + ocxl_file_make_invisible(info); + ocxl_sysfs_unregister_afu(info); + free_minor(info); + device_unregister(&info->dev); +} + +static char *ocxl_devnode(struct device *dev, umode_t *mode) +{ + return kasprintf(GFP_KERNEL, "ocxl/%s", dev_name(dev)); +} + +int ocxl_file_init(void) +{ + int rc; + + idr_init(&minors_idr); + + rc = alloc_chrdev_region(&ocxl_dev, 0, OCXL_NUM_MINORS, "ocxl"); + if (rc) { + pr_err("Unable to allocate ocxl major number: %d\n", rc); + return rc; + } + + ocxl_class = class_create(THIS_MODULE, "ocxl"); + if (IS_ERR(ocxl_class)) { + pr_err("Unable to create ocxl class\n"); + unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS); + return PTR_ERR(ocxl_class); + } + + ocxl_class->devnode = ocxl_devnode; + return 0; +} + +void ocxl_file_exit(void) +{ + class_destroy(ocxl_class); + unregister_chrdev_region(ocxl_dev, OCXL_NUM_MINORS); + idr_destroy(&minors_idr); +} diff --git a/drivers/misc/ocxl/link.c b/drivers/misc/ocxl/link.c new file mode 100644 index 000000000..4cf4c55a5 --- /dev/null +++ b/drivers/misc/ocxl/link.c @@ -0,0 +1,779 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/sched/mm.h> +#include <linux/mutex.h> +#include <linux/mm.h> +#include <linux/mm_types.h> +#include <linux/mmu_context.h> +#include <linux/mmu_notifier.h> +#include <linux/irqdomain.h> +#include <asm/copro.h> +#include <asm/pnv-ocxl.h> +#include <asm/xive.h> +#include <misc/ocxl.h> +#include "ocxl_internal.h" +#include "trace.h" + + +#define SPA_PASID_BITS 15 +#define SPA_PASID_MAX ((1 << SPA_PASID_BITS) - 1) +#define SPA_PE_MASK SPA_PASID_MAX +#define SPA_SPA_SIZE_LOG 22 /* Each SPA is 4 Mb */ + +#define SPA_CFG_SF (1ull << (63-0)) +#define SPA_CFG_TA (1ull << (63-1)) +#define SPA_CFG_HV (1ull << (63-3)) +#define SPA_CFG_UV (1ull << (63-4)) +#define SPA_CFG_XLAT_hpt (0ull << (63-6)) /* Hashed page table (HPT) mode */ +#define SPA_CFG_XLAT_roh (2ull << (63-6)) /* Radix on HPT mode */ +#define SPA_CFG_XLAT_ror (3ull << (63-6)) /* Radix on Radix mode */ +#define SPA_CFG_PR (1ull << (63-49)) +#define SPA_CFG_TC (1ull << (63-54)) +#define SPA_CFG_DR (1ull << (63-59)) + +#define SPA_XSL_TF (1ull << (63-3)) /* Translation fault */ +#define SPA_XSL_S (1ull << (63-38)) /* Store operation */ + +#define SPA_PE_VALID 0x80000000 + +struct ocxl_link; + +struct pe_data { + struct mm_struct *mm; + /* callback to trigger when a translation fault occurs */ + void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr); + /* opaque pointer to be passed to the above callback */ + void *xsl_err_data; + struct rcu_head rcu; + struct ocxl_link *link; + struct mmu_notifier mmu_notifier; +}; + +struct spa { + struct ocxl_process_element *spa_mem; + int spa_order; + struct mutex spa_lock; + struct radix_tree_root pe_tree; /* Maps PE handles to pe_data */ + char *irq_name; + int virq; + void __iomem *reg_dsisr; + void __iomem *reg_dar; + void __iomem *reg_tfc; + void __iomem *reg_pe_handle; + /* + * The following field are used by the memory fault + * interrupt handler. We can only have one interrupt at a + * time. 
The NPU won't raise another interrupt until the + * previous one has been ack'd by writing to the TFC register + */ + struct xsl_fault { + struct work_struct fault_work; + u64 pe; + u64 dsisr; + u64 dar; + struct pe_data pe_data; + } xsl_fault; +}; + +/* + * A opencapi link can be used be by several PCI functions. We have + * one link per device slot. + * + * A linked list of opencapi links should suffice, as there's a + * limited number of opencapi slots on a system and lookup is only + * done when the device is probed + */ +struct ocxl_link { + struct list_head list; + struct kref ref; + int domain; + int bus; + int dev; + void __iomem *arva; /* ATSD register virtual address */ + spinlock_t atsd_lock; /* to serialize shootdowns */ + atomic_t irq_available; + struct spa *spa; + void *platform_data; +}; +static LIST_HEAD(links_list); +static DEFINE_MUTEX(links_list_lock); + +enum xsl_response { + CONTINUE, + ADDRESS_ERROR, + RESTART, +}; + + +static void read_irq(struct spa *spa, u64 *dsisr, u64 *dar, u64 *pe) +{ + u64 reg; + + *dsisr = in_be64(spa->reg_dsisr); + *dar = in_be64(spa->reg_dar); + reg = in_be64(spa->reg_pe_handle); + *pe = reg & SPA_PE_MASK; +} + +static void ack_irq(struct spa *spa, enum xsl_response r) +{ + u64 reg = 0; + + /* continue is not supported */ + if (r == RESTART) + reg = PPC_BIT(31); + else if (r == ADDRESS_ERROR) + reg = PPC_BIT(30); + else + WARN(1, "Invalid irq response %d\n", r); + + if (reg) { + trace_ocxl_fault_ack(spa->spa_mem, spa->xsl_fault.pe, + spa->xsl_fault.dsisr, spa->xsl_fault.dar, reg); + out_be64(spa->reg_tfc, reg); + } +} + +static void xsl_fault_handler_bh(struct work_struct *fault_work) +{ + vm_fault_t flt = 0; + unsigned long access, flags, inv_flags = 0; + enum xsl_response r; + struct xsl_fault *fault = container_of(fault_work, struct xsl_fault, + fault_work); + struct spa *spa = container_of(fault, struct spa, xsl_fault); + + int rc; + + /* + * We must release a reference on mm_users whenever exiting this + * function (taken in the memory fault interrupt handler) + */ + rc = copro_handle_mm_fault(fault->pe_data.mm, fault->dar, fault->dsisr, + &flt); + if (rc) { + pr_debug("copro_handle_mm_fault failed: %d\n", rc); + if (fault->pe_data.xsl_err_cb) { + fault->pe_data.xsl_err_cb( + fault->pe_data.xsl_err_data, + fault->dar, fault->dsisr); + } + r = ADDRESS_ERROR; + goto ack; + } + + if (!radix_enabled()) { + /* + * update_mmu_cache() will not have loaded the hash + * since current->trap is not a 0x400 or 0x300, so + * just call hash_page_mm() here. + */ + access = _PAGE_PRESENT | _PAGE_READ; + if (fault->dsisr & SPA_XSL_S) + access |= _PAGE_WRITE; + + if (get_region_id(fault->dar) != USER_REGION_ID) + access |= _PAGE_PRIVILEGED; + + local_irq_save(flags); + hash_page_mm(fault->pe_data.mm, fault->dar, access, 0x300, + inv_flags); + local_irq_restore(flags); + } + r = RESTART; +ack: + mmput(fault->pe_data.mm); + ack_irq(spa, r); +} + +static irqreturn_t xsl_fault_handler(int irq, void *data) +{ + struct ocxl_link *link = (struct ocxl_link *) data; + struct spa *spa = link->spa; + u64 dsisr, dar, pe_handle; + struct pe_data *pe_data; + struct ocxl_process_element *pe; + int pid; + bool schedule = false; + + read_irq(spa, &dsisr, &dar, &pe_handle); + trace_ocxl_fault(spa->spa_mem, pe_handle, dsisr, dar, -1); + + WARN_ON(pe_handle > SPA_PE_MASK); + pe = spa->spa_mem + pe_handle; + pid = be32_to_cpu(pe->pid); + /* We could be reading all null values here if the PE is being + * removed while an interrupt kicks in. 
It's not supposed to + * happen if the driver notified the AFU to terminate the + * PASID, and the AFU waited for pending operations before + * acknowledging. But even if it happens, we won't find a + * memory context below and fail silently, so it should be ok. + */ + if (!(dsisr & SPA_XSL_TF)) { + WARN(1, "Invalid xsl interrupt fault register %#llx\n", dsisr); + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; + } + + rcu_read_lock(); + pe_data = radix_tree_lookup(&spa->pe_tree, pe_handle); + if (!pe_data) { + /* + * Could only happen if the driver didn't notify the + * AFU about PASID termination before removing the PE, + * or the AFU didn't wait for all memory access to + * have completed. + * + * Either way, we fail early, but we shouldn't log an + * error message, as it is a valid (if unexpected) + * scenario + */ + rcu_read_unlock(); + pr_debug("Unknown mm context for xsl interrupt\n"); + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; + } + + if (!pe_data->mm) { + /* + * translation fault from a kernel context - an OpenCAPI + * device tried to access a bad kernel address + */ + rcu_read_unlock(); + pr_warn("Unresolved OpenCAPI xsl fault in kernel context\n"); + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; + } + WARN_ON(pe_data->mm->context.id != pid); + + if (mmget_not_zero(pe_data->mm)) { + spa->xsl_fault.pe = pe_handle; + spa->xsl_fault.dar = dar; + spa->xsl_fault.dsisr = dsisr; + spa->xsl_fault.pe_data = *pe_data; + schedule = true; + /* mm_users count released by bottom half */ + } + rcu_read_unlock(); + if (schedule) + schedule_work(&spa->xsl_fault.fault_work); + else + ack_irq(spa, ADDRESS_ERROR); + return IRQ_HANDLED; +} + +static void unmap_irq_registers(struct spa *spa) +{ + pnv_ocxl_unmap_xsl_regs(spa->reg_dsisr, spa->reg_dar, spa->reg_tfc, + spa->reg_pe_handle); +} + +static int map_irq_registers(struct pci_dev *dev, struct spa *spa) +{ + return pnv_ocxl_map_xsl_regs(dev, &spa->reg_dsisr, &spa->reg_dar, + &spa->reg_tfc, &spa->reg_pe_handle); +} + +static int setup_xsl_irq(struct pci_dev *dev, struct ocxl_link *link) +{ + struct spa *spa = link->spa; + int rc; + int hwirq; + + rc = pnv_ocxl_get_xsl_irq(dev, &hwirq); + if (rc) + return rc; + + rc = map_irq_registers(dev, spa); + if (rc) + return rc; + + spa->irq_name = kasprintf(GFP_KERNEL, "ocxl-xsl-%x-%x-%x", + link->domain, link->bus, link->dev); + if (!spa->irq_name) { + dev_err(&dev->dev, "Can't allocate name for xsl interrupt\n"); + rc = -ENOMEM; + goto err_xsl; + } + /* + * At some point, we'll need to look into allowing a higher + * number of interrupts. Could we have an IRQ domain per link? 
+ */ + spa->virq = irq_create_mapping(NULL, hwirq); + if (!spa->virq) { + dev_err(&dev->dev, + "irq_create_mapping failed for translation interrupt\n"); + rc = -EINVAL; + goto err_name; + } + + dev_dbg(&dev->dev, "hwirq %d mapped to virq %d\n", hwirq, spa->virq); + + rc = request_irq(spa->virq, xsl_fault_handler, 0, spa->irq_name, + link); + if (rc) { + dev_err(&dev->dev, + "request_irq failed for translation interrupt: %d\n", + rc); + rc = -EINVAL; + goto err_mapping; + } + return 0; + +err_mapping: + irq_dispose_mapping(spa->virq); +err_name: + kfree(spa->irq_name); +err_xsl: + unmap_irq_registers(spa); + return rc; +} + +static void release_xsl_irq(struct ocxl_link *link) +{ + struct spa *spa = link->spa; + + if (spa->virq) { + free_irq(spa->virq, link); + irq_dispose_mapping(spa->virq); + } + kfree(spa->irq_name); + unmap_irq_registers(spa); +} + +static int alloc_spa(struct pci_dev *dev, struct ocxl_link *link) +{ + struct spa *spa; + + spa = kzalloc(sizeof(struct spa), GFP_KERNEL); + if (!spa) + return -ENOMEM; + + mutex_init(&spa->spa_lock); + INIT_RADIX_TREE(&spa->pe_tree, GFP_KERNEL); + INIT_WORK(&spa->xsl_fault.fault_work, xsl_fault_handler_bh); + + spa->spa_order = SPA_SPA_SIZE_LOG - PAGE_SHIFT; + spa->spa_mem = (struct ocxl_process_element *) + __get_free_pages(GFP_KERNEL | __GFP_ZERO, spa->spa_order); + if (!spa->spa_mem) { + dev_err(&dev->dev, "Can't allocate Shared Process Area\n"); + kfree(spa); + return -ENOMEM; + } + pr_debug("Allocated SPA for %x:%x:%x at %p\n", link->domain, link->bus, + link->dev, spa->spa_mem); + + link->spa = spa; + return 0; +} + +static void free_spa(struct ocxl_link *link) +{ + struct spa *spa = link->spa; + + pr_debug("Freeing SPA for %x:%x:%x\n", link->domain, link->bus, + link->dev); + + if (spa && spa->spa_mem) { + free_pages((unsigned long) spa->spa_mem, spa->spa_order); + kfree(spa); + link->spa = NULL; + } +} + +static int alloc_link(struct pci_dev *dev, int PE_mask, struct ocxl_link **out_link) +{ + struct ocxl_link *link; + int rc; + + link = kzalloc(sizeof(struct ocxl_link), GFP_KERNEL); + if (!link) + return -ENOMEM; + + kref_init(&link->ref); + link->domain = pci_domain_nr(dev->bus); + link->bus = dev->bus->number; + link->dev = PCI_SLOT(dev->devfn); + atomic_set(&link->irq_available, MAX_IRQ_PER_LINK); + spin_lock_init(&link->atsd_lock); + + rc = alloc_spa(dev, link); + if (rc) + goto err_free; + + rc = setup_xsl_irq(dev, link); + if (rc) + goto err_spa; + + /* platform specific hook */ + rc = pnv_ocxl_spa_setup(dev, link->spa->spa_mem, PE_mask, + &link->platform_data); + if (rc) + goto err_xsl_irq; + + /* if link->arva is not defeined, MMIO registers are not used to + * generate TLB invalidate. PowerBus snooping is enabled. + * Otherwise, PowerBus snooping is disabled. TLB Invalidates are + * initiated using MMIO registers. 
+ */ + pnv_ocxl_map_lpar(dev, mfspr(SPRN_LPID), 0, &link->arva); + + *out_link = link; + return 0; + +err_xsl_irq: + release_xsl_irq(link); +err_spa: + free_spa(link); +err_free: + kfree(link); + return rc; +} + +static void free_link(struct ocxl_link *link) +{ + release_xsl_irq(link); + free_spa(link); + kfree(link); +} + +int ocxl_link_setup(struct pci_dev *dev, int PE_mask, void **link_handle) +{ + int rc = 0; + struct ocxl_link *link; + + mutex_lock(&links_list_lock); + list_for_each_entry(link, &links_list, list) { + /* The functions of a device all share the same link */ + if (link->domain == pci_domain_nr(dev->bus) && + link->bus == dev->bus->number && + link->dev == PCI_SLOT(dev->devfn)) { + kref_get(&link->ref); + *link_handle = link; + goto unlock; + } + } + rc = alloc_link(dev, PE_mask, &link); + if (rc) + goto unlock; + + list_add(&link->list, &links_list); + *link_handle = link; +unlock: + mutex_unlock(&links_list_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_link_setup); + +static void release_xsl(struct kref *ref) +{ + struct ocxl_link *link = container_of(ref, struct ocxl_link, ref); + + if (link->arva) { + pnv_ocxl_unmap_lpar(link->arva); + link->arva = NULL; + } + + list_del(&link->list); + /* call platform code before releasing data */ + pnv_ocxl_spa_release(link->platform_data); + free_link(link); +} + +void ocxl_link_release(struct pci_dev *dev, void *link_handle) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + mutex_lock(&links_list_lock); + kref_put(&link->ref, release_xsl); + mutex_unlock(&links_list_lock); +} +EXPORT_SYMBOL_GPL(ocxl_link_release); + +static void invalidate_range(struct mmu_notifier *mn, + struct mm_struct *mm, + unsigned long start, unsigned long end) +{ + struct pe_data *pe_data = container_of(mn, struct pe_data, mmu_notifier); + struct ocxl_link *link = pe_data->link; + unsigned long addr, pid, page_size = PAGE_SIZE; + + pid = mm->context.id; + trace_ocxl_mmu_notifier_range(start, end, pid); + + spin_lock(&link->atsd_lock); + for (addr = start; addr < end; addr += page_size) + pnv_ocxl_tlb_invalidate(link->arva, pid, addr, page_size); + spin_unlock(&link->atsd_lock); +} + +static const struct mmu_notifier_ops ocxl_mmu_notifier_ops = { + .invalidate_range = invalidate_range, +}; + +static u64 calculate_cfg_state(bool kernel) +{ + u64 state; + + state = SPA_CFG_DR; + if (mfspr(SPRN_LPCR) & LPCR_TC) + state |= SPA_CFG_TC; + if (radix_enabled()) + state |= SPA_CFG_XLAT_ror; + else + state |= SPA_CFG_XLAT_hpt; + state |= SPA_CFG_HV; + if (kernel) { + if (mfmsr() & MSR_SF) + state |= SPA_CFG_SF; + } else { + state |= SPA_CFG_PR; + if (!test_tsk_thread_flag(current, TIF_32BIT)) + state |= SPA_CFG_SF; + } + return state; +} + +int ocxl_link_add_pe(void *link_handle, int pasid, u32 pidr, u32 tidr, + u64 amr, u16 bdf, struct mm_struct *mm, + void (*xsl_err_cb)(void *data, u64 addr, u64 dsisr), + void *xsl_err_data) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct spa *spa = link->spa; + struct ocxl_process_element *pe; + int pe_handle, rc = 0; + struct pe_data *pe_data; + + BUILD_BUG_ON(sizeof(struct ocxl_process_element) != 128); + if (pasid > SPA_PASID_MAX) + return -EINVAL; + + mutex_lock(&spa->spa_lock); + pe_handle = pasid & SPA_PE_MASK; + pe = spa->spa_mem + pe_handle; + + if (pe->software_state) { + rc = -EBUSY; + goto unlock; + } + + pe_data = kmalloc(sizeof(*pe_data), GFP_KERNEL); + if (!pe_data) { + rc = -ENOMEM; + goto unlock; + } + + pe_data->mm = mm; + pe_data->xsl_err_cb = xsl_err_cb; + 
pe_data->xsl_err_data = xsl_err_data; + pe_data->link = link; + pe_data->mmu_notifier.ops = &ocxl_mmu_notifier_ops; + + memset(pe, 0, sizeof(struct ocxl_process_element)); + pe->config_state = cpu_to_be64(calculate_cfg_state(pidr == 0)); + pe->pasid = cpu_to_be32(pasid << (31 - 19)); + pe->bdf = cpu_to_be16(bdf); + pe->lpid = cpu_to_be32(mfspr(SPRN_LPID)); + pe->pid = cpu_to_be32(pidr); + pe->tid = cpu_to_be32(tidr); + pe->amr = cpu_to_be64(amr); + pe->software_state = cpu_to_be32(SPA_PE_VALID); + + /* + * For user contexts, register a copro so that TLBIs are seen + * by the nest MMU. If we have a kernel context, TLBIs are + * already global. + */ + if (mm) { + mm_context_add_copro(mm); + if (link->arva) { + /* Use MMIO registers for the TLB Invalidate + * operations. + */ + trace_ocxl_init_mmu_notifier(pasid, mm->context.id); + mmu_notifier_register(&pe_data->mmu_notifier, mm); + } + } + + /* + * Barrier is to make sure PE is visible in the SPA before it + * is used by the device. It also helps with the global TLBI + * invalidation + */ + mb(); + radix_tree_insert(&spa->pe_tree, pe_handle, pe_data); + + /* + * The mm must stay valid for as long as the device uses it. We + * lower the count when the context is removed from the SPA. + * + * We grab mm_count (and not mm_users), as we don't want to + * end up in a circular dependency if a process mmaps its + * mmio, therefore incrementing the file ref count when + * calling mmap(), and forgets to unmap before exiting. In + * that scenario, when the kernel handles the death of the + * process, the file is not cleaned because unmap was not + * called, and the mm wouldn't be freed because we would still + * have a reference on mm_users. Incrementing mm_count solves + * the problem. + */ + if (mm) + mmgrab(mm); + trace_ocxl_context_add(current->pid, spa->spa_mem, pasid, pidr, tidr); +unlock: + mutex_unlock(&spa->spa_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_link_add_pe); + +int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct spa *spa = link->spa; + struct ocxl_process_element *pe; + int pe_handle, rc; + + if (pasid > SPA_PASID_MAX) + return -EINVAL; + + pe_handle = pasid & SPA_PE_MASK; + pe = spa->spa_mem + pe_handle; + + mutex_lock(&spa->spa_lock); + + pe->tid = cpu_to_be32(tid); + + /* + * The barrier makes sure the PE is updated + * before we clear the NPU context cache below, so that the + * old PE cannot be reloaded erroneously. + */ + mb(); + + /* + * hook to platform code + * On powerpc, the entry needs to be cleared from the context + * cache of the NPU. + */ + rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle); + WARN_ON(rc); + + mutex_unlock(&spa->spa_lock); + return rc; +} + +int ocxl_link_remove_pe(void *link_handle, int pasid) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + struct spa *spa = link->spa; + struct ocxl_process_element *pe; + struct pe_data *pe_data; + int pe_handle, rc; + + if (pasid > SPA_PASID_MAX) + return -EINVAL; + + /* + * About synchronization with our memory fault handler: + * + * Before removing the PE, the driver is supposed to have + * notified the AFU, which should have cleaned up and make + * sure the PASID is no longer in use, including pending + * interrupts. However, there's no way to be sure... + * + * We clear the PE and remove the context from our radix + * tree. From that point on, any new interrupt for that + * context will fail silently, which is ok. 
As mentioned + * above, that's not expected, but it could happen if the + * driver or AFU didn't do the right thing. + * + * There could still be a bottom half running, but we don't + * need to wait/flush, as it is managing a reference count on + * the mm it reads from the radix tree. + */ + pe_handle = pasid & SPA_PE_MASK; + pe = spa->spa_mem + pe_handle; + + mutex_lock(&spa->spa_lock); + + if (!(be32_to_cpu(pe->software_state) & SPA_PE_VALID)) { + rc = -EINVAL; + goto unlock; + } + + trace_ocxl_context_remove(current->pid, spa->spa_mem, pasid, + be32_to_cpu(pe->pid), be32_to_cpu(pe->tid)); + + memset(pe, 0, sizeof(struct ocxl_process_element)); + /* + * The barrier makes sure the PE is removed from the SPA + * before we clear the NPU context cache below, so that the + * old PE cannot be reloaded erroneously. + */ + mb(); + + /* + * hook to platform code + * On powerpc, the entry needs to be cleared from the context + * cache of the NPU. + */ + rc = pnv_ocxl_spa_remove_pe_from_cache(link->platform_data, pe_handle); + WARN_ON(rc); + + pe_data = radix_tree_delete(&spa->pe_tree, pe_handle); + if (!pe_data) { + WARN(1, "Couldn't find pe data when removing PE\n"); + } else { + if (pe_data->mm) { + if (link->arva) { + trace_ocxl_release_mmu_notifier(pasid, + pe_data->mm->context.id); + mmu_notifier_unregister(&pe_data->mmu_notifier, + pe_data->mm); + spin_lock(&link->atsd_lock); + pnv_ocxl_tlb_invalidate(link->arva, + pe_data->mm->context.id, + 0ull, + PAGE_SIZE); + spin_unlock(&link->atsd_lock); + } + mm_context_remove_copro(pe_data->mm); + mmdrop(pe_data->mm); + } + kfree_rcu(pe_data, rcu); + } +unlock: + mutex_unlock(&spa->spa_lock); + return rc; +} +EXPORT_SYMBOL_GPL(ocxl_link_remove_pe); + +int ocxl_link_irq_alloc(void *link_handle, int *hw_irq) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + int irq; + + if (atomic_dec_if_positive(&link->irq_available) < 0) + return -ENOSPC; + + irq = xive_native_alloc_irq(); + if (!irq) { + atomic_inc(&link->irq_available); + return -ENXIO; + } + + *hw_irq = irq; + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_link_irq_alloc); + +void ocxl_link_free_irq(void *link_handle, int hw_irq) +{ + struct ocxl_link *link = (struct ocxl_link *) link_handle; + + xive_native_free_irq(hw_irq); + atomic_inc(&link->irq_available); +} +EXPORT_SYMBOL_GPL(ocxl_link_free_irq); diff --git a/drivers/misc/ocxl/main.c b/drivers/misc/ocxl/main.c new file mode 100644 index 000000000..ef73cf35d --- /dev/null +++ b/drivers/misc/ocxl/main.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/module.h> +#include <linux/pci.h> +#include <asm/mmu.h> +#include "ocxl_internal.h" + +static int __init init_ocxl(void) +{ + int rc = 0; + + if (!tlbie_capable) + return -EINVAL; + + rc = ocxl_file_init(); + if (rc) + return rc; + + rc = pci_register_driver(&ocxl_pci_driver); + if (rc) { + ocxl_file_exit(); + return rc; + } + return 0; +} + +static void exit_ocxl(void) +{ + pci_unregister_driver(&ocxl_pci_driver); + ocxl_file_exit(); +} + +module_init(init_ocxl); +module_exit(exit_ocxl); + +MODULE_DESCRIPTION("Open Coherent Accelerator"); +MODULE_LICENSE("GPL"); diff --git a/drivers/misc/ocxl/mmio.c b/drivers/misc/ocxl/mmio.c new file mode 100644 index 000000000..aae713db4 --- /dev/null +++ b/drivers/misc/ocxl/mmio.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2019 IBM Corp. 
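/*
 * Editor's note: the accessors implemented in mmio.c below wrap the AFU's
 * global MMIO window with bounds checks and endianness handling.  As a
 * rough illustration of the calling convention only, an in-kernel client
 * of the ocxl API might use them as sketched here.  The register offsets
 * and the enable bit are invented for the example and are not part of the
 * driver.
 *
 *	#include <misc/ocxl.h>
 *
 *	#define EXAMPLE_CTRL_REG	0x0008	// hypothetical control register
 *	#define EXAMPLE_STATUS_REG	0x0010	// hypothetical status register
 *	#define EXAMPLE_CTRL_ENABLE	(1ull << 0)
 *
 *	static int example_enable_afu(struct ocxl_afu *afu)
 *	{
 *		u64 status;
 *		int rc;
 *
 *		// Set the (assumed) enable bit; the AFU lays registers out big-endian
 *		rc = ocxl_global_mmio_set64(afu, EXAMPLE_CTRL_REG,
 *					    OCXL_BIG_ENDIAN, EXAMPLE_CTRL_ENABLE);
 *		if (rc)
 *			return rc;
 *
 *		// Read back the (assumed) status register in host endianness
 *		rc = ocxl_global_mmio_read64(afu, EXAMPLE_STATUS_REG,
 *					     OCXL_HOST_ENDIAN, &status);
 *		if (rc)
 *			return rc;
 *
 *		return status ? 0 : -EIO;
 *	}
 *
 * Out-of-range offsets are rejected with -EINVAL by the helpers, so the
 * caller does not need to re-check them against global_mmio_size.
 */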
+#include <linux/sched/mm.h> +#include "trace.h" +#include "ocxl_internal.h" + +int ocxl_global_mmio_read32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 *val) +{ + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + *val = readl_be((char *)afu->global_mmio_ptr + offset); + break; + + default: + *val = readl((char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_read32); + +int ocxl_global_mmio_read64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 *val) +{ + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + *val = readq_be((char *)afu->global_mmio_ptr + offset); + break; + + default: + *val = readq((char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_read64); + +int ocxl_global_mmio_write32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 val) +{ + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + writel_be(val, (char *)afu->global_mmio_ptr + offset); + break; + + default: + writel(val, (char *)afu->global_mmio_ptr + offset); + break; + } + + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_write32); + +int ocxl_global_mmio_write64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 val) +{ + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + writeq_be(val, (char *)afu->global_mmio_ptr + offset); + break; + + default: + writeq(val, (char *)afu->global_mmio_ptr + offset); + break; + } + + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_write64); + +int ocxl_global_mmio_set32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 mask) +{ + u32 tmp; + + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + tmp = readl_be((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writel_be(tmp, (char *)afu->global_mmio_ptr + offset); + break; + + default: + tmp = readl((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writel(tmp, (char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_set32); + +int ocxl_global_mmio_set64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 mask) +{ + u64 tmp; + + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + tmp = readq_be((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writeq_be(tmp, (char *)afu->global_mmio_ptr + offset); + break; + + default: + tmp = readq((char *)afu->global_mmio_ptr + offset); + tmp |= mask; + writeq(tmp, (char *)afu->global_mmio_ptr + offset); + break; + } + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_set64); + +int 
ocxl_global_mmio_clear32(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u32 mask) +{ + u32 tmp; + + if (offset > afu->config.global_mmio_size - 4) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + tmp = readl_be((char *)afu->global_mmio_ptr + offset); + tmp &= ~mask; + writel_be(tmp, (char *)afu->global_mmio_ptr + offset); + break; + + default: + tmp = readl((char *)afu->global_mmio_ptr + offset); + tmp &= ~mask; + writel(tmp, (char *)afu->global_mmio_ptr + offset); + break; + } + + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear32); + +int ocxl_global_mmio_clear64(struct ocxl_afu *afu, size_t offset, + enum ocxl_endian endian, u64 mask) +{ + u64 tmp; + + if (offset > afu->config.global_mmio_size - 8) + return -EINVAL; + +#ifdef __BIG_ENDIAN__ + if (endian == OCXL_HOST_ENDIAN) + endian = OCXL_BIG_ENDIAN; +#endif + + switch (endian) { + case OCXL_BIG_ENDIAN: + tmp = readq_be((char *)afu->global_mmio_ptr + offset); + tmp &= ~mask; + writeq_be(tmp, (char *)afu->global_mmio_ptr + offset); + break; + + default: + tmp = readq((char *)afu->global_mmio_ptr + offset); + tmp &= ~mask; + writeq(tmp, (char *)afu->global_mmio_ptr + offset); + break; + } + + writeq(tmp, (char *)afu->global_mmio_ptr + offset); + + return 0; +} +EXPORT_SYMBOL_GPL(ocxl_global_mmio_clear64); diff --git a/drivers/misc/ocxl/ocxl_internal.h b/drivers/misc/ocxl/ocxl_internal.h new file mode 100644 index 000000000..10125a22d --- /dev/null +++ b/drivers/misc/ocxl/ocxl_internal.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright 2017 IBM Corp. +#ifndef _OCXL_INTERNAL_H_ +#define _OCXL_INTERNAL_H_ + +#include <linux/pci.h> +#include <linux/cdev.h> +#include <linux/list.h> +#include <misc/ocxl.h> + +#define MAX_IRQ_PER_LINK 2000 +#define MAX_IRQ_PER_CONTEXT MAX_IRQ_PER_LINK + +extern struct pci_driver ocxl_pci_driver; + +struct ocxl_fn { + struct device dev; + int bar_used[3]; + struct ocxl_fn_config config; + struct list_head afu_list; + int pasid_base; + int actag_base; + int actag_enabled; + int actag_supported; + struct list_head pasid_list; + struct list_head actag_list; + void *link; +}; + +struct ocxl_file_info { + struct ocxl_afu *afu; + struct device dev; + struct cdev cdev; + struct bin_attribute attr_global_mmio; +}; + +struct ocxl_afu { + struct kref kref; + struct ocxl_fn *fn; + struct list_head list; + struct ocxl_afu_config config; + int pasid_base; + int pasid_count; /* opened contexts */ + int pasid_max; /* maximum number of contexts */ + int actag_base; + int actag_enabled; + struct mutex contexts_lock; + struct idr contexts_idr; + struct mutex afu_control_lock; + u64 global_mmio_start; + u64 irq_base_offset; + void __iomem *global_mmio_ptr; + u64 pp_mmio_start; + void *private; +}; + +enum ocxl_context_status { + CLOSED, + OPENED, + ATTACHED, +}; + +// Contains metadata about a translation fault +struct ocxl_xsl_error { + u64 addr; // The address that triggered the fault + u64 dsisr; // the value of the dsisr register + u64 count; // The number of times this fault has been triggered +}; + +struct ocxl_context { + struct ocxl_afu *afu; + int pasid; + struct mutex status_mutex; + enum ocxl_context_status status; + struct address_space *mapping; + struct mutex mapping_lock; + wait_queue_head_t events_wq; + struct mutex xsl_error_lock; + struct ocxl_xsl_error xsl_error; + struct mutex irq_lock; + struct idr irq_idr; + u16 tidr; // Thread ID used for P9 wait 
implementation +}; + +struct ocxl_process_element { + __be64 config_state; + __be32 pasid; + __be16 bdf; + __be16 reserved1; + __be32 reserved2[9]; + __be32 lpid; + __be32 tid; + __be32 pid; + __be32 reserved3[10]; + __be64 amr; + __be32 reserved4[3]; + __be32 software_state; +}; + +int ocxl_create_cdev(struct ocxl_afu *afu); +void ocxl_destroy_cdev(struct ocxl_afu *afu); +int ocxl_file_register_afu(struct ocxl_afu *afu); +void ocxl_file_unregister_afu(struct ocxl_afu *afu); + +int ocxl_file_init(void); +void ocxl_file_exit(void); + +int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size); +void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size); +int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size); +void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size); + +/* + * Get the max PASID value that can be used by the function + */ +int ocxl_config_get_pasid_info(struct pci_dev *dev, int *count); + +/* + * Control whether the FPGA is reloaded on a link reset + */ +int ocxl_config_get_reset_reload(struct pci_dev *dev, int *val); +int ocxl_config_set_reset_reload(struct pci_dev *dev, int val); + +/* + * Check if an AFU index is valid for the given function. + * + * AFU indexes can be sparse, so a driver should check all indexes up + * to the maximum found in the function description + */ +int ocxl_config_check_afu_index(struct pci_dev *dev, + struct ocxl_fn_config *fn, int afu_idx); + +/** + * ocxl_link_update_pe() - Update values within a Process Element + * @link_handle: the link handle associated with the process element + * @pasid: the PASID for the AFU context + * @tid: the new thread id for the process element + * + * Returns 0 on success + */ +int ocxl_link_update_pe(void *link_handle, int pasid, __u16 tid); + +int ocxl_context_mmap(struct ocxl_context *ctx, + struct vm_area_struct *vma); +void ocxl_context_detach_all(struct ocxl_afu *afu); + +int ocxl_sysfs_register_afu(struct ocxl_file_info *info); +void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info); + +int ocxl_irq_offset_to_id(struct ocxl_context *ctx, u64 offset); +u64 ocxl_irq_id_to_offset(struct ocxl_context *ctx, int irq_id); +void ocxl_afu_irq_free_all(struct ocxl_context *ctx); + +#endif /* _OCXL_INTERNAL_H_ */ diff --git a/drivers/misc/ocxl/pasid.c b/drivers/misc/ocxl/pasid.c new file mode 100644 index 000000000..d14cb56e6 --- /dev/null +++ b/drivers/misc/ocxl/pasid.c @@ -0,0 +1,107 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. 
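/*
 * Editor's note: range_alloc() below is a first-fit allocator over a list
 * of in-use (start, end) ranges kept ordered by start.  A short walkthrough
 * with assumed numbers (not taken from the driver): with max_pasid = 32 and
 * ranges [0..7] and [16..23] already allocated,
 *
 *	range_alloc(size = 8)  returns 8   -> the gap [8..15] is the first fit
 *	range_alloc(size = 16) returns -ENOSPC, because no single gap holds
 *	                       16 consecutive IDs even though 16 are free
 *
 * range_free() only removes a range matching the recorded (start, size)
 * exactly; anything else trips the WARN_ON.  The same allocator backs both
 * the per-function PASID and acTag ranges handed out to AFUs.
 */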
+#include "ocxl_internal.h" + + +struct id_range { + struct list_head list; + u32 start; + u32 end; +}; + +#ifdef DEBUG +static void dump_list(struct list_head *head, char *type_str) +{ + struct id_range *cur; + + pr_debug("%s ranges allocated:\n", type_str); + list_for_each_entry(cur, head, list) { + pr_debug("Range %d->%d\n", cur->start, cur->end); + } +} +#endif + +static int range_alloc(struct list_head *head, u32 size, int max_id, + char *type_str) +{ + struct list_head *pos; + struct id_range *cur, *new; + int rc, last_end; + + new = kmalloc(sizeof(struct id_range), GFP_KERNEL); + if (!new) + return -ENOMEM; + + pos = head; + last_end = -1; + list_for_each_entry(cur, head, list) { + if ((cur->start - last_end) > size) + break; + last_end = cur->end; + pos = &cur->list; + } + + new->start = last_end + 1; + new->end = new->start + size - 1; + + if (new->end > max_id) { + kfree(new); + rc = -ENOSPC; + } else { + list_add(&new->list, pos); + rc = new->start; + } + +#ifdef DEBUG + dump_list(head, type_str); +#endif + return rc; +} + +static void range_free(struct list_head *head, u32 start, u32 size, + char *type_str) +{ + bool found = false; + struct id_range *cur, *tmp; + + list_for_each_entry_safe(cur, tmp, head, list) { + if (cur->start == start && cur->end == (start + size - 1)) { + found = true; + list_del(&cur->list); + kfree(cur); + break; + } + } + WARN_ON(!found); +#ifdef DEBUG + dump_list(head, type_str); +#endif +} + +int ocxl_pasid_afu_alloc(struct ocxl_fn *fn, u32 size) +{ + int max_pasid; + + if (fn->config.max_pasid_log < 0) + return -ENOSPC; + max_pasid = 1 << fn->config.max_pasid_log; + return range_alloc(&fn->pasid_list, size, max_pasid, "afu pasid"); +} + +void ocxl_pasid_afu_free(struct ocxl_fn *fn, u32 start, u32 size) +{ + return range_free(&fn->pasid_list, start, size, "afu pasid"); +} + +int ocxl_actag_afu_alloc(struct ocxl_fn *fn, u32 size) +{ + int max_actag; + + max_actag = fn->actag_enabled; + return range_alloc(&fn->actag_list, size, max_actag, "afu actag"); +} + +void ocxl_actag_afu_free(struct ocxl_fn *fn, u32 start, u32 size) +{ + return range_free(&fn->actag_list, start, size, "afu actag"); +} diff --git a/drivers/misc/ocxl/pci.c b/drivers/misc/ocxl/pci.c new file mode 100644 index 000000000..cb920aa88 --- /dev/null +++ b/drivers/misc/ocxl/pci.c @@ -0,0 +1,66 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2019 IBM Corp. +#include <linux/module.h> +#include "ocxl_internal.h" + +/* + * Any opencapi device which wants to use this 'generic' driver should + * use the 0x062B device ID. Vendors should define the subsystem + * vendor/device ID to help differentiate devices. 
+ */ +static const struct pci_device_id ocxl_pci_tbl[] = { + { PCI_DEVICE(PCI_VENDOR_ID_IBM, 0x062B), }, + { } +}; +MODULE_DEVICE_TABLE(pci, ocxl_pci_tbl); + +static int ocxl_probe(struct pci_dev *dev, const struct pci_device_id *id) +{ + int rc; + struct ocxl_afu *afu, *tmp; + struct ocxl_fn *fn; + struct list_head *afu_list; + + fn = ocxl_function_open(dev); + if (IS_ERR(fn)) + return PTR_ERR(fn); + + pci_set_drvdata(dev, fn); + + afu_list = ocxl_function_afu_list(fn); + + list_for_each_entry_safe(afu, tmp, afu_list, list) { + // Cleanup handled within ocxl_file_register_afu() + rc = ocxl_file_register_afu(afu); + if (rc) { + dev_err(&dev->dev, "Failed to register AFU '%s' index %d", + afu->config.name, afu->config.idx); + } + } + + return 0; +} + +static void ocxl_remove(struct pci_dev *dev) +{ + struct ocxl_fn *fn; + struct ocxl_afu *afu; + struct list_head *afu_list; + + fn = pci_get_drvdata(dev); + afu_list = ocxl_function_afu_list(fn); + + list_for_each_entry(afu, afu_list, list) { + ocxl_file_unregister_afu(afu); + } + + ocxl_function_close(fn); +} + +struct pci_driver ocxl_pci_driver = { + .name = "ocxl", + .id_table = ocxl_pci_tbl, + .probe = ocxl_probe, + .remove = ocxl_remove, + .shutdown = ocxl_remove, +}; diff --git a/drivers/misc/ocxl/sysfs.c b/drivers/misc/ocxl/sysfs.c new file mode 100644 index 000000000..25c78df80 --- /dev/null +++ b/drivers/misc/ocxl/sysfs.c @@ -0,0 +1,186 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#include <linux/sysfs.h> +#include "ocxl_internal.h" + +static inline struct ocxl_afu *to_afu(struct device *device) +{ + struct ocxl_file_info *info = container_of(device, struct ocxl_file_info, dev); + + return info->afu; +} + +static ssize_t global_mmio_size_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + afu->config.global_mmio_size); +} + +static ssize_t pp_mmio_size_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%d\n", + afu->config.pp_mmio_stride); +} + +static ssize_t afu_version_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%hhu:%hhu\n", + afu->config.version_major, + afu->config.version_minor); +} + +static ssize_t contexts_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + + return scnprintf(buf, PAGE_SIZE, "%d/%d\n", + afu->pasid_count, afu->pasid_max); +} + +static ssize_t reload_on_reset_show(struct device *device, + struct device_attribute *attr, + char *buf) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int val; + + if (ocxl_config_get_reset_reload(pci_dev, &val)) + return scnprintf(buf, PAGE_SIZE, "unavailable\n"); + + return scnprintf(buf, PAGE_SIZE, "%d\n", val); +} + +static ssize_t reload_on_reset_store(struct device *device, + struct device_attribute *attr, + const char *buf, size_t count) +{ + struct ocxl_afu *afu = to_afu(device); + struct ocxl_fn *fn = afu->fn; + struct pci_dev *pci_dev = to_pci_dev(fn->dev.parent); + int rc, val; + + rc = kstrtoint(buf, 0, &val); + if (rc || (val != 0 && val != 1)) + return -EINVAL; + + if (ocxl_config_set_reset_reload(pci_dev, val)) + return -ENODEV; + + return count; +} + +static struct 
device_attribute afu_attrs[] = { + __ATTR_RO(global_mmio_size), + __ATTR_RO(pp_mmio_size), + __ATTR_RO(afu_version), + __ATTR_RO(contexts), + __ATTR_RW(reload_on_reset), +}; + +static ssize_t global_mmio_read(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, char *buf, + loff_t off, size_t count) +{ + struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj)); + + if (count == 0 || off < 0 || + off >= afu->config.global_mmio_size) + return 0; + memcpy_fromio(buf, afu->global_mmio_ptr + off, count); + return count; +} + +static vm_fault_t global_mmio_fault(struct vm_fault *vmf) +{ + struct vm_area_struct *vma = vmf->vma; + struct ocxl_afu *afu = vma->vm_private_data; + unsigned long offset; + + if (vmf->pgoff >= (afu->config.global_mmio_size >> PAGE_SHIFT)) + return VM_FAULT_SIGBUS; + + offset = vmf->pgoff; + offset += (afu->global_mmio_start >> PAGE_SHIFT); + return vmf_insert_pfn(vma, vmf->address, offset); +} + +static const struct vm_operations_struct global_mmio_vmops = { + .fault = global_mmio_fault, +}; + +static int global_mmio_mmap(struct file *filp, struct kobject *kobj, + struct bin_attribute *bin_attr, + struct vm_area_struct *vma) +{ + struct ocxl_afu *afu = to_afu(kobj_to_dev(kobj)); + + if ((vma_pages(vma) + vma->vm_pgoff) > + (afu->config.global_mmio_size >> PAGE_SHIFT)) + return -EINVAL; + + vma->vm_flags |= VM_IO | VM_PFNMAP; + vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); + vma->vm_ops = &global_mmio_vmops; + vma->vm_private_data = afu; + return 0; +} + +int ocxl_sysfs_register_afu(struct ocxl_file_info *info) +{ + int i, rc; + + for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) { + rc = device_create_file(&info->dev, &afu_attrs[i]); + if (rc) + goto err; + } + + sysfs_attr_init(&info->attr_global_mmio.attr); + info->attr_global_mmio.attr.name = "global_mmio_area"; + info->attr_global_mmio.attr.mode = 0600; + info->attr_global_mmio.size = info->afu->config.global_mmio_size; + info->attr_global_mmio.read = global_mmio_read; + info->attr_global_mmio.mmap = global_mmio_mmap; + rc = device_create_bin_file(&info->dev, &info->attr_global_mmio); + if (rc) { + dev_err(&info->dev, "Unable to create global mmio attr for afu: %d\n", rc); + goto err; + } + + return 0; + +err: + for (i--; i >= 0; i--) + device_remove_file(&info->dev, &afu_attrs[i]); + + return rc; +} + +void ocxl_sysfs_unregister_afu(struct ocxl_file_info *info) +{ + int i; + + /* + * device_remove_bin_file is safe to call if the file is not added as + * the files are removed by name, and early exit if not found + */ + for (i = 0; i < ARRAY_SIZE(afu_attrs); i++) + device_remove_file(&info->dev, &afu_attrs[i]); + device_remove_bin_file(&info->dev, &info->attr_global_mmio); +} diff --git a/drivers/misc/ocxl/trace.c b/drivers/misc/ocxl/trace.c new file mode 100644 index 000000000..1e6947049 --- /dev/null +++ b/drivers/misc/ocxl/trace.c @@ -0,0 +1,6 @@ +// SPDX-License-Identifier: GPL-2.0+ +// Copyright 2017 IBM Corp. +#ifndef __CHECKER__ +#define CREATE_TRACE_POINTS +#include "trace.h" +#endif diff --git a/drivers/misc/ocxl/trace.h b/drivers/misc/ocxl/trace.h new file mode 100644 index 000000000..a33a5094f --- /dev/null +++ b/drivers/misc/ocxl/trace.h @@ -0,0 +1,242 @@ +/* SPDX-License-Identifier: GPL-2.0+ */ +// Copyright 2017 IBM Corp. 
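/*
 * Editor's note: the global_mmio_area binary attribute registered by
 * ocxl_sysfs_register_afu() above supports both read(2) and mmap(2) from
 * userspace.  A minimal userspace sketch follows; the sysfs path is an
 * assumption built from the "ocxl" class and the "<afu name>.<pci
 * address>.<afu index>" device name used in file.c, and the 4096-byte
 * length stands in for the AFU's real global MMIO size.
 *
 *	#include <fcntl.h>
 *	#include <stdint.h>
 *	#include <stdio.h>
 *	#include <sys/mman.h>
 *	#include <unistd.h>
 *
 *	int main(void)
 *	{
 *		const char *path =
 *			"/sys/class/ocxl/IBM,MEMCPY.0004:00:00.1.0/global_mmio_area";
 *		int fd = open(path, O_RDONLY);
 *		if (fd < 0)
 *			return 1;
 *
 *		// Map the AFU's global MMIO window read-only
 *		volatile uint64_t *mmio = mmap(NULL, 4096, PROT_READ,
 *					       MAP_SHARED, fd, 0);
 *		if (mmio == MAP_FAILED)
 *			return 1;
 *
 *		printf("first register: 0x%016llx\n",
 *		       (unsigned long long)mmio[0]);
 *		munmap((void *)mmio, 4096);
 *		close(fd);
 *		return 0;
 *	}
 *
 * Reads are backed by global_mmio_read() via memcpy_fromio(); page faults
 * on the mapping are resolved by global_mmio_fault(), which inserts the
 * corresponding MMIO pfn.
 */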
+#undef TRACE_SYSTEM +#define TRACE_SYSTEM ocxl + +#if !defined(_TRACE_OCXL_H) || defined(TRACE_HEADER_MULTI_READ) +#define _TRACE_OCXL_H + +#include <linux/tracepoint.h> + + +TRACE_EVENT(ocxl_mmu_notifier_range, + TP_PROTO(unsigned long start, unsigned long end, unsigned long pidr), + TP_ARGS(start, end, pidr), + + TP_STRUCT__entry( + __field(unsigned long, start) + __field(unsigned long, end) + __field(unsigned long, pidr) + ), + + TP_fast_assign( + __entry->start = start; + __entry->end = end; + __entry->pidr = pidr; + ), + + TP_printk("start=0x%lx end=0x%lx pidr=0x%lx", + __entry->start, + __entry->end, + __entry->pidr + ) +); + +TRACE_EVENT(ocxl_init_mmu_notifier, + TP_PROTO(int pasid, unsigned long pidr), + TP_ARGS(pasid, pidr), + + TP_STRUCT__entry( + __field(int, pasid) + __field(unsigned long, pidr) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->pidr = pidr; + ), + + TP_printk("pasid=%d, pidr=0x%lx", + __entry->pasid, + __entry->pidr + ) +); + +TRACE_EVENT(ocxl_release_mmu_notifier, + TP_PROTO(int pasid, unsigned long pidr), + TP_ARGS(pasid, pidr), + + TP_STRUCT__entry( + __field(int, pasid) + __field(unsigned long, pidr) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->pidr = pidr; + ), + + TP_printk("pasid=%d, pidr=0x%lx", + __entry->pasid, + __entry->pidr + ) +); + +DECLARE_EVENT_CLASS(ocxl_context, + TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr), + TP_ARGS(pid, spa, pasid, pidr, tidr), + + TP_STRUCT__entry( + __field(pid_t, pid) + __field(void*, spa) + __field(int, pasid) + __field(u32, pidr) + __field(u32, tidr) + ), + + TP_fast_assign( + __entry->pid = pid; + __entry->spa = spa; + __entry->pasid = pasid; + __entry->pidr = pidr; + __entry->tidr = tidr; + ), + + TP_printk("linux pid=%d spa=0x%p pasid=0x%x pidr=0x%x tidr=0x%x", + __entry->pid, + __entry->spa, + __entry->pasid, + __entry->pidr, + __entry->tidr + ) +); + +DEFINE_EVENT(ocxl_context, ocxl_context_add, + TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr), + TP_ARGS(pid, spa, pasid, pidr, tidr) +); + +DEFINE_EVENT(ocxl_context, ocxl_context_remove, + TP_PROTO(pid_t pid, void *spa, int pasid, u32 pidr, u32 tidr), + TP_ARGS(pid, spa, pasid, pidr, tidr) +); + +TRACE_EVENT(ocxl_terminate_pasid, + TP_PROTO(int pasid, int rc), + TP_ARGS(pasid, rc), + + TP_STRUCT__entry( + __field(int, pasid) + __field(int, rc) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->rc = rc; + ), + + TP_printk("pasid=0x%x rc=%d", + __entry->pasid, + __entry->rc + ) +); + +DECLARE_EVENT_CLASS(ocxl_fault_handler, + TP_PROTO(void *spa, u64 pe, u64 dsisr, u64 dar, u64 tfc), + TP_ARGS(spa, pe, dsisr, dar, tfc), + + TP_STRUCT__entry( + __field(void *, spa) + __field(u64, pe) + __field(u64, dsisr) + __field(u64, dar) + __field(u64, tfc) + ), + + TP_fast_assign( + __entry->spa = spa; + __entry->pe = pe; + __entry->dsisr = dsisr; + __entry->dar = dar; + __entry->tfc = tfc; + ), + + TP_printk("spa=%p pe=0x%llx dsisr=0x%llx dar=0x%llx tfc=0x%llx", + __entry->spa, + __entry->pe, + __entry->dsisr, + __entry->dar, + __entry->tfc + ) +); + +DEFINE_EVENT(ocxl_fault_handler, ocxl_fault, + TP_PROTO(void *spa, u64 pe, u64 dsisr, u64 dar, u64 tfc), + TP_ARGS(spa, pe, dsisr, dar, tfc) +); + +DEFINE_EVENT(ocxl_fault_handler, ocxl_fault_ack, + TP_PROTO(void *spa, u64 pe, u64 dsisr, u64 dar, u64 tfc), + TP_ARGS(spa, pe, dsisr, dar, tfc) +); + +TRACE_EVENT(ocxl_afu_irq_alloc, + TP_PROTO(int pasid, int irq_id, unsigned int virq, int hw_irq), + TP_ARGS(pasid, irq_id, virq, hw_irq), + + TP_STRUCT__entry( + 
__field(int, pasid) + __field(int, irq_id) + __field(unsigned int, virq) + __field(int, hw_irq) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->irq_id = irq_id; + __entry->virq = virq; + __entry->hw_irq = hw_irq; + ), + + TP_printk("pasid=0x%x irq_id=%d virq=%u hw_irq=%d", + __entry->pasid, + __entry->irq_id, + __entry->virq, + __entry->hw_irq + ) +); + +TRACE_EVENT(ocxl_afu_irq_free, + TP_PROTO(int pasid, int irq_id), + TP_ARGS(pasid, irq_id), + + TP_STRUCT__entry( + __field(int, pasid) + __field(int, irq_id) + ), + + TP_fast_assign( + __entry->pasid = pasid; + __entry->irq_id = irq_id; + ), + + TP_printk("pasid=0x%x irq_id=%d", + __entry->pasid, + __entry->irq_id + ) +); + +TRACE_EVENT(ocxl_afu_irq_receive, + TP_PROTO(int virq), + TP_ARGS(virq), + + TP_STRUCT__entry( + __field(int, virq) + ), + + TP_fast_assign( + __entry->virq = virq; + ), + + TP_printk("virq=%d", + __entry->virq + ) +); + +#endif /* _TRACE_OCXL_H */ + +/* This part must be outside protection */ +#undef TRACE_INCLUDE_PATH +#define TRACE_INCLUDE_PATH . +#define TRACE_INCLUDE_FILE trace +#include <trace/define_trace.h> |
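Editor's note: file.c above defines the userspace API for the /dev/ocxl
character devices.  The sketch below condenses the typical flow it enables:
open the AFU device, attach the calling process, allocate an AFU interrupt
and bind it to an eventfd to wait on.  It is illustrative only; the device
name is an assumption (it follows the "<afu name>.<pci address>.<afu index>"
pattern used by ocxl_file_register_afu()), error handling is minimal, and
the AFU-specific programming step is elided.

	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <string.h>
	#include <sys/eventfd.h>
	#include <sys/ioctl.h>
	#include <unistd.h>
	#include <misc/ocxl.h>	/* installed from include/uapi/misc/ocxl.h */

	int main(void)
	{
		struct ocxl_ioctl_attach attach;
		struct ocxl_ioctl_irq_fd irq_fd;
		uint64_t irq_offset, count;
		int fd, efd;

		fd = open("/dev/ocxl/IBM,MEMCPY.0004:00:00.1.0", O_RDWR);
		if (fd < 0)
			return 1;

		/* Attach the calling process; reserved fields must stay zero */
		memset(&attach, 0, sizeof(attach));
		if (ioctl(fd, OCXL_IOCTL_ATTACH, &attach))
			return 1;

		/* Allocate an AFU interrupt; the kernel returns its offset */
		if (ioctl(fd, OCXL_IOCTL_IRQ_ALLOC, &irq_offset))
			return 1;

		/* Route that interrupt to an eventfd the process can block on */
		efd = eventfd(0, 0);
		memset(&irq_fd, 0, sizeof(irq_fd));
		irq_fd.irq_offset = irq_offset;
		irq_fd.eventfd = efd;
		if (ioctl(fd, OCXL_IOCTL_IRQ_SET_FD, &irq_fd))
			return 1;

		/* ... program the AFU (device specific), then wait for it ... */
		if (read(efd, &count, sizeof(count)) == (ssize_t)sizeof(count))
			printf("received %llu interrupt(s)\n",
			       (unsigned long long)count);

		close(efd);
		close(fd);
		return 0;
	}

The irq_offset returned by OCXL_IOCTL_IRQ_ALLOC is also used as the mmap
offset for that interrupt's trigger page on the same file descriptor, which
is why afu_irq.c converts between offsets and IRQ IDs with
ocxl_irq_id_to_offset()/ocxl_irq_offset_to_id() and unmaps that range when
the interrupt is freed.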