path: root/arch/s390/pci

Diffstat:
 arch/s390/pci/Makefile    |   9
 arch/s390/pci/pci.c       | 983
 arch/s390/pci/pci_bus.c   | 275
 arch/s390/pci/pci_bus.h   |  37
 arch/s390/pci/pci_clp.c   | 658
 arch/s390/pci/pci_debug.c | 210
 arch/s390/pci/pci_dma.c   | 695
 arch/s390/pci/pci_event.c | 175
 arch/s390/pci/pci_insn.c  | 386
 arch/s390/pci/pci_iov.c   |  99
 arch/s390/pci/pci_iov.h   |  30
 arch/s390/pci/pci_irq.c   | 495
 arch/s390/pci/pci_mmio.c  | 328
 arch/s390/pci/pci_sysfs.c | 174
 14 files changed, 4554 insertions(+), 0 deletions(-)
diff --git a/arch/s390/pci/Makefile b/arch/s390/pci/Makefile
new file mode 100644
index 000000000..bf557a1b7
--- /dev/null
+++ b/arch/s390/pci/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the s390 PCI subsystem.
+#
+
+obj-$(CONFIG_PCI) += pci.o pci_irq.o pci_dma.o pci_clp.o pci_sysfs.o \
+ pci_event.o pci_debug.o pci_insn.o pci_mmio.o \
+ pci_bus.o
+obj-$(CONFIG_PCI_IOV) += pci_iov.o
diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c
new file mode 100644
index 000000000..74799439b
--- /dev/null
+++ b/arch/s390/pci/pci.c
@@ -0,0 +1,983 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ *
+ * The System z PCI code is a rewrite from a prototype by
+ * the following people (Kudoz!):
+ * Alexander Schmidt
+ * Christoph Raisch
+ * Hannes Hering
+ * Hoang-Nam Nguyen
+ * Jan-Bernd Themann
+ * Stefan Roscher
+ * Thomas Klein
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/jump_label.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+
+#include <asm/isc.h>
+#include <asm/airq.h>
+#include <asm/facility.h>
+#include <asm/pci_insn.h>
+#include <asm/pci_clp.h>
+#include <asm/pci_dma.h>
+
+#include "pci_bus.h"
+#include "pci_iov.h"
+
+/* list of all detected zpci devices */
+static LIST_HEAD(zpci_list);
+static DEFINE_SPINLOCK(zpci_list_lock);
+
+static DECLARE_BITMAP(zpci_domain, ZPCI_DOMAIN_BITMAP_SIZE);
+static DEFINE_SPINLOCK(zpci_domain_lock);
+
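+/*
+ * Sizing heuristic (an assumption, not architected): a 64-bit BAR occupies
+ * two of the standard BAR registers, so at most half of
+ * ZPCI_NR_DEVICES * PCI_STD_NUM_BARS iomap entries should ever be needed,
+ * capped at ZPCI_IOMAP_MAX_ENTRIES.
+ */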
+#define ZPCI_IOMAP_ENTRIES \
+ min(((unsigned long) ZPCI_NR_DEVICES * PCI_STD_NUM_BARS / 2), \
+ ZPCI_IOMAP_MAX_ENTRIES)
+
+unsigned int s390_pci_no_rid;
+
+static DEFINE_SPINLOCK(zpci_iomap_lock);
+static unsigned long *zpci_iomap_bitmap;
+struct zpci_iomap_entry *zpci_iomap_start;
+EXPORT_SYMBOL_GPL(zpci_iomap_start);
+
+DEFINE_STATIC_KEY_FALSE(have_mio);
+
+static struct kmem_cache *zdev_fmb_cache;
+
+struct zpci_dev *get_zdev_by_fid(u32 fid)
+{
+ struct zpci_dev *tmp, *zdev = NULL;
+
+ spin_lock(&zpci_list_lock);
+ list_for_each_entry(tmp, &zpci_list, entry) {
+ if (tmp->fid == fid) {
+ zdev = tmp;
+ zpci_zdev_get(zdev);
+ break;
+ }
+ }
+ spin_unlock(&zpci_list_lock);
+ return zdev;
+}
+
+void zpci_remove_reserved_devices(void)
+{
+ struct zpci_dev *tmp, *zdev;
+ enum zpci_state state;
+ LIST_HEAD(remove);
+
+ spin_lock(&zpci_list_lock);
+ list_for_each_entry_safe(zdev, tmp, &zpci_list, entry) {
+ if (zdev->state == ZPCI_FN_STATE_STANDBY &&
+ !clp_get_state(zdev->fid, &state) &&
+ state == ZPCI_FN_STATE_RESERVED)
+ list_move_tail(&zdev->entry, &remove);
+ }
+ spin_unlock(&zpci_list_lock);
+
+ list_for_each_entry_safe(zdev, tmp, &remove, entry)
+ zpci_device_reserved(zdev);
+}
+
+int pci_domain_nr(struct pci_bus *bus)
+{
+ return ((struct zpci_bus *) bus->sysdata)->domain_nr;
+}
+EXPORT_SYMBOL_GPL(pci_domain_nr);
+
+int pci_proc_domain(struct pci_bus *bus)
+{
+ return pci_domain_nr(bus);
+}
+EXPORT_SYMBOL_GPL(pci_proc_domain);
+
+/* Modify PCI: Register I/O address translation parameters */
+int zpci_register_ioat(struct zpci_dev *zdev, u8 dmaas,
+ u64 base, u64 limit, u64 iota)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_REG_IOAT);
+ struct zpci_fib fib = {0};
+ u8 status;
+
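+ /*
+ * The low bits of the I/O translation anchor hold format flags such as
+ * ZPCI_IOTA_RTTO_FLAG, so the table origin must leave the low 14 bits
+ * clear (checked below).
+ */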
+ WARN_ON_ONCE(iota & 0x3fff);
+ fib.pba = base;
+ fib.pal = limit;
+ fib.iota = iota | ZPCI_IOTA_RTTO_FLAG;
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister I/O address translation parameters */
+int zpci_unregister_ioat(struct zpci_dev *zdev, u8 dmaas)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, dmaas, ZPCI_MOD_FC_DEREG_IOAT);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3) /* Function already gone. */
+ cc = 0;
+ return cc ? -EIO : 0;
+}
+
+/* Modify PCI: Set PCI function measurement parameters */
+int zpci_fmb_enable_device(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ if (zdev->fmb || sizeof(*zdev->fmb) < zdev->fmb_length)
+ return -EINVAL;
+
+ zdev->fmb = kmem_cache_zalloc(zdev_fmb_cache, GFP_KERNEL);
+ if (!zdev->fmb)
+ return -ENOMEM;
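+ /* The measurement block is expected to be 16-byte aligned (checked below). */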
+ WARN_ON((u64) zdev->fmb & 0xf);
+
+ /* reset software counters */
+ atomic64_set(&zdev->allocated_pages, 0);
+ atomic64_set(&zdev->mapped_pages, 0);
+ atomic64_set(&zdev->unmapped_pages, 0);
+
+ fib.fmb_addr = virt_to_phys(zdev->fmb);
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc) {
+ kmem_cache_free(zdev_fmb_cache, zdev->fmb);
+ zdev->fmb = NULL;
+ }
+ return cc ? -EIO : 0;
+}
+
+/* Modify PCI: Disable PCI function measurement */
+int zpci_fmb_disable_device(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_SET_MEASURE);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ if (!zdev->fmb)
+ return -EINVAL;
+
+ /* Function measurement is disabled if fmb address is zero */
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3) /* Function already gone. */
+ cc = 0;
+
+ if (!cc) {
+ kmem_cache_free(zdev_fmb_cache, zdev->fmb);
+ zdev->fmb = NULL;
+ }
+ return cc ? -EIO : 0;
+}
+
+static int zpci_cfg_load(struct zpci_dev *zdev, int offset, u32 *val, u8 len)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
+ u64 data;
+ int rc;
+
+ rc = __zpci_load(&data, req, offset);
+ if (!rc) {
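+ /*
+ * PCI config data is little-endian: byte-swap the raw doubleword and
+ * right-align the len bytes of interest.
+ */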
+ data = le64_to_cpu((__force __le64) data);
+ data >>= (8 - len) * 8;
+ *val = (u32) data;
+ } else
+ *val = 0xffffffff;
+ return rc;
+}
+
+static int zpci_cfg_store(struct zpci_dev *zdev, int offset, u32 val, u8 len)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, ZPCI_PCIAS_CFGSPC, len);
+ u64 data = val;
+ int rc;
+
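+ /* Left-align the len bytes of the value, then store them little-endian. */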
+ data <<= (8 - len) * 8;
+ data = (__force u64) cpu_to_le64(data);
+ rc = __zpci_store(data, req, offset);
+ return rc;
+}
+
+resource_size_t pcibios_align_resource(void *data, const struct resource *res,
+ resource_size_t size,
+ resource_size_t align)
+{
+ return 0;
+}
+
+/* combine single writes by using store-block insn */
+void __iowrite64_copy(void __iomem *to, const void *from, size_t count)
+{
+ zpci_memcpy_toio(to, from, count);
+}
+
+static void __iomem *__ioremap(phys_addr_t addr, size_t size, pgprot_t prot)
+{
+ unsigned long offset, vaddr;
+ struct vm_struct *area;
+ phys_addr_t last_addr;
+
+ last_addr = addr + size - 1;
+ if (!size || last_addr < addr)
+ return NULL;
+
+ if (!static_branch_unlikely(&have_mio))
+ return (void __iomem *) addr;
+
+ offset = addr & ~PAGE_MASK;
+ addr &= PAGE_MASK;
+ size = PAGE_ALIGN(size + offset);
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ vaddr = (unsigned long) area->addr;
+ if (ioremap_page_range(vaddr, vaddr + size, addr, prot)) {
+ free_vm_area(area);
+ return NULL;
+ }
+ return (void __iomem *) ((unsigned long) area->addr + offset);
+}
+
+void __iomem *ioremap_prot(phys_addr_t addr, size_t size, unsigned long prot)
+{
+ return __ioremap(addr, size, __pgprot(prot));
+}
+EXPORT_SYMBOL(ioremap_prot);
+
+void __iomem *ioremap(phys_addr_t addr, size_t size)
+{
+ return __ioremap(addr, size, PAGE_KERNEL);
+}
+EXPORT_SYMBOL(ioremap);
+
+void __iomem *ioremap_wc(phys_addr_t addr, size_t size)
+{
+ return __ioremap(addr, size, pgprot_writecombine(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(ioremap_wc);
+
+void __iomem *ioremap_wt(phys_addr_t addr, size_t size)
+{
+ return __ioremap(addr, size, pgprot_writethrough(PAGE_KERNEL));
+}
+EXPORT_SYMBOL(ioremap_wt);
+
+void iounmap(volatile void __iomem *addr)
+{
+ if (static_branch_likely(&have_mio))
+ vunmap((__force void *) ((unsigned long) addr & PAGE_MASK));
+}
+EXPORT_SYMBOL(iounmap);
+
+/* Create a virtual mapping cookie for a PCI BAR */
+static void __iomem *pci_iomap_range_fh(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ int idx;
+
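+ /*
+ * No real mapping is created here: the returned cookie encodes an index
+ * into zpci_iomap_start, and the I/O accessors later use the fh/BAR
+ * recorded there to issue PCI load/store instructions.
+ */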
+ idx = zdev->bars[bar].map_idx;
+ spin_lock(&zpci_iomap_lock);
+ /* Detect overrun */
+ WARN_ON(!++zpci_iomap_start[idx].count);
+ zpci_iomap_start[idx].fh = zdev->fh;
+ zpci_iomap_start[idx].bar = bar;
+ spin_unlock(&zpci_iomap_lock);
+
+ return (void __iomem *) ZPCI_ADDR(idx) + offset;
+}
+
+static void __iomem *pci_iomap_range_mio(struct pci_dev *pdev, int bar,
+ unsigned long offset,
+ unsigned long max)
+{
+ unsigned long barsize = pci_resource_len(pdev, bar);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ void __iomem *iova;
+
+ iova = ioremap((unsigned long) zdev->bars[bar].mio_wt, barsize);
+ return iova ? iova + offset : iova;
+}
+
+void __iomem *pci_iomap_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
+ return NULL;
+
+ if (static_branch_likely(&have_mio))
+ return pci_iomap_range_mio(pdev, bar, offset, max);
+ else
+ return pci_iomap_range_fh(pdev, bar, offset, max);
+}
+EXPORT_SYMBOL(pci_iomap_range);
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap);
+
+static void __iomem *pci_iomap_wc_range_mio(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ unsigned long barsize = pci_resource_len(pdev, bar);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ void __iomem *iova;
+
+ iova = ioremap((unsigned long) zdev->bars[bar].mio_wb, barsize);
+ return iova ? iova + offset : iova;
+}
+
+void __iomem *pci_iomap_wc_range(struct pci_dev *pdev, int bar,
+ unsigned long offset, unsigned long max)
+{
+ if (bar >= PCI_STD_NUM_BARS || !pci_resource_len(pdev, bar))
+ return NULL;
+
+ if (static_branch_likely(&have_mio))
+ return pci_iomap_wc_range_mio(pdev, bar, offset, max);
+ else
+ return pci_iomap_range_fh(pdev, bar, offset, max);
+}
+EXPORT_SYMBOL(pci_iomap_wc_range);
+
+void __iomem *pci_iomap_wc(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return pci_iomap_wc_range(dev, bar, 0, maxlen);
+}
+EXPORT_SYMBOL(pci_iomap_wc);
+
+static void pci_iounmap_fh(struct pci_dev *pdev, void __iomem *addr)
+{
+ unsigned int idx = ZPCI_IDX(addr);
+
+ spin_lock(&zpci_iomap_lock);
+ /* Detect underrun */
+ WARN_ON(!zpci_iomap_start[idx].count);
+ if (!--zpci_iomap_start[idx].count) {
+ zpci_iomap_start[idx].fh = 0;
+ zpci_iomap_start[idx].bar = 0;
+ }
+ spin_unlock(&zpci_iomap_lock);
+}
+
+static void pci_iounmap_mio(struct pci_dev *pdev, void __iomem *addr)
+{
+ iounmap(addr);
+}
+
+void pci_iounmap(struct pci_dev *pdev, void __iomem *addr)
+{
+ if (static_branch_likely(&have_mio))
+ pci_iounmap_mio(pdev, addr);
+ else
+ pci_iounmap_fh(pdev, addr);
+}
+EXPORT_SYMBOL(pci_iounmap);
+
+static int pci_read(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *val)
+{
+ struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+
+ return (zdev) ? zpci_cfg_load(zdev, where, val, size) : -ENODEV;
+}
+
+static int pci_write(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 val)
+{
+ struct zpci_dev *zdev = get_zdev_by_bus(bus, devfn);
+
+ return (zdev) ? zpci_cfg_store(zdev, where, val, size) : -ENODEV;
+}
+
+static struct pci_ops pci_root_ops = {
+ .read = pci_read,
+ .write = pci_write,
+};
+
+static void zpci_map_resources(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ resource_size_t len;
+ int i;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+
+ if (zpci_use_mio(zdev))
+ pdev->resource[i].start =
+ (resource_size_t __force) zdev->bars[i].mio_wt;
+ else
+ pdev->resource[i].start = (resource_size_t __force)
+ pci_iomap_range_fh(pdev, i, 0, 0);
+ pdev->resource[i].end = pdev->resource[i].start + len - 1;
+ }
+
+ zpci_iov_map_resources(pdev);
+}
+
+static void zpci_unmap_resources(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ resource_size_t len;
+ int i;
+
+ if (zpci_use_mio(zdev))
+ return;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ len = pci_resource_len(pdev, i);
+ if (!len)
+ continue;
+ pci_iounmap_fh(pdev, (void __iomem __force *)
+ pdev->resource[i].start);
+ }
+}
+
+static int zpci_alloc_iomap(struct zpci_dev *zdev)
+{
+ unsigned long entry;
+
+ spin_lock(&zpci_iomap_lock);
+ entry = find_first_zero_bit(zpci_iomap_bitmap, ZPCI_IOMAP_ENTRIES);
+ if (entry == ZPCI_IOMAP_ENTRIES) {
+ spin_unlock(&zpci_iomap_lock);
+ return -ENOSPC;
+ }
+ set_bit(entry, zpci_iomap_bitmap);
+ spin_unlock(&zpci_iomap_lock);
+ return entry;
+}
+
+static void zpci_free_iomap(struct zpci_dev *zdev, int entry)
+{
+ spin_lock(&zpci_iomap_lock);
+ memset(&zpci_iomap_start[entry], 0, sizeof(struct zpci_iomap_entry));
+ clear_bit(entry, zpci_iomap_bitmap);
+ spin_unlock(&zpci_iomap_lock);
+}
+
+static struct resource *__alloc_res(struct zpci_dev *zdev, unsigned long start,
+ unsigned long size, unsigned long flags)
+{
+ struct resource *r;
+
+ r = kzalloc(sizeof(*r), GFP_KERNEL);
+ if (!r)
+ return NULL;
+
+ r->start = start;
+ r->end = r->start + size - 1;
+ r->flags = flags;
+ r->name = zdev->res_name;
+
+ if (request_resource(&iomem_resource, r)) {
+ kfree(r);
+ return NULL;
+ }
+ return r;
+}
+
+int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ struct list_head *resources)
+{
+ unsigned long addr, size, flags;
+ struct resource *res;
+ int i, entry;
+
+ snprintf(zdev->res_name, sizeof(zdev->res_name),
+ "PCI Bus %04x:%02x", zdev->uid, ZPCI_BUS_NR);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!zdev->bars[i].size)
+ continue;
+ entry = zpci_alloc_iomap(zdev);
+ if (entry < 0)
+ return entry;
+ zdev->bars[i].map_idx = entry;
+
+ /* only MMIO is supported */
+ flags = IORESOURCE_MEM;
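+ /* Bit 3 of the BAR value is the standard PCI prefetchable bit and
+ * bit 2 the 64-bit memory type bit. */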
+ if (zdev->bars[i].val & 8)
+ flags |= IORESOURCE_PREFETCH;
+ if (zdev->bars[i].val & 4)
+ flags |= IORESOURCE_MEM_64;
+
+ if (zpci_use_mio(zdev))
+ addr = (unsigned long) zdev->bars[i].mio_wt;
+ else
+ addr = ZPCI_ADDR(entry);
+ size = 1UL << zdev->bars[i].size;
+
+ res = __alloc_res(zdev, addr, size, flags);
+ if (!res) {
+ zpci_free_iomap(zdev, entry);
+ return -ENOMEM;
+ }
+ zdev->bars[i].res = res;
+ pci_add_resource(resources, res);
+ }
+
+ return 0;
+}
+
+static void zpci_cleanup_bus_resources(struct zpci_dev *zdev)
+{
+ int i;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!zdev->bars[i].size || !zdev->bars[i].res)
+ continue;
+
+ zpci_free_iomap(zdev, zdev->bars[i].map_idx);
+ release_resource(zdev->bars[i].res);
+ kfree(zdev->bars[i].res);
+ }
+}
+
+int pcibios_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct resource *res;
+ int i;
+
+ /* The pdev has a reference to the zdev via its bus */
+ zpci_zdev_get(zdev);
+ if (pdev->is_physfn)
+ pdev->no_vf_scan = 1;
+
+ pdev->dev.groups = zpci_attr_groups;
+ pdev->dev.dma_ops = &s390_pci_dma_ops;
+ zpci_map_resources(pdev);
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ res = &pdev->resource[i];
+ if (res->parent || !res->flags)
+ continue;
+ pci_claim_resource(pdev, i);
+ }
+
+ return 0;
+}
+
+void pcibios_release_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ zpci_unmap_resources(pdev);
+ zpci_zdev_put(zdev);
+}
+
+int pcibios_enable_device(struct pci_dev *pdev, int mask)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ zpci_debug_init_device(zdev, dev_name(&pdev->dev));
+ zpci_fmb_enable_device(zdev);
+
+ return pci_enable_resources(pdev, mask);
+}
+
+void pcibios_disable_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ zpci_fmb_disable_device(zdev);
+ zpci_debug_exit_device(zdev);
+}
+
+static int __zpci_register_domain(int domain)
+{
+ spin_lock(&zpci_domain_lock);
+ if (test_bit(domain, zpci_domain)) {
+ spin_unlock(&zpci_domain_lock);
+ pr_err("Domain %04x is already assigned\n", domain);
+ return -EEXIST;
+ }
+ set_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+ return domain;
+}
+
+static int __zpci_alloc_domain(void)
+{
+ int domain;
+
+ spin_lock(&zpci_domain_lock);
+ /*
+ * We can always auto allocate domains below ZPCI_NR_DEVICES.
+ * There is either a free domain or we have reached the maximum in
+ * which case we would have bailed earlier.
+ */
+ domain = find_first_zero_bit(zpci_domain, ZPCI_NR_DEVICES);
+ set_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+ return domain;
+}
+
+int zpci_alloc_domain(int domain)
+{
+ if (zpci_unique_uid) {
+ if (domain)
+ return __zpci_register_domain(domain);
+ pr_warn("UID checking was active but no UID is provided: switching to automatic domain allocation\n");
+ update_uid_checking(false);
+ }
+ return __zpci_alloc_domain();
+}
+
+void zpci_free_domain(int domain)
+{
+ spin_lock(&zpci_domain_lock);
+ clear_bit(domain, zpci_domain);
+ spin_unlock(&zpci_domain_lock);
+}
+
+int zpci_enable_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (clp_enable_fh(zdev, ZPCI_NR_DMA_SPACES)) {
+ rc = -EIO;
+ goto out;
+ }
+
+ rc = zpci_dma_init_device(zdev);
+ if (rc)
+ goto out_dma;
+
+ zdev->state = ZPCI_FN_STATE_ONLINE;
+ return 0;
+
+out_dma:
+ clp_disable_fh(zdev);
+out:
+ return rc;
+}
+EXPORT_SYMBOL_GPL(zpci_enable_device);
+
+int zpci_disable_device(struct zpci_dev *zdev)
+{
+ zpci_dma_exit_device(zdev);
+ /*
+ * The zPCI function may already be disabled by the platform, this is
+ * detected in clp_disable_fh() which becomes a no-op.
+ */
+ return clp_disable_fh(zdev) ? -EIO : 0;
+}
+EXPORT_SYMBOL_GPL(zpci_disable_device);
+
+/**
+ * zpci_remove_device() - Removes the given zdev from the PCI core
+ * @zdev: the zdev to be removed from the PCI core
+ * @set_error: if true the device's error state is set to permanent failure
+ *
+ * Sets a zPCI device to a configured but offline state; the zPCI
+ * device is still accessible through its hotplug slot and the zPCI
+ * API but is removed from the common code PCI bus, making it
+ * no longer available to drivers.
+ */
+void zpci_remove_device(struct zpci_dev *zdev, bool set_error)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+ struct pci_dev *pdev;
+
+ if (!zbus->bus)
+ return;
+
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (pdev) {
+ if (set_error)
+ pdev->error_state = pci_channel_io_perm_failure;
+ if (pdev->is_virtfn) {
+ zpci_iov_remove_virtfn(pdev, zdev->vfn);
+ /* balance pci_get_slot */
+ pci_dev_put(pdev);
+ return;
+ }
+ pci_stop_and_remove_bus_device_locked(pdev);
+ /* balance pci_get_slot */
+ pci_dev_put(pdev);
+ }
+}
+
+/**
+ * zpci_create_device() - Create a new zpci_dev and add it to the zbus
+ * @fid: Function ID of the device to be created
+ * @fh: Current Function Handle of the device to be created
+ * @state: Initial state after creation either Standby or Configured
+ *
+ * Creates a new zpci device and adds it to its, possibly newly created, zbus
+ * as well as zpci_list.
+ *
+ * Returns: 0 on success, an error value otherwise
+ */
+int zpci_create_device(u32 fid, u32 fh, enum zpci_state state)
+{
+ struct zpci_dev *zdev;
+ int rc;
+
+ zpci_dbg(3, "add fid:%x, fh:%x, c:%d\n", fid, fh, state);
+ zdev = kzalloc(sizeof(*zdev), GFP_KERNEL);
+ if (!zdev)
+ return -ENOMEM;
+
+ /* FID and Function Handle are the static/dynamic identifiers */
+ zdev->fid = fid;
+ zdev->fh = fh;
+
+ /* Query function properties and update zdev */
+ rc = clp_query_pci_fn(zdev);
+ if (rc)
+ goto error;
+ zdev->state = state;
+
+ kref_init(&zdev->kref);
+ mutex_init(&zdev->lock);
+
+ rc = zpci_init_iommu(zdev);
+ if (rc)
+ goto error;
+
+ if (zdev->state == ZPCI_FN_STATE_CONFIGURED) {
+ rc = zpci_enable_device(zdev);
+ if (rc)
+ goto error_destroy_iommu;
+ }
+
+ rc = zpci_bus_device_register(zdev, &pci_root_ops);
+ if (rc)
+ goto error_disable;
+
+ spin_lock(&zpci_list_lock);
+ list_add_tail(&zdev->entry, &zpci_list);
+ spin_unlock(&zpci_list_lock);
+
+ return 0;
+
+error_disable:
+ if (zdev->state == ZPCI_FN_STATE_ONLINE)
+ zpci_disable_device(zdev);
+error_destroy_iommu:
+ zpci_destroy_iommu(zdev);
+error:
+ zpci_dbg(0, "add fid:%x, rc:%d\n", fid, rc);
+ kfree(zdev);
+ return rc;
+}
+
+bool zpci_is_device_configured(struct zpci_dev *zdev)
+{
+ enum zpci_state state = zdev->state;
+
+ return state != ZPCI_FN_STATE_RESERVED &&
+ state != ZPCI_FN_STATE_STANDBY;
+}
+
+/**
+ * zpci_device_reserved() - Mark device as reserved
+ * @zdev: the zpci_dev that was reserved
+ *
+ * Handle the case that a given zPCI function was reserved by another system.
+ * After a call to this function the zpci_dev can no longer be found via
+ * get_zdev_by_fid() but may still be accessible via existing
+ * references, though it will no longer be functional.
+ */
+void zpci_device_reserved(struct zpci_dev *zdev)
+{
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+ /*
+ * Remove device from zpci_list as it is going away. This also
+ * makes sure we ignore subsequent zPCI events for this device.
+ */
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zdev->state = ZPCI_FN_STATE_RESERVED;
+ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ zpci_zdev_put(zdev);
+}
+
+void zpci_release_device(struct kref *kref)
+{
+ struct zpci_dev *zdev = container_of(kref, struct zpci_dev, kref);
+
+ if (zdev->zbus->bus)
+ zpci_remove_device(zdev, false);
+
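+ /*
+ * Tear down in stages: each state deliberately falls through to the
+ * cleanup steps required for the states below it.
+ */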
+ switch (zdev->state) {
+ case ZPCI_FN_STATE_ONLINE:
+ case ZPCI_FN_STATE_CONFIGURED:
+ zpci_disable_device(zdev);
+ fallthrough;
+ case ZPCI_FN_STATE_STANDBY:
+ if (zdev->has_hp_slot)
+ zpci_exit_slot(zdev);
+ spin_lock(&zpci_list_lock);
+ list_del(&zdev->entry);
+ spin_unlock(&zpci_list_lock);
+ zpci_dbg(3, "rsv fid:%x\n", zdev->fid);
+ fallthrough;
+ case ZPCI_FN_STATE_RESERVED:
+ zpci_cleanup_bus_resources(zdev);
+ zpci_bus_device_unregister(zdev);
+ zpci_destroy_iommu(zdev);
+ fallthrough;
+ default:
+ break;
+ }
+ zpci_dbg(3, "rem fid:%x\n", zdev->fid);
+ kfree(zdev);
+}
+
+int zpci_report_error(struct pci_dev *pdev,
+ struct zpci_report_error_header *report)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ return sclp_pci_report(report, zdev->fh, zdev->fid);
+}
+EXPORT_SYMBOL(zpci_report_error);
+
+static int zpci_mem_init(void)
+{
+ BUILD_BUG_ON(!is_power_of_2(__alignof__(struct zpci_fmb)) ||
+ __alignof__(struct zpci_fmb) < sizeof(struct zpci_fmb));
+
+ zdev_fmb_cache = kmem_cache_create("PCI_FMB_cache", sizeof(struct zpci_fmb),
+ __alignof__(struct zpci_fmb), 0, NULL);
+ if (!zdev_fmb_cache)
+ goto error_fmb;
+
+ zpci_iomap_start = kcalloc(ZPCI_IOMAP_ENTRIES,
+ sizeof(*zpci_iomap_start), GFP_KERNEL);
+ if (!zpci_iomap_start)
+ goto error_iomap;
+
+ zpci_iomap_bitmap = kcalloc(BITS_TO_LONGS(ZPCI_IOMAP_ENTRIES),
+ sizeof(*zpci_iomap_bitmap), GFP_KERNEL);
+ if (!zpci_iomap_bitmap)
+ goto error_iomap_bitmap;
+
+ if (static_branch_likely(&have_mio))
+ clp_setup_writeback_mio();
+
+ return 0;
+error_iomap_bitmap:
+ kfree(zpci_iomap_start);
+error_iomap:
+ kmem_cache_destroy(zdev_fmb_cache);
+error_fmb:
+ return -ENOMEM;
+}
+
+static void zpci_mem_exit(void)
+{
+ kfree(zpci_iomap_bitmap);
+ kfree(zpci_iomap_start);
+ kmem_cache_destroy(zdev_fmb_cache);
+}
+
+static unsigned int s390_pci_probe __initdata = 1;
+unsigned int s390_pci_force_floating __initdata;
+static unsigned int s390_pci_initialized;
+
+char * __init pcibios_setup(char *str)
+{
+ if (!strcmp(str, "off")) {
+ s390_pci_probe = 0;
+ return NULL;
+ }
+ if (!strcmp(str, "nomio")) {
+ S390_lowcore.machine_flags &= ~MACHINE_FLAG_PCI_MIO;
+ return NULL;
+ }
+ if (!strcmp(str, "force_floating")) {
+ s390_pci_force_floating = 1;
+ return NULL;
+ }
+ if (!strcmp(str, "norid")) {
+ s390_pci_no_rid = 1;
+ return NULL;
+ }
+ return str;
+}
+
+bool zpci_is_enabled(void)
+{
+ return s390_pci_initialized;
+}
+
+static int __init pci_base_init(void)
+{
+ int rc;
+
+ if (!s390_pci_probe)
+ return 0;
+
+ if (!test_facility(69) || !test_facility(71))
+ return 0;
+
+ if (MACHINE_HAS_PCI_MIO) {
+ static_branch_enable(&have_mio);
+ ctl_set_bit(2, 5);
+ }
+
+ rc = zpci_debug_init();
+ if (rc)
+ goto out;
+
+ rc = zpci_mem_init();
+ if (rc)
+ goto out_mem;
+
+ rc = zpci_irq_init();
+ if (rc)
+ goto out_irq;
+
+ rc = zpci_dma_init();
+ if (rc)
+ goto out_dma;
+
+ rc = clp_scan_pci_devices();
+ if (rc)
+ goto out_find;
+
+ s390_pci_initialized = 1;
+ return 0;
+
+out_find:
+ zpci_dma_exit();
+out_dma:
+ zpci_irq_exit();
+out_irq:
+ zpci_mem_exit();
+out_mem:
+ zpci_debug_exit();
+out:
+ return rc;
+}
+subsys_initcall_sync(pci_base_init);
diff --git a/arch/s390/pci/pci_bus.c b/arch/s390/pci/pci_bus.c
new file mode 100644
index 000000000..755b46f4c
--- /dev/null
+++ b/arch/s390/pci/pci_bus.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/jump_label.h>
+#include <linux/pci.h>
+#include <linux/printk.h>
+
+#include <asm/pci_clp.h>
+#include <asm/pci_dma.h>
+
+#include "pci_bus.h"
+#include "pci_iov.h"
+
+static LIST_HEAD(zbus_list);
+static DEFINE_SPINLOCK(zbus_list_lock);
+static int zpci_nb_devices;
+
+/**
+ * zpci_bus_scan() - register and scan the root bus of a zbus
+ * @zbus: the zbus holding the zdevices
+ * @domain: the proposed domain number
+ * @ops: the pci operations
+ *
+ * The domain number must be set before pci_scan_root_bus() is called.
+ * This function can be called once the domain is known, hence
+ * when function 0 is discovered.
+ */
+static int zpci_bus_scan(struct zpci_bus *zbus, int domain, struct pci_ops *ops)
+{
+ struct pci_bus *bus;
+ int rc;
+
+ rc = zpci_alloc_domain(domain);
+ if (rc < 0)
+ return rc;
+ zbus->domain_nr = rc;
+
+ bus = pci_scan_root_bus(NULL, ZPCI_BUS_NR, ops, zbus, &zbus->resources);
+ if (!bus) {
+ zpci_free_domain(zbus->domain_nr);
+ return -EFAULT;
+ }
+
+ zbus->bus = bus;
+ pci_bus_add_devices(bus);
+ return 0;
+}
+
+static void zpci_bus_release(struct kref *kref)
+{
+ struct zpci_bus *zbus = container_of(kref, struct zpci_bus, kref);
+
+ if (zbus->bus) {
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(zbus->bus);
+
+ zpci_free_domain(zbus->domain_nr);
+ pci_free_resource_list(&zbus->resources);
+
+ pci_remove_root_bus(zbus->bus);
+ pci_unlock_rescan_remove();
+ }
+
+ spin_lock(&zbus_list_lock);
+ list_del(&zbus->bus_next);
+ spin_unlock(&zbus_list_lock);
+ kfree(zbus);
+}
+
+static void zpci_bus_put(struct zpci_bus *zbus)
+{
+ kref_put(&zbus->kref, zpci_bus_release);
+}
+
+static struct zpci_bus *zpci_bus_get(int pchid)
+{
+ struct zpci_bus *zbus;
+
+ spin_lock(&zbus_list_lock);
+ list_for_each_entry(zbus, &zbus_list, bus_next) {
+ if (pchid == zbus->pchid) {
+ kref_get(&zbus->kref);
+ goto out_unlock;
+ }
+ }
+ zbus = NULL;
+out_unlock:
+ spin_unlock(&zbus_list_lock);
+ return zbus;
+}
+
+static struct zpci_bus *zpci_bus_alloc(int pchid)
+{
+ struct zpci_bus *zbus;
+
+ zbus = kzalloc(sizeof(*zbus), GFP_KERNEL);
+ if (!zbus)
+ return NULL;
+
+ zbus->pchid = pchid;
+ INIT_LIST_HEAD(&zbus->bus_next);
+ spin_lock(&zbus_list_lock);
+ list_add_tail(&zbus->bus_next, &zbus_list);
+ spin_unlock(&zbus_list_lock);
+
+ kref_init(&zbus->kref);
+ INIT_LIST_HEAD(&zbus->resources);
+
+ zbus->bus_resource.start = 0;
+ zbus->bus_resource.end = ZPCI_BUS_NR;
+ zbus->bus_resource.flags = IORESOURCE_BUS;
+ pci_add_resource(&zbus->resources, &zbus->bus_resource);
+
+ return zbus;
+}
+
+void pcibios_bus_add_device(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ /*
+ * With pdev->no_vf_scan the common PCI probing code does not
+ * perform PF/VF linking.
+ */
+ if (zdev->vfn) {
+ zpci_iov_setup_virtfn(zdev->zbus, pdev, zdev->vfn);
+ pdev->no_command_memory = 1;
+ }
+}
+
+static int zpci_bus_add_device(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+ struct pci_bus *bus;
+ struct resource_entry *window, *n;
+ struct resource *res;
+ struct pci_dev *pdev;
+ int rc;
+
+ bus = zbus->bus;
+ if (!bus)
+ return -EINVAL;
+
+ pdev = pci_get_slot(bus, zdev->devfn);
+ if (pdev) {
+ /* Device is already known. */
+ pci_dev_put(pdev);
+ return 0;
+ }
+
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ return rc;
+ zdev->has_hp_slot = 1;
+
+ resource_list_for_each_entry_safe(window, n, &zbus->resources) {
+ res = window->res;
+ pci_bus_add_resource(bus, res, 0);
+ }
+
+ pdev = pci_scan_single_device(bus, zdev->devfn);
+ if (pdev)
+ pci_bus_add_device(pdev);
+
+ return 0;
+}
+
+static void zpci_bus_add_devices(struct zpci_bus *zbus)
+{
+ int i;
+
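+ /* Function 0 was already picked up by the initial root bus scan; add
+ * the remaining registered functions here. */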
+ for (i = 1; i < ZPCI_FUNCTIONS_PER_BUS; i++)
+ if (zbus->function[i])
+ zpci_bus_add_device(zbus, zbus->function[i]);
+
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zbus->bus);
+ pci_unlock_rescan_remove();
+}
+
+int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
+{
+ struct zpci_bus *zbus = NULL;
+ int rc = -EBADF;
+
+ if (zpci_nb_devices == ZPCI_NR_DEVICES) {
+ pr_warn("Adding PCI function %08x failed because the configured limit of %d is reached\n",
+ zdev->fid, ZPCI_NR_DEVICES);
+ return -ENOSPC;
+ }
+ zpci_nb_devices++;
+
+ if (zdev->devfn >= ZPCI_FUNCTIONS_PER_BUS)
+ return -EINVAL;
+
+ if (!s390_pci_no_rid && zdev->rid_available)
+ zbus = zpci_bus_get(zdev->pchid);
+
+ if (!zbus) {
+ zbus = zpci_bus_alloc(zdev->pchid);
+ if (!zbus)
+ return -ENOMEM;
+ }
+
+ zdev->zbus = zbus;
+ if (zbus->function[zdev->devfn]) {
+ pr_err("devfn %04x is already assigned\n", zdev->devfn);
+ goto error; /* rc already set */
+ }
+ zbus->function[zdev->devfn] = zdev;
+
+ zpci_setup_bus_resources(zdev, &zbus->resources);
+
+ if (zbus->bus) {
+ if (!zbus->multifunction) {
+ WARN_ONCE(1, "zbus is not multifunction\n");
+ goto error_bus;
+ }
+ if (!zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set for multifunction\n");
+ goto error_bus;
+ }
+ rc = zpci_bus_add_device(zbus, zdev);
+ if (rc)
+ goto error_bus;
+ } else if (zdev->devfn == 0) {
+ if (zbus->multifunction && !zdev->rid_available) {
+ WARN_ONCE(1, "rid_available not set on function 0 for multifunction\n");
+ goto error_bus;
+ }
+ rc = zpci_bus_scan(zbus, (u16)zdev->uid, ops);
+ if (rc)
+ goto error_bus;
+ zpci_bus_add_devices(zbus);
+ rc = zpci_init_slot(zdev);
+ if (rc)
+ goto error_bus;
+ zdev->has_hp_slot = 1;
+ zbus->multifunction = zdev->rid_available;
+ zbus->max_bus_speed = zdev->max_bus_speed;
+ } else {
+ zbus->multifunction = 1;
+ }
+
+ return 0;
+
+error_bus:
+ zpci_nb_devices--;
+ zbus->function[zdev->devfn] = NULL;
+error:
+ pr_err("Adding PCI function %08x failed\n", zdev->fid);
+ zpci_bus_put(zbus);
+ return rc;
+}
+
+void zpci_bus_device_unregister(struct zpci_dev *zdev)
+{
+ struct zpci_bus *zbus = zdev->zbus;
+
+ zpci_nb_devices--;
+ zbus->function[zdev->devfn] = NULL;
+ zpci_bus_put(zbus);
+}
diff --git a/arch/s390/pci/pci_bus.h b/arch/s390/pci/pci_bus.h
new file mode 100644
index 000000000..8d2fcd091
--- /dev/null
+++ b/arch/s390/pci/pci_bus.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Pierre Morel <pmorel@linux.ibm.com>
+ *
+ */
+
+int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops);
+void zpci_bus_device_unregister(struct zpci_dev *zdev);
+
+void zpci_release_device(struct kref *kref);
+static inline void zpci_zdev_put(struct zpci_dev *zdev)
+{
+ if (zdev)
+ kref_put(&zdev->kref, zpci_release_device);
+}
+
+static inline void zpci_zdev_get(struct zpci_dev *zdev)
+{
+ kref_get(&zdev->kref);
+}
+
+int zpci_alloc_domain(int domain);
+void zpci_free_domain(int domain);
+int zpci_setup_bus_resources(struct zpci_dev *zdev,
+ struct list_head *resources);
+
+static inline struct zpci_dev *get_zdev_by_bus(struct pci_bus *bus,
+ unsigned int devfn)
+{
+ struct zpci_bus *zbus = bus->sysdata;
+
+ return (devfn >= ZPCI_FUNCTIONS_PER_BUS) ? NULL : zbus->function[devfn];
+}
+
diff --git a/arch/s390/pci/pci_clp.c b/arch/s390/pci/pci_clp.c
new file mode 100644
index 000000000..d1a5c80a4
--- /dev/null
+++ b/arch/s390/pci/pci_clp.c
@@ -0,0 +1,658 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/miscdevice.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/uaccess.h>
+#include <asm/pci_debug.h>
+#include <asm/pci_clp.h>
+#include <asm/clp.h>
+#include <uapi/asm/clp.h>
+
+#include "pci_bus.h"
+
+bool zpci_unique_uid;
+
+void update_uid_checking(bool new)
+{
+ if (zpci_unique_uid != new)
+ zpci_dbg(1, "uid checking:%d\n", new);
+
+ zpci_unique_uid = new;
+}
+
+static inline void zpci_err_clp(unsigned int rsp, int rc)
+{
+ struct {
+ unsigned int rsp;
+ int rc;
+ } __packed data = {rsp, rc};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
+/*
+ * Call Logical Processor with c=1, lps=0 and command 1
+ * to get the bit mask of installed logical processors
+ */
+static inline int clp_get_ilp(unsigned long *ilp)
+{
+ unsigned long mask;
+ int cc = 3;
+
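+ /* If CLP faults, the exception table entry resumes past the ipm, so
+ * cc keeps its error value of 3. */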
+ asm volatile (
+ " .insn rrf,0xb9a00000,%[mask],%[cmd],8,0\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [mask] "=d" (mask) : [cmd] "a" (1)
+ : "cc");
+ *ilp = mask;
+ return cc;
+}
+
+/*
+ * Call Logical Processor with c=0, the given constant lps and an lpcb request.
+ */
+static __always_inline int clp_req(void *data, unsigned int lps)
+{
+ struct { u8 _[CLP_BLK_SIZE]; } *req = data;
+ u64 ignored;
+ int cc = 3;
+
+ asm volatile (
+ " .insn rrf,0xb9a00000,%[ign],%[req],0,%[lps]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [ign] "=d" (ignored), "+m" (*req)
+ : [req] "a" (req), [lps] "i" (lps)
+ : "cc");
+ return cc;
+}
+
+static void *clp_alloc_block(gfp_t gfp_mask)
+{
+ return (void *) __get_free_pages(gfp_mask, get_order(CLP_BLK_SIZE));
+}
+
+static void clp_free_block(void *ptr)
+{
+ free_pages((unsigned long) ptr, get_order(CLP_BLK_SIZE));
+}
+
+static void clp_store_query_pci_fngrp(struct zpci_dev *zdev,
+ struct clp_rsp_query_pci_grp *response)
+{
+ zdev->tlb_refresh = response->refresh;
+ zdev->dma_mask = response->dasm;
+ zdev->msi_addr = response->msia;
+ zdev->max_msi = response->noi;
+ zdev->fmb_update = response->mui;
+ zdev->version = response->version;
+
+ switch (response->version) {
+ case 1:
+ zdev->max_bus_speed = PCIE_SPEED_5_0GT;
+ break;
+ default:
+ zdev->max_bus_speed = PCI_SPEED_UNKNOWN;
+ break;
+ }
+}
+
+static int clp_query_pci_fngrp(struct zpci_dev *zdev, u8 pfgid)
+{
+ struct clp_req_rsp_query_pci_grp *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_QUERY_PCI_FNGRP;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.pfgid = pfgid;
+
+ rc = clp_req(rrb, CLP_LPS_PCI);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK)
+ clp_store_query_pci_fngrp(zdev, &rrb->response);
+ else {
+ zpci_err("Q PCI FGRP:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+static int clp_store_query_pci_fn(struct zpci_dev *zdev,
+ struct clp_rsp_query_pci *response)
+{
+ int i;
+
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ zdev->bars[i].val = le32_to_cpu(response->bar[i]);
+ zdev->bars[i].size = response->bar_size[i];
+ }
+ zdev->start_dma = response->sdma;
+ zdev->end_dma = response->edma;
+ zdev->pchid = response->pchid;
+ zdev->pfgid = response->pfgid;
+ zdev->pft = response->pft;
+ zdev->vfn = response->vfn;
+ zdev->port = response->port;
+ zdev->uid = response->uid;
+ zdev->fmb_length = sizeof(u32) * response->fmb_len;
+ zdev->rid_available = response->rid_avail;
+ zdev->is_physfn = response->is_physfn;
+ if (!s390_pci_no_rid && zdev->rid_available)
+ zdev->devfn = response->rid & ZPCI_RID_MASK_DEVFN;
+
+ memcpy(zdev->pfip, response->pfip, sizeof(zdev->pfip));
+ if (response->util_str_avail) {
+ memcpy(zdev->util_str, response->util_str,
+ sizeof(zdev->util_str));
+ zdev->util_str_avail = 1;
+ }
+ zdev->mio_capable = response->mio_addr_avail;
+ for (i = 0; i < PCI_STD_NUM_BARS; i++) {
+ if (!(response->mio.valid & (1 << (PCI_STD_NUM_BARS - i - 1))))
+ continue;
+
+ zdev->bars[i].mio_wb = (void __iomem *) response->mio.addr[i].wb;
+ zdev->bars[i].mio_wt = (void __iomem *) response->mio.addr[i].wt;
+ }
+ return 0;
+}
+
+int clp_query_pci_fn(struct zpci_dev *zdev)
+{
+ struct clp_req_rsp_query_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_QUERY_PCI_FN;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.fh = zdev->fh;
+
+ rc = clp_req(rrb, CLP_LPS_PCI);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+ rc = clp_store_query_pci_fn(zdev, &rrb->response);
+ if (rc)
+ goto out;
+ rc = clp_query_pci_fngrp(zdev, rrb->response.pfgid);
+ } else {
+ zpci_err("Q PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+out:
+ clp_free_block(rrb);
+ return rc;
+}
+
+static int clp_refresh_fh(u32 fid);
+/**
+ * clp_set_pci_fn() - Execute a command on a PCI function
+ * @zdev: Function that will be affected
+ * @nr_dma_as: DMA address space number
+ * @command: The command code to execute
+ *
+ * Returns: 0 on success, < 0 for Linux errors (e.g. -ENOMEM), and
+ * > 0 for non-success platform responses
+ */
+static int clp_set_pci_fn(struct zpci_dev *zdev, u8 nr_dma_as, u8 command)
+{
+ struct clp_req_rsp_set_pci *rrb;
+ int rc, retries = 100;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
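+ /* Retry a busy platform response every 20 ms, for up to ~2 seconds. */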
+ do {
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_SET_PCI_FN;
+ rrb->response.hdr.len = sizeof(rrb->response);
+ rrb->request.fh = zdev->fh;
+ rrb->request.oc = command;
+ rrb->request.ndas = nr_dma_as;
+
+ rc = clp_req(rrb, CLP_LPS_PCI);
+ if (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY) {
+ retries--;
+ if (retries < 0)
+ break;
+ msleep(20);
+ }
+ } while (rrb->response.hdr.rsp == CLP_RC_SETPCIFN_BUSY);
+
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+ zdev->fh = rrb->response.fh;
+ } else if (!rc && rrb->response.hdr.rsp == CLP_RC_SETPCIFN_ALRDY) {
+ /* Function is already in desired state - update handle */
+ rc = clp_refresh_fh(zdev->fid);
+ } else {
+ zpci_err("Set PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
+ if (!rc)
+ rc = rrb->response.hdr.rsp;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_setup_writeback_mio(void)
+{
+ struct clp_req_rsp_slpc_pci *rrb;
+ u8 wb_bit_pos;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_SLPC;
+ rrb->response.hdr.len = sizeof(rrb->response);
+
+ rc = clp_req(rrb, CLP_LPS_PCI);
+ if (!rc && rrb->response.hdr.rsp == CLP_RC_OK) {
+ if (rrb->response.vwb) {
+ wb_bit_pos = rrb->response.mio_wb;
+ set_bit_inv(wb_bit_pos, &mio_wb_bit_mask);
+ zpci_dbg(3, "wb bit: %d\n", wb_bit_pos);
+ } else {
+ zpci_dbg(3, "wb bit: n.a.\n");
+ }
+ } else {
+ zpci_err("SLPC PCI:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ }
+ clp_free_block(rrb);
+ return rc;
+}
+
+int clp_enable_fh(struct zpci_dev *zdev, u8 nr_dma_as)
+{
+ int rc;
+
+ rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_PCI_FN);
+ zpci_dbg(3, "ena fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+ if (!rc && zpci_use_mio(zdev)) {
+ rc = clp_set_pci_fn(zdev, nr_dma_as, CLP_SET_ENABLE_MIO);
+ zpci_dbg(3, "ena mio fid:%x, fh:%x, rc:%d\n",
+ zdev->fid, zdev->fh, rc);
+ if (rc)
+ clp_disable_fh(zdev);
+ }
+ return rc;
+}
+
+int clp_disable_fh(struct zpci_dev *zdev)
+{
+ int rc;
+
+ if (!zdev_enabled(zdev))
+ return 0;
+
+ rc = clp_set_pci_fn(zdev, 0, CLP_SET_DISABLE_PCI_FN);
+ zpci_dbg(3, "dis fid:%x, fh:%x, rc:%d\n", zdev->fid, zdev->fh, rc);
+ return rc;
+}
+
+static int clp_list_pci(struct clp_req_rsp_list_pci *rrb, void *data,
+ void (*cb)(struct clp_fh_list_entry *, void *))
+{
+ u64 resume_token = 0;
+ int entries, i, rc;
+
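+ /* The function list is retrieved in chunks; a non-zero resume token
+ * indicates that more entries follow. */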
+ do {
+ memset(rrb, 0, sizeof(*rrb));
+ rrb->request.hdr.len = sizeof(rrb->request);
+ rrb->request.hdr.cmd = CLP_LIST_PCI;
+ /* store as many entries as possible */
+ rrb->response.hdr.len = CLP_BLK_SIZE - LIST_PCI_HDR_LEN;
+ rrb->request.resume_token = resume_token;
+
+ /* Get PCI function handle list */
+ rc = clp_req(rrb, CLP_LPS_PCI);
+ if (rc || rrb->response.hdr.rsp != CLP_RC_OK) {
+ zpci_err("List PCI FN:\n");
+ zpci_err_clp(rrb->response.hdr.rsp, rc);
+ rc = -EIO;
+ goto out;
+ }
+
+ update_uid_checking(rrb->response.uid_checking);
+ WARN_ON_ONCE(rrb->response.entry_size !=
+ sizeof(struct clp_fh_list_entry));
+
+ entries = (rrb->response.hdr.len - LIST_PCI_HDR_LEN) /
+ rrb->response.entry_size;
+
+ resume_token = rrb->response.resume_token;
+ for (i = 0; i < entries; i++)
+ cb(&rrb->response.fh_list[i], data);
+ } while (resume_token);
+out:
+ return rc;
+}
+
+static void __clp_add(struct clp_fh_list_entry *entry, void *data)
+{
+ struct zpci_dev *zdev;
+
+ if (!entry->vendor_id)
+ return;
+
+ zdev = get_zdev_by_fid(entry->fid);
+ if (zdev) {
+ zpci_zdev_put(zdev);
+ return;
+ }
+ zpci_create_device(entry->fid, entry->fh, entry->config_state);
+}
+
+int clp_scan_pci_devices(void)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_KERNEL);
+ if (!rrb)
+ return -ENOMEM;
+
+ rc = clp_list_pci(rrb, NULL, __clp_add);
+
+ clp_free_block(rrb);
+ return rc;
+}
+
+static void __clp_refresh_fh(struct clp_fh_list_entry *entry, void *data)
+{
+ struct zpci_dev *zdev;
+ u32 fid = *((u32 *)data);
+
+ if (!entry->vendor_id || fid != entry->fid)
+ return;
+
+ zdev = get_zdev_by_fid(fid);
+ if (!zdev)
+ return;
+
+ zdev->fh = entry->fh;
+}
+
+/*
+ * Refresh the function handle of the function matching @fid
+ */
+static int clp_refresh_fh(u32 fid)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ int rc;
+
+ rrb = clp_alloc_block(GFP_NOWAIT);
+ if (!rrb)
+ return -ENOMEM;
+
+ rc = clp_list_pci(rrb, &fid, __clp_refresh_fh);
+
+ clp_free_block(rrb);
+ return rc;
+}
+
+struct clp_state_data {
+ u32 fid;
+ enum zpci_state state;
+};
+
+static void __clp_get_state(struct clp_fh_list_entry *entry, void *data)
+{
+ struct clp_state_data *sd = data;
+
+ if (entry->fid != sd->fid)
+ return;
+
+ sd->state = entry->config_state;
+}
+
+int clp_get_state(u32 fid, enum zpci_state *state)
+{
+ struct clp_req_rsp_list_pci *rrb;
+ struct clp_state_data sd = {fid, ZPCI_FN_STATE_RESERVED};
+ int rc;
+
+ rrb = clp_alloc_block(GFP_ATOMIC);
+ if (!rrb)
+ return -ENOMEM;
+
+ rc = clp_list_pci(rrb, &sd, __clp_get_state);
+ if (!rc)
+ *state = sd.state;
+
+ clp_free_block(rrb);
+ return rc;
+}
+
+static int clp_base_slpc(struct clp_req *req, struct clp_req_rsp_slpc *lpcb)
+{
+ unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+ if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+ lpcb->response.hdr.len > limit)
+ return -EINVAL;
+ return clp_req(lpcb, CLP_LPS_BASE) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_base_command(struct clp_req *req, struct clp_req_hdr *lpcb)
+{
+ switch (lpcb->cmd) {
+ case 0x0001: /* store logical-processor characteristics */
+ return clp_base_slpc(req, (void *) lpcb);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int clp_pci_slpc(struct clp_req *req, struct clp_req_rsp_slpc_pci *lpcb)
+{
+ unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+ if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+ lpcb->response.hdr.len > limit)
+ return -EINVAL;
+ return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_list(struct clp_req *req, struct clp_req_rsp_list_pci *lpcb)
+{
+ unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+ if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+ lpcb->response.hdr.len > limit)
+ return -EINVAL;
+ if (lpcb->request.reserved2 != 0)
+ return -EINVAL;
+ return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_query(struct clp_req *req,
+ struct clp_req_rsp_query_pci *lpcb)
+{
+ unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+ if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+ lpcb->response.hdr.len > limit)
+ return -EINVAL;
+ if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0)
+ return -EINVAL;
+ return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_query_grp(struct clp_req *req,
+ struct clp_req_rsp_query_pci_grp *lpcb)
+{
+ unsigned long limit = PAGE_SIZE - sizeof(lpcb->request);
+
+ if (lpcb->request.hdr.len != sizeof(lpcb->request) ||
+ lpcb->response.hdr.len > limit)
+ return -EINVAL;
+ if (lpcb->request.reserved2 != 0 || lpcb->request.reserved3 != 0 ||
+ lpcb->request.reserved4 != 0)
+ return -EINVAL;
+ return clp_req(lpcb, CLP_LPS_PCI) ? -EOPNOTSUPP : 0;
+}
+
+static int clp_pci_command(struct clp_req *req, struct clp_req_hdr *lpcb)
+{
+ switch (lpcb->cmd) {
+ case 0x0001: /* store logical-processor characteristics */
+ return clp_pci_slpc(req, (void *) lpcb);
+ case 0x0002: /* list PCI functions */
+ return clp_pci_list(req, (void *) lpcb);
+ case 0x0003: /* query PCI function */
+ return clp_pci_query(req, (void *) lpcb);
+ case 0x0004: /* query PCI function group */
+ return clp_pci_query_grp(req, (void *) lpcb);
+ default:
+ return -EINVAL;
+ }
+}
+
+static int clp_normal_command(struct clp_req *req)
+{
+ struct clp_req_hdr *lpcb;
+ void __user *uptr;
+ int rc;
+
+ rc = -EINVAL;
+ if (req->lps != 0 && req->lps != 2)
+ goto out;
+
+ rc = -ENOMEM;
+ lpcb = clp_alloc_block(GFP_KERNEL);
+ if (!lpcb)
+ goto out;
+
+ rc = -EFAULT;
+ uptr = (void __force __user *)(unsigned long) req->data_p;
+ if (copy_from_user(lpcb, uptr, PAGE_SIZE) != 0)
+ goto out_free;
+
+ rc = -EINVAL;
+ if (lpcb->fmt != 0 || lpcb->reserved1 != 0 || lpcb->reserved2 != 0)
+ goto out_free;
+
+ switch (req->lps) {
+ case 0:
+ rc = clp_base_command(req, lpcb);
+ break;
+ case 2:
+ rc = clp_pci_command(req, lpcb);
+ break;
+ }
+ if (rc)
+ goto out_free;
+
+ rc = -EFAULT;
+ if (copy_to_user(uptr, lpcb, PAGE_SIZE) != 0)
+ goto out_free;
+
+ rc = 0;
+
+out_free:
+ clp_free_block(lpcb);
+out:
+ return rc;
+}
+
+static int clp_immediate_command(struct clp_req *req)
+{
+ void __user *uptr;
+ unsigned long ilp;
+ int exists;
+
+ if (req->cmd > 1 || clp_get_ilp(&ilp) != 0)
+ return -EINVAL;
+
+ uptr = (void __force __user *)(unsigned long) req->data_p;
+ if (req->cmd == 0) {
+ /* Command code 0: test for a specific processor */
+ exists = test_bit_inv(req->lps, &ilp);
+ return put_user(exists, (int __user *) uptr);
+ }
+ /* Command code 1: return bit mask of installed processors */
+ return put_user(ilp, (unsigned long __user *) uptr);
+}
+
+static long clp_misc_ioctl(struct file *filp, unsigned int cmd,
+ unsigned long arg)
+{
+ struct clp_req req;
+ void __user *argp;
+
+ if (cmd != CLP_SYNC)
+ return -EINVAL;
+
+ argp = is_compat_task() ? compat_ptr(arg) : (void __user *) arg;
+ if (copy_from_user(&req, argp, sizeof(req)))
+ return -EFAULT;
+ if (req.r != 0)
+ return -EINVAL;
+ return req.c ? clp_immediate_command(&req) : clp_normal_command(&req);
+}
+
+static int clp_misc_release(struct inode *inode, struct file *filp)
+{
+ return 0;
+}
+
+static const struct file_operations clp_misc_fops = {
+ .owner = THIS_MODULE,
+ .open = nonseekable_open,
+ .release = clp_misc_release,
+ .unlocked_ioctl = clp_misc_ioctl,
+ .compat_ioctl = clp_misc_ioctl,
+ .llseek = no_llseek,
+};
+
+static struct miscdevice clp_misc_device = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "clp",
+ .fops = &clp_misc_fops,
+};
+
+static int __init clp_misc_init(void)
+{
+ return misc_register(&clp_misc_device);
+}
+
+device_initcall(clp_misc_init);
diff --git a/arch/s390/pci/pci_debug.c b/arch/s390/pci/pci_debug.c
new file mode 100644
index 000000000..3408c0df3
--- /dev/null
+++ b/arch/s390/pci/pci_debug.c
@@ -0,0 +1,210 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012,2015
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/seq_file.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/pci.h>
+#include <asm/debug.h>
+
+#include <asm/pci_dma.h>
+
+static struct dentry *debugfs_root;
+debug_info_t *pci_debug_msg_id;
+EXPORT_SYMBOL_GPL(pci_debug_msg_id);
+debug_info_t *pci_debug_err_id;
+EXPORT_SYMBOL_GPL(pci_debug_err_id);
+
+static char *pci_common_names[] = {
+ "Load operations",
+ "Store operations",
+ "Store block operations",
+ "Refresh operations",
+};
+
+static char *pci_fmt0_names[] = {
+ "DMA read bytes",
+ "DMA write bytes",
+};
+
+static char *pci_fmt1_names[] = {
+ "Received bytes",
+ "Received packets",
+ "Transmitted bytes",
+ "Transmitted packets",
+};
+
+static char *pci_fmt2_names[] = {
+ "Consumed work units",
+ "Maximum work units",
+};
+
+static char *pci_fmt3_names[] = {
+ "Transmitted bytes",
+};
+
+static char *pci_sw_names[] = {
+ "Allocated pages",
+ "Mapped pages",
+ "Unmapped pages",
+};
+
+static void pci_fmb_show(struct seq_file *m, char *name[], int length,
+ u64 *data)
+{
+ int i;
+
+ for (i = 0; i < length; i++, data++)
+ seq_printf(m, "%26s:\t%llu\n", name[i], *data);
+}
+
+static void pci_sw_counter_show(struct seq_file *m)
+{
+ struct zpci_dev *zdev = m->private;
+ atomic64_t *counter = &zdev->allocated_pages;
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(pci_sw_names); i++, counter++)
+ seq_printf(m, "%26s:\t%llu\n", pci_sw_names[i],
+ atomic64_read(counter));
+}
+
+static int pci_perf_show(struct seq_file *m, void *v)
+{
+ struct zpci_dev *zdev = m->private;
+
+ if (!zdev)
+ return 0;
+
+ mutex_lock(&zdev->lock);
+ if (!zdev->fmb) {
+ mutex_unlock(&zdev->lock);
+ seq_puts(m, "FMB statistics disabled\n");
+ return 0;
+ }
+
+ /* header */
+ seq_printf(m, "Update interval: %u ms\n", zdev->fmb_update);
+ seq_printf(m, "Samples: %u\n", zdev->fmb->samples);
+ seq_printf(m, "Last update TOD: %Lx\n", zdev->fmb->last_update);
+
+ pci_fmb_show(m, pci_common_names, ARRAY_SIZE(pci_common_names),
+ &zdev->fmb->ld_ops);
+
+ switch (zdev->fmb->format) {
+ case 0:
+ if (!(zdev->fmb->fmt_ind & ZPCI_FMB_DMA_COUNTER_VALID))
+ break;
+ pci_fmb_show(m, pci_fmt0_names, ARRAY_SIZE(pci_fmt0_names),
+ &zdev->fmb->fmt0.dma_rbytes);
+ break;
+ case 1:
+ pci_fmb_show(m, pci_fmt1_names, ARRAY_SIZE(pci_fmt1_names),
+ &zdev->fmb->fmt1.rx_bytes);
+ break;
+ case 2:
+ pci_fmb_show(m, pci_fmt2_names, ARRAY_SIZE(pci_fmt2_names),
+ &zdev->fmb->fmt2.consumed_work_units);
+ break;
+ case 3:
+ pci_fmb_show(m, pci_fmt3_names, ARRAY_SIZE(pci_fmt3_names),
+ &zdev->fmb->fmt3.tx_bytes);
+ break;
+ default:
+ seq_puts(m, "Unknown format\n");
+ }
+
+ pci_sw_counter_show(m);
+ mutex_unlock(&zdev->lock);
+ return 0;
+}
+
+static ssize_t pci_perf_seq_write(struct file *file, const char __user *ubuf,
+ size_t count, loff_t *off)
+{
+ struct zpci_dev *zdev = ((struct seq_file *) file->private_data)->private;
+ unsigned long val;
+ int rc;
+
+ if (!zdev)
+ return 0;
+
+ rc = kstrtoul_from_user(ubuf, count, 10, &val);
+ if (rc)
+ return rc;
+
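+ /* Writing 0 disables and 1 enables the function measurement block. */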
+ mutex_lock(&zdev->lock);
+ switch (val) {
+ case 0:
+ rc = zpci_fmb_disable_device(zdev);
+ break;
+ case 1:
+ rc = zpci_fmb_enable_device(zdev);
+ break;
+ }
+ mutex_unlock(&zdev->lock);
+ return rc ? rc : count;
+}
+
+static int pci_perf_seq_open(struct inode *inode, struct file *filp)
+{
+ return single_open(filp, pci_perf_show,
+ file_inode(filp)->i_private);
+}
+
+static const struct file_operations debugfs_pci_perf_fops = {
+ .open = pci_perf_seq_open,
+ .read = seq_read,
+ .write = pci_perf_seq_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void zpci_debug_init_device(struct zpci_dev *zdev, const char *name)
+{
+ zdev->debugfs_dev = debugfs_create_dir(name, debugfs_root);
+
+ debugfs_create_file("statistics", S_IFREG | S_IRUGO | S_IWUSR,
+ zdev->debugfs_dev, zdev, &debugfs_pci_perf_fops);
+}
+
+void zpci_debug_exit_device(struct zpci_dev *zdev)
+{
+ debugfs_remove_recursive(zdev->debugfs_dev);
+}
+
+int __init zpci_debug_init(void)
+{
+ /* event trace buffer */
+ pci_debug_msg_id = debug_register("pci_msg", 8, 1, 8 * sizeof(long));
+ if (!pci_debug_msg_id)
+ return -EINVAL;
+ debug_register_view(pci_debug_msg_id, &debug_sprintf_view);
+ debug_set_level(pci_debug_msg_id, 3);
+
+ /* error log */
+ pci_debug_err_id = debug_register("pci_error", 2, 1, 16);
+ if (!pci_debug_err_id)
+ return -EINVAL;
+ debug_register_view(pci_debug_err_id, &debug_hex_ascii_view);
+ debug_set_level(pci_debug_err_id, 6);
+
+ debugfs_root = debugfs_create_dir("pci", NULL);
+ return 0;
+}
+
+void zpci_debug_exit(void)
+{
+ debug_unregister(pci_debug_msg_id);
+ debug_unregister(pci_debug_err_id);
+ debugfs_remove(debugfs_root);
+}
diff --git a/arch/s390/pci/pci_dma.c b/arch/s390/pci/pci_dma.c
new file mode 100644
index 000000000..f6690a700
--- /dev/null
+++ b/arch/s390/pci/pci_dma.c
@@ -0,0 +1,695 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+#include <linux/iommu-helper.h>
+#include <linux/dma-map-ops.h>
+#include <linux/vmalloc.h>
+#include <linux/pci.h>
+#include <asm/pci_dma.h>
+
+static struct kmem_cache *dma_region_table_cache;
+static struct kmem_cache *dma_page_table_cache;
+static int s390_iommu_strict;
+
+static int zpci_refresh_global(struct zpci_dev *zdev)
+{
+ return zpci_refresh_trans((u64) zdev->fh << 32, zdev->start_dma,
+ zdev->iommu_pages * PAGE_SIZE);
+}
+
+unsigned long *dma_alloc_cpu_table(void)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_region_table_cache, GFP_ATOMIC);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_TABLE_ENTRIES; entry++)
+ *entry = ZPCI_TABLE_INVALID;
+ return table;
+}
+
+static void dma_free_cpu_table(void *table)
+{
+ kmem_cache_free(dma_region_table_cache, table);
+}
+
+static unsigned long *dma_alloc_page_table(void)
+{
+ unsigned long *table, *entry;
+
+ table = kmem_cache_alloc(dma_page_table_cache, GFP_ATOMIC);
+ if (!table)
+ return NULL;
+
+ for (entry = table; entry < table + ZPCI_PT_ENTRIES; entry++)
+ *entry = ZPCI_PTE_INVALID;
+ return table;
+}
+
+static void dma_free_page_table(void *table)
+{
+ kmem_cache_free(dma_page_table_cache, table);
+}
+
+static unsigned long *dma_get_seg_table_origin(unsigned long *entry)
+{
+ unsigned long *sto;
+
+ if (reg_entry_isvalid(*entry))
+ sto = get_rt_sto(*entry);
+ else {
+ sto = dma_alloc_cpu_table();
+ if (!sto)
+ return NULL;
+
+ set_rt_sto(entry, sto);
+ validate_rt_entry(entry);
+ entry_clr_protected(entry);
+ }
+ return sto;
+}
+
+static unsigned long *dma_get_page_table_origin(unsigned long *entry)
+{
+ unsigned long *pto;
+
+ if (reg_entry_isvalid(*entry))
+ pto = get_st_pto(*entry);
+ else {
+ pto = dma_alloc_page_table();
+ if (!pto)
+ return NULL;
+ set_st_pto(entry, pto);
+ validate_st_entry(entry);
+ entry_clr_protected(entry);
+ }
+ return pto;
+}
+
+unsigned long *dma_walk_cpu_trans(unsigned long *rto, dma_addr_t dma_addr)
+{
+ unsigned long *sto, *pto;
+ unsigned int rtx, sx, px;
+
+ rtx = calc_rtx(dma_addr);
+ sto = dma_get_seg_table_origin(&rto[rtx]);
+ if (!sto)
+ return NULL;
+
+ sx = calc_sx(dma_addr);
+ pto = dma_get_page_table_origin(&sto[sx]);
+ if (!pto)
+ return NULL;
+
+ px = calc_px(dma_addr);
+ return &pto[px];
+}
+
+void dma_update_cpu_trans(unsigned long *entry, void *page_addr, int flags)
+{
+ if (flags & ZPCI_PTE_INVALID) {
+ invalidate_pt_entry(entry);
+ } else {
+ set_pt_pfaa(entry, page_addr);
+ validate_pt_entry(entry);
+ }
+
+ if (flags & ZPCI_TABLE_PROTECTED)
+ entry_set_protected(entry);
+ else
+ entry_clr_protected(entry);
+}
+
+static int __dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ unsigned int nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ u8 *page_addr = (u8 *) (pa & PAGE_MASK);
+ unsigned long irq_flags;
+ unsigned long *entry;
+ int i, rc = 0;
+
+ if (!nr_pages)
+ return -EINVAL;
+
+ spin_lock_irqsave(&zdev->dma_table_lock, irq_flags);
+ if (!zdev->dma_table) {
+ rc = -EINVAL;
+ goto out_unlock;
+ }
+
+ for (i = 0; i < nr_pages; i++) {
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry) {
+ rc = -ENOMEM;
+ goto undo_cpu_trans;
+ }
+ dma_update_cpu_trans(entry, page_addr, flags);
+ page_addr += PAGE_SIZE;
+ dma_addr += PAGE_SIZE;
+ }
+
+undo_cpu_trans:
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID)) {
+ flags = ZPCI_PTE_INVALID;
+ while (i-- > 0) {
+ page_addr -= PAGE_SIZE;
+ dma_addr -= PAGE_SIZE;
+ entry = dma_walk_cpu_trans(zdev->dma_table, dma_addr);
+ if (!entry)
+ break;
+ dma_update_cpu_trans(entry, page_addr, flags);
+ }
+ }
+out_unlock:
+ spin_unlock_irqrestore(&zdev->dma_table_lock, irq_flags);
+ return rc;
+}
+
+static int __dma_purge_tlb(struct zpci_dev *zdev, dma_addr_t dma_addr,
+ size_t size, int flags)
+{
+ unsigned long irqflags;
+ int ret;
+
+ /*
+ * With zdev->tlb_refresh == 0, rpcit is not required to establish new
+ * translations when previously invalid translation-table entries are
+ * validated. With lazy unmap, rpcit is skipped for previously valid
+ * entries, but a global rpcit is then required before any address can
+ * be re-used, i.e. after each iommu bitmap wrap-around.
+ */
+ if ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID) {
+ if (!zdev->tlb_refresh)
+ return 0;
+ } else {
+ if (!s390_iommu_strict)
+ return 0;
+ }
+
+ ret = zpci_refresh_trans((u64) zdev->fh << 32, dma_addr,
+ PAGE_ALIGN(size));
+ if (ret == -ENOMEM && !s390_iommu_strict) {
+ /* enable the hypervisor to free some resources */
+ if (zpci_refresh_global(zdev))
+ goto out;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, irqflags);
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, irqflags);
+ ret = 0;
+ }
+out:
+ return ret;
+}
+
+static int dma_update_trans(struct zpci_dev *zdev, unsigned long pa,
+ dma_addr_t dma_addr, size_t size, int flags)
+{
+ int rc;
+
+ rc = __dma_update_trans(zdev, pa, dma_addr, size, flags);
+ if (rc)
+ return rc;
+
+ rc = __dma_purge_tlb(zdev, dma_addr, size, flags);
+ if (rc && ((flags & ZPCI_PTE_VALID_MASK) == ZPCI_PTE_VALID))
+ __dma_update_trans(zdev, pa, dma_addr, size, ZPCI_PTE_INVALID);
+
+ return rc;
+}
+
+void dma_free_seg_table(unsigned long entry)
+{
+ unsigned long *sto = get_rt_sto(entry);
+ int sx;
+
+ for (sx = 0; sx < ZPCI_TABLE_ENTRIES; sx++)
+ if (reg_entry_isvalid(sto[sx]))
+ dma_free_page_table(get_st_pto(sto[sx]));
+
+ dma_free_cpu_table(sto);
+}
+
+void dma_cleanup_tables(unsigned long *table)
+{
+ int rtx;
+
+ if (!table)
+ return;
+
+ for (rtx = 0; rtx < ZPCI_TABLE_ENTRIES; rtx++)
+ if (reg_entry_isvalid(table[rtx]))
+ dma_free_seg_table(table[rtx]);
+
+ dma_free_cpu_table(table);
+}
+
+static unsigned long __dma_alloc_iommu(struct device *dev,
+ unsigned long start, int size)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+
+ return iommu_area_alloc(zdev->iommu_bitmap, zdev->iommu_pages,
+ start, size, zdev->start_dma >> PAGE_SHIFT,
+ dma_get_seg_boundary_nr_pages(dev, PAGE_SHIFT),
+ 0);
+}
+
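+/* Allocate a contiguous range of 'size' pages of DMA address space,
+ * continuing the search at next_bit; on exhaustion do a global TLB
+ * flush (lazy mode only) and retry once from the start of the bitmap.
+ */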
+static dma_addr_t dma_alloc_address(struct device *dev, int size)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ unsigned long offset, flags;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
+ offset = __dma_alloc_iommu(dev, zdev->next_bit, size);
+ if (offset == -1) {
+ if (!s390_iommu_strict) {
+ /* global flush before DMA addresses are reused */
+ if (zpci_refresh_global(zdev))
+ goto out_error;
+
+ bitmap_andnot(zdev->iommu_bitmap, zdev->iommu_bitmap,
+ zdev->lazy_bitmap, zdev->iommu_pages);
+ bitmap_zero(zdev->lazy_bitmap, zdev->iommu_pages);
+ }
+ /* wrap-around */
+ offset = __dma_alloc_iommu(dev, 0, size);
+ if (offset == -1)
+ goto out_error;
+ }
+ zdev->next_bit = offset + size;
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+
+ return zdev->start_dma + offset * PAGE_SIZE;
+
+out_error:
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+ return DMA_MAPPING_ERROR;
+}
+
+static void dma_free_address(struct device *dev, dma_addr_t dma_addr, int size)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ unsigned long flags, offset;
+
+ offset = (dma_addr - zdev->start_dma) >> PAGE_SHIFT;
+
+ spin_lock_irqsave(&zdev->iommu_bitmap_lock, flags);
+ if (!zdev->iommu_bitmap)
+ goto out;
+
+ if (s390_iommu_strict)
+ bitmap_clear(zdev->iommu_bitmap, offset, size);
+ else
+ bitmap_set(zdev->lazy_bitmap, offset, size);
+
+out:
+ spin_unlock_irqrestore(&zdev->iommu_bitmap_lock, flags);
+}
+
+static inline void zpci_err_dma(unsigned long rc, unsigned long addr)
+{
+ struct {
+ unsigned long rc;
+ unsigned long addr;
+ } __packed data = {rc, addr};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
+static dma_addr_t s390_dma_map_pages(struct device *dev, struct page *page,
+ unsigned long offset, size_t size,
+ enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ unsigned long pa = page_to_phys(page) + offset;
+ int flags = ZPCI_PTE_VALID;
+ unsigned long nr_pages;
+ dma_addr_t dma_addr;
+ int ret;
+
+	/* This rounds up the number of pages based on size and offset */
+ nr_pages = iommu_num_pages(pa, size, PAGE_SIZE);
+ dma_addr = dma_alloc_address(dev, nr_pages);
+ if (dma_addr == DMA_MAPPING_ERROR) {
+ ret = -ENOSPC;
+ goto out_err;
+ }
+
+ /* Use rounded up size */
+ size = nr_pages * PAGE_SIZE;
+
+ if (direction == DMA_NONE || direction == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ ret = dma_update_trans(zdev, pa, dma_addr, size, flags);
+ if (ret)
+ goto out_free;
+
+ atomic64_add(nr_pages, &zdev->mapped_pages);
+ return dma_addr + (offset & ~PAGE_MASK);
+
+out_free:
+ dma_free_address(dev, dma_addr, nr_pages);
+out_err:
+ zpci_err("map error:\n");
+ zpci_err_dma(ret, pa);
+ return DMA_MAPPING_ERROR;
+}
+
+static void s390_dma_unmap_pages(struct device *dev, dma_addr_t dma_addr,
+ size_t size, enum dma_data_direction direction,
+ unsigned long attrs)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ int npages, ret;
+
+ npages = iommu_num_pages(dma_addr, size, PAGE_SIZE);
+ dma_addr = dma_addr & PAGE_MASK;
+ ret = dma_update_trans(zdev, 0, dma_addr, npages * PAGE_SIZE,
+ ZPCI_PTE_INVALID);
+ if (ret) {
+ zpci_err("unmap error:\n");
+ zpci_err_dma(ret, dma_addr);
+ return;
+ }
+
+ atomic64_add(npages, &zdev->unmapped_pages);
+ dma_free_address(dev, dma_addr, npages);
+}
+
+static void *s390_dma_alloc(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, gfp_t flag,
+ unsigned long attrs)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ struct page *page;
+ unsigned long pa;
+ dma_addr_t map;
+
+ size = PAGE_ALIGN(size);
+ page = alloc_pages(flag | __GFP_ZERO, get_order(size));
+ if (!page)
+ return NULL;
+
+ pa = page_to_phys(page);
+ map = s390_dma_map_pages(dev, page, 0, size, DMA_BIDIRECTIONAL, 0);
+ if (dma_mapping_error(dev, map)) {
+ free_pages(pa, get_order(size));
+ return NULL;
+ }
+
+ atomic64_add(size / PAGE_SIZE, &zdev->allocated_pages);
+ if (dma_handle)
+ *dma_handle = map;
+ return (void *) pa;
+}
+
+static void s390_dma_free(struct device *dev, size_t size,
+ void *pa, dma_addr_t dma_handle,
+ unsigned long attrs)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+
+ size = PAGE_ALIGN(size);
+ atomic64_sub(size / PAGE_SIZE, &zdev->allocated_pages);
+ s390_dma_unmap_pages(dev, dma_handle, size, DMA_BIDIRECTIONAL, 0);
+ free_pages((unsigned long) pa, get_order(size));
+}
+
+/* Map a segment into a contiguous dma address area */
+static int __s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ size_t size, dma_addr_t *handle,
+ enum dma_data_direction dir)
+{
+ unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+ dma_addr_t dma_addr_base, dma_addr;
+ int flags = ZPCI_PTE_VALID;
+ struct scatterlist *s;
+ unsigned long pa = 0;
+ int ret;
+
+ dma_addr_base = dma_alloc_address(dev, nr_pages);
+ if (dma_addr_base == DMA_MAPPING_ERROR)
+ return -ENOMEM;
+
+ dma_addr = dma_addr_base;
+ if (dir == DMA_NONE || dir == DMA_TO_DEVICE)
+ flags |= ZPCI_TABLE_PROTECTED;
+
+ for (s = sg; dma_addr < dma_addr_base + size; s = sg_next(s)) {
+ pa = page_to_phys(sg_page(s));
+ ret = __dma_update_trans(zdev, pa, dma_addr,
+ s->offset + s->length, flags);
+ if (ret)
+ goto unmap;
+
+ dma_addr += s->offset + s->length;
+ }
+ ret = __dma_purge_tlb(zdev, dma_addr_base, size, flags);
+ if (ret)
+ goto unmap;
+
+ *handle = dma_addr_base;
+ atomic64_add(nr_pages, &zdev->mapped_pages);
+
+ return ret;
+
+unmap:
+ dma_update_trans(zdev, 0, dma_addr_base, dma_addr - dma_addr_base,
+ ZPCI_PTE_INVALID);
+ dma_free_address(dev, dma_addr_base, nr_pages);
+ zpci_err("map error:\n");
+ zpci_err_dma(ret, pa);
+ return ret;
+}
+
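+/* Merge scatterlist elements into maximal contiguous DMA segments; elements
+ * are coalesced as long as the running size stays page-aligned and below the
+ * device's maximum segment size.
+ */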
+static int s390_dma_map_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s = sg, *start = sg, *dma = sg;
+ unsigned int max = dma_get_max_seg_size(dev);
+ unsigned int size = s->offset + s->length;
+ unsigned int offset = s->offset;
+ int count = 0, i;
+
+ for (i = 1; i < nr_elements; i++) {
+ s = sg_next(s);
+
+ s->dma_address = DMA_MAPPING_ERROR;
+ s->dma_length = 0;
+
+ if (s->offset || (size & ~PAGE_MASK) ||
+ size + s->length > max) {
+ if (__s390_dma_map_sg(dev, start, size,
+ &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ size = offset = s->offset;
+ start = s;
+ dma = sg_next(dma);
+ count++;
+ }
+ size += s->length;
+ }
+ if (__s390_dma_map_sg(dev, start, size, &dma->dma_address, dir))
+ goto unmap;
+
+ dma->dma_address += offset;
+ dma->dma_length = size - offset;
+
+ return count + 1;
+unmap:
+ for_each_sg(sg, s, count, i)
+ s390_dma_unmap_pages(dev, sg_dma_address(s), sg_dma_len(s),
+ dir, attrs);
+
+ return 0;
+}
+
+static void s390_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
+ int nr_elements, enum dma_data_direction dir,
+ unsigned long attrs)
+{
+ struct scatterlist *s;
+ int i;
+
+ for_each_sg(sg, s, nr_elements, i) {
+ if (s->dma_length)
+ s390_dma_unmap_pages(dev, s->dma_address, s->dma_length,
+ dir, attrs);
+ s->dma_address = 0;
+ s->dma_length = 0;
+ }
+}
+
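+/* vzalloc-based bitmap allocation; the iommu bitmap may be too large for a
+ * kmalloc-backed allocation.
+ */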
+static unsigned long *bitmap_vzalloc(size_t bits, gfp_t flags)
+{
+ size_t n = BITS_TO_LONGS(bits);
+ size_t bytes;
+
+ if (unlikely(check_mul_overflow(n, sizeof(unsigned long), &bytes)))
+ return NULL;
+
+ return vzalloc(bytes);
+}
+
+int zpci_dma_init_device(struct zpci_dev *zdev)
+{
+ int rc;
+
+ /*
+ * At this point, if the device is part of an IOMMU domain, this would
+ * be a strong hint towards a bug in the IOMMU API (common) code and/or
+ * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+ */
+ WARN_ON(zdev->s390_domain);
+
+ spin_lock_init(&zdev->iommu_bitmap_lock);
+ spin_lock_init(&zdev->dma_table_lock);
+
+ zdev->dma_table = dma_alloc_cpu_table();
+ if (!zdev->dma_table) {
+ rc = -ENOMEM;
+ goto out;
+ }
+
+ /*
+ * Restrict the iommu bitmap size to the minimum of the following:
+ * - main memory size
+ * - 3-level pagetable address limit minus start_dma offset
+ * - DMA address range allowed by the hardware (clp query pci fn)
+ *
+ * Also set zdev->end_dma to the actual end address of the usable
+ * range, instead of the theoretical maximum as reported by hardware.
+ */
+ zdev->start_dma = PAGE_ALIGN(zdev->start_dma);
+ zdev->iommu_size = min3((u64) high_memory,
+ ZPCI_TABLE_SIZE_RT - zdev->start_dma,
+ zdev->end_dma - zdev->start_dma + 1);
+ zdev->end_dma = zdev->start_dma + zdev->iommu_size - 1;
+ zdev->iommu_pages = zdev->iommu_size >> PAGE_SHIFT;
+ zdev->iommu_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
+ if (!zdev->iommu_bitmap) {
+ rc = -ENOMEM;
+ goto free_dma_table;
+ }
+ if (!s390_iommu_strict) {
+ zdev->lazy_bitmap = bitmap_vzalloc(zdev->iommu_pages, GFP_KERNEL);
+ if (!zdev->lazy_bitmap) {
+ rc = -ENOMEM;
+ goto free_bitmap;
+ }
+
+ }
+ rc = zpci_register_ioat(zdev, 0, zdev->start_dma, zdev->end_dma,
+ (u64) zdev->dma_table);
+ if (rc)
+ goto free_bitmap;
+
+ return 0;
+free_bitmap:
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
+free_dma_table:
+ dma_free_cpu_table(zdev->dma_table);
+ zdev->dma_table = NULL;
+out:
+ return rc;
+}
+
+void zpci_dma_exit_device(struct zpci_dev *zdev)
+{
+ /*
+ * At this point, if the device is part of an IOMMU domain, this would
+ * be a strong hint towards a bug in the IOMMU API (common) code and/or
+ * simultaneous access via IOMMU and DMA API. So let's issue a warning.
+ */
+ WARN_ON(zdev->s390_domain);
+
+ if (zpci_unregister_ioat(zdev, 0))
+ return;
+
+ dma_cleanup_tables(zdev->dma_table);
+ zdev->dma_table = NULL;
+ vfree(zdev->iommu_bitmap);
+ zdev->iommu_bitmap = NULL;
+ vfree(zdev->lazy_bitmap);
+ zdev->lazy_bitmap = NULL;
+
+ zdev->next_bit = 0;
+}
+
+static int __init dma_alloc_cpu_table_caches(void)
+{
+ dma_region_table_cache = kmem_cache_create("PCI_DMA_region_tables",
+ ZPCI_TABLE_SIZE, ZPCI_TABLE_ALIGN,
+ 0, NULL);
+ if (!dma_region_table_cache)
+ return -ENOMEM;
+
+ dma_page_table_cache = kmem_cache_create("PCI_DMA_page_tables",
+ ZPCI_PT_SIZE, ZPCI_PT_ALIGN,
+ 0, NULL);
+ if (!dma_page_table_cache) {
+ kmem_cache_destroy(dma_region_table_cache);
+ return -ENOMEM;
+ }
+ return 0;
+}
+
+int __init zpci_dma_init(void)
+{
+ return dma_alloc_cpu_table_caches();
+}
+
+void zpci_dma_exit(void)
+{
+ kmem_cache_destroy(dma_page_table_cache);
+ kmem_cache_destroy(dma_region_table_cache);
+}
+
+const struct dma_map_ops s390_pci_dma_ops = {
+ .alloc = s390_dma_alloc,
+ .free = s390_dma_free,
+ .map_sg = s390_dma_map_sg,
+ .unmap_sg = s390_dma_unmap_sg,
+ .map_page = s390_dma_map_pages,
+ .unmap_page = s390_dma_unmap_pages,
+ .mmap = dma_common_mmap,
+ .get_sgtable = dma_common_get_sgtable,
+ .alloc_pages = dma_common_alloc_pages,
+ .free_pages = dma_common_free_pages,
+ /* dma_supported is unconditionally true without a callback */
+};
+EXPORT_SYMBOL_GPL(s390_pci_dma_ops);
+
+static int __init s390_iommu_setup(char *str)
+{
+ if (!strcmp(str, "strict"))
+ s390_iommu_strict = 1;
+ return 1;
+}
+
+__setup("s390_iommu=", s390_iommu_setup);
diff --git a/arch/s390/pci/pci_event.c b/arch/s390/pci/pci_event.c
new file mode 100644
index 000000000..6ced44b5b
--- /dev/null
+++ b/arch/s390/pci/pci_event.c
@@ -0,0 +1,175 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <asm/pci_debug.h>
+#include <asm/sclp.h>
+
+#include "pci_bus.h"
+
+/* Content Code Description for PCI Function Error */
+struct zpci_ccdf_err {
+ u32 reserved1;
+ u32 fh; /* function handle */
+ u32 fid; /* function id */
+ u32 ett : 4; /* expected table type */
+ u32 mvn : 12; /* MSI vector number */
+ u32 dmaas : 8; /* DMA address space */
+ u32 : 6;
+ u32 q : 1; /* event qualifier */
+ u32 rw : 1; /* read/write */
+ u64 faddr; /* failing address */
+ u32 reserved3;
+ u16 reserved4;
+ u16 pec; /* PCI event code */
+} __packed;
+
+/* Content Code Description for PCI Function Availability */
+struct zpci_ccdf_avail {
+ u32 reserved1;
+ u32 fh; /* function handle */
+ u32 fid; /* function id */
+ u32 reserved2;
+ u32 reserved3;
+ u32 reserved4;
+ u32 reserved5;
+ u16 reserved6;
+ u16 pec; /* PCI event code */
+} __packed;
+
+static void __zpci_event_error(struct zpci_ccdf_err *ccdf)
+{
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ struct pci_dev *pdev = NULL;
+
+ zpci_err("error CCDF:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
+
+ if (zdev)
+ pdev = pci_get_slot(zdev->zbus->bus, zdev->devfn);
+
+ pr_err("%s: Event 0x%x reports an error for PCI function 0x%x\n",
+ pdev ? pci_name(pdev) : "n/a", ccdf->pec, ccdf->fid);
+
+ if (!pdev)
+ goto no_pdev;
+
+ pdev->error_state = pci_channel_io_perm_failure;
+ pci_dev_put(pdev);
+no_pdev:
+ zpci_zdev_put(zdev);
+}
+
+void zpci_event_error(void *data)
+{
+ if (zpci_is_enabled())
+ __zpci_event_error(data);
+}
+
+static void __zpci_event_availability(struct zpci_ccdf_avail *ccdf)
+{
+ struct zpci_dev *zdev = get_zdev_by_fid(ccdf->fid);
+ bool existing_zdev = !!zdev;
+ enum zpci_state state;
+ struct pci_dev *pdev;
+ int ret;
+
+ zpci_err("avail CCDF:\n");
+ zpci_err_hex(ccdf, sizeof(*ccdf));
+
+ switch (ccdf->pec) {
+ case 0x0301: /* Reserved|Standby -> Configured */
+ if (!zdev) {
+ zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_CONFIGURED);
+ break;
+ }
+ /* the configuration request may be stale */
+ if (zdev->state != ZPCI_FN_STATE_STANDBY)
+ break;
+ zdev->fh = ccdf->fh;
+ zdev->state = ZPCI_FN_STATE_CONFIGURED;
+ ret = zpci_enable_device(zdev);
+ if (ret)
+ break;
+
+ /* the PCI function will be scanned once function 0 appears */
+ if (!zdev->zbus->bus)
+ break;
+
+ pdev = pci_scan_single_device(zdev->zbus->bus, zdev->devfn);
+ if (!pdev)
+ break;
+
+ pci_bus_add_device(pdev);
+ pci_lock_rescan_remove();
+ pci_bus_add_devices(zdev->zbus->bus);
+ pci_unlock_rescan_remove();
+ break;
+ case 0x0302: /* Reserved -> Standby */
+ if (!zdev) {
+ zpci_create_device(ccdf->fid, ccdf->fh, ZPCI_FN_STATE_STANDBY);
+ break;
+ }
+ zdev->fh = ccdf->fh;
+ break;
+ case 0x0303: /* Deconfiguration requested */
+ if (!zdev)
+ break;
+ zpci_remove_device(zdev, false);
+
+ ret = zpci_disable_device(zdev);
+ if (ret)
+ break;
+
+ ret = sclp_pci_deconfigure(zdev->fid);
+ zpci_dbg(3, "deconf fid:%x, rc:%d\n", zdev->fid, ret);
+ if (!ret)
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+
+ break;
+ case 0x0304: /* Configured -> Standby|Reserved */
+ if (!zdev)
+ break;
+ /* Give the driver a hint that the function is
+ * already unusable.
+ */
+ zpci_remove_device(zdev, true);
+
+ zdev->fh = ccdf->fh;
+ zpci_disable_device(zdev);
+ zdev->state = ZPCI_FN_STATE_STANDBY;
+ if (!clp_get_state(ccdf->fid, &state) &&
+ state == ZPCI_FN_STATE_RESERVED) {
+ zpci_device_reserved(zdev);
+ }
+ break;
+ case 0x0306: /* 0x308 or 0x302 for multiple devices */
+ zpci_remove_reserved_devices();
+ clp_scan_pci_devices();
+ break;
+ case 0x0308: /* Standby -> Reserved */
+ if (!zdev)
+ break;
+ zpci_device_reserved(zdev);
+ break;
+ default:
+ break;
+ }
+ if (existing_zdev)
+ zpci_zdev_put(zdev);
+}
+
+void zpci_event_availability(void *data)
+{
+ if (zpci_is_enabled())
+ __zpci_event_availability(data);
+}
diff --git a/arch/s390/pci/pci_insn.c b/arch/s390/pci/pci_insn.c
new file mode 100644
index 000000000..02f9505c9
--- /dev/null
+++ b/arch/s390/pci/pci_insn.c
@@ -0,0 +1,386 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * s390 specific pci instructions
+ *
+ * Copyright IBM Corp. 2013
+ */
+
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/jump_label.h>
+#include <asm/facility.h>
+#include <asm/pci_insn.h>
+#include <asm/pci_debug.h>
+#include <asm/pci_io.h>
+#include <asm/processor.h>
+
+#define ZPCI_INSN_BUSY_DELAY	1	/* 1 microsecond with udelay(), 1 millisecond with msleep() */
+
+static inline void zpci_err_insn(u8 cc, u8 status, u64 req, u64 offset)
+{
+ struct {
+ u64 req;
+ u64 offset;
+ u8 cc;
+ u8 status;
+ } __packed data = {req, offset, cc, status};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
+/* Modify PCI Function Controls */
+static inline u8 __mpcifc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ asm volatile (
+ " .insn rxy,0xe300000000d0,%[req],%[fib]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [req] "+d" (req), [fib] "+Q" (*fib)
+ : : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
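+/* Condition code 2 indicates a busy condition; retry after a short delay. */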
+u8 zpci_mod_fc(u64 req, struct zpci_fib *fib, u8 *status)
+{
+ u8 cc;
+
+ do {
+ cc = __mpcifc(req, fib, status);
+ if (cc == 2)
+ msleep(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ zpci_err_insn(cc, *status, req, 0);
+
+ return cc;
+}
+
+/* Refresh PCI Translations */
+static inline u8 __rpcit(u64 fn, u64 addr, u64 range, u8 *status)
+{
+ register u64 __addr asm("2") = addr;
+ register u64 __range asm("3") = range;
+ u8 cc;
+
+ asm volatile (
+ " .insn rre,0xb9d30000,%[fn],%[addr]\n"
+ " ipm %[cc]\n"
+ " srl %[cc],28\n"
+ : [cc] "=d" (cc), [fn] "+d" (fn)
+ : [addr] "d" (__addr), "d" (__range)
+ : "cc");
+ *status = fn >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_refresh_trans(u64 fn, u64 addr, u64 range)
+{
+ u8 cc, status;
+
+ do {
+ cc = __rpcit(fn, addr, range, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ zpci_err_insn(cc, status, addr, range);
+
+ if (cc == 1 && (status == 4 || status == 16))
+ return -ENOMEM;
+
+ return (cc) ? -EIO : 0;
+}
+
+/* Set Interruption Controls */
+int __zpci_set_irq_ctrl(u16 ctl, u8 isc, union zpci_sic_iib *iib)
+{
+ if (!test_facility(72))
+ return -EIO;
+
+ asm volatile(
+ ".insn rsy,0xeb00000000d1,%[ctl],%[isc],%[iib]\n"
+ : : [ctl] "d" (ctl), [isc] "d" (isc << 27), [iib] "Q" (*iib));
+
+ return 0;
+}
+
+/* PCI Load */
+static inline int ____pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d20000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), [req] "+d" (__req)
+ : "d" (__offset)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+static inline int __pcilg(u64 *data, u64 req, u64 offset, u8 *status)
+{
+ u64 __data;
+ int cc;
+
+ cc = ____pcilg(&__data, req, offset, status);
+ if (!cc)
+ *data = __data;
+
+ return cc;
+}
+
+int __zpci_load(u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcilg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ zpci_err_insn(cc, status, req, offset);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(__zpci_load);
+
+static inline int zpci_load_fh(u64 *data, const volatile void __iomem *addr,
+ unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+ return __zpci_load(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcilg_mio(u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+ register u64 addr asm("2") = ioaddr;
+ register u64 r3 asm("3") = len;
+ int cc = -ENXIO;
+ u64 __data;
+
+ asm volatile (
+ " .insn rre,0xb9d60000,%[data],%[ioaddr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [data] "=d" (__data), "+d" (r3)
+ : [ioaddr] "d" (addr)
+ : "cc");
+ *status = r3 >> 24 & 0xff;
+ *data = __data;
+ return cc;
+}
+
+int zpci_load(u64 *data, const volatile void __iomem *addr, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_load_fh(data, addr, len);
+
+ cc = __pcilg_mio(data, (__force u64) addr, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(zpci_load);
+
+/* PCI Store */
+static inline int __pcistg(u64 data, u64 req, u64 offset, u8 *status)
+{
+ register u64 __req asm("2") = req;
+ register u64 __offset asm("3") = offset;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d00000,%[data],%[req]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (__req)
+ : "d" (__offset), [data] "d" (data)
+ : "cc");
+ *status = __req >> 24 & 0xff;
+ return cc;
+}
+
+int __zpci_store(u64 data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistg(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ zpci_err_insn(cc, status, req, offset);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(__zpci_store);
+
+static inline int zpci_store_fh(const volatile void __iomem *addr, u64 data,
+ unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(addr)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+
+ return __zpci_store(data, req, ZPCI_OFFSET(addr));
+}
+
+static inline int __pcistg_mio(u64 data, u64 ioaddr, u64 len, u8 *status)
+{
+ register u64 addr asm("2") = ioaddr;
+ register u64 r3 asm("3") = len;
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rre,0xb9d40000,%[data],%[ioaddr]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), "+d" (r3)
+ : [data] "d" (data), [ioaddr] "d" (addr)
+ : "cc");
+ *status = r3 >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_store(const volatile void __iomem *addr, u64 data, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_store_fh(addr, data, len);
+
+ cc = __pcistg_mio(data, (__force u64) addr, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) addr);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(zpci_store);
+
+/* PCI Store Block */
+static inline int __pcistb(const u64 *data, u64 req, u64 offset, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d0,%[req],%[offset],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [req] "+d" (req)
+ : [offset] "d" (offset), [data] "Q" (*data)
+ : "cc");
+ *status = req >> 24 & 0xff;
+ return cc;
+}
+
+int __zpci_store_block(const u64 *data, u64 req, u64 offset)
+{
+ u8 status;
+ int cc;
+
+ do {
+ cc = __pcistb(data, req, offset, &status);
+ if (cc == 2)
+ udelay(ZPCI_INSN_BUSY_DELAY);
+ } while (cc == 2);
+
+ if (cc)
+ zpci_err_insn(cc, status, req, offset);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(__zpci_store_block);
+
+static inline int zpci_write_block_fh(volatile void __iomem *dst,
+ const void *src, unsigned long len)
+{
+ struct zpci_iomap_entry *entry = &zpci_iomap_start[ZPCI_IDX(dst)];
+ u64 req = ZPCI_CREATE_REQ(entry->fh, entry->bar, len);
+ u64 offset = ZPCI_OFFSET(dst);
+
+ return __zpci_store_block(src, req, offset);
+}
+
+static inline int __pcistb_mio(const u64 *data, u64 ioaddr, u64 len, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[data]\n"
+ "0: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "1:\n"
+ EX_TABLE(0b, 1b)
+ : [cc] "+d" (cc), [len] "+d" (len)
+ : [ioaddr] "d" (ioaddr), [data] "Q" (*data)
+ : "cc");
+ *status = len >> 24 & 0xff;
+ return cc;
+}
+
+int zpci_write_block(volatile void __iomem *dst,
+ const void *src, unsigned long len)
+{
+ u8 status;
+ int cc;
+
+ if (!static_branch_unlikely(&have_mio))
+ return zpci_write_block_fh(dst, src, len);
+
+ cc = __pcistb_mio(src, (__force u64) dst, len, &status);
+ if (cc)
+ zpci_err_insn(cc, status, 0, (__force u64) dst);
+
+ return (cc > 0) ? -EIO : cc;
+}
+EXPORT_SYMBOL_GPL(zpci_write_block);
+
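+/* PCI write barrier: commit outstanding MIO stores */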
+static inline void __pciwb_mio(void)
+{
+ unsigned long unused = 0;
+
+ asm volatile (".insn rre,0xb9d50000,%[op],%[op]\n"
+ : [op] "+d" (unused));
+}
+
+void zpci_barrier(void)
+{
+ if (static_branch_likely(&have_mio))
+ __pciwb_mio();
+}
+EXPORT_SYMBOL_GPL(zpci_barrier);
diff --git a/arch/s390/pci/pci_iov.c b/arch/s390/pci/pci_iov.c
new file mode 100644
index 000000000..ead062bf2
--- /dev/null
+++ b/arch/s390/pci/pci_iov.c
@@ -0,0 +1,99 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Niklas Schnelle <schnelle@linux.ibm.com>
+ *
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/pci.h>
+
+#include "pci_iov.h"
+
+static struct resource iov_res = {
+ .name = "PCI IOV res",
+ .start = 0,
+ .end = -1,
+ .flags = IORESOURCE_MEM,
+};
+
+void zpci_iov_map_resources(struct pci_dev *pdev)
+{
+ resource_size_t len;
+ int i;
+
+ for (i = 0; i < PCI_SRIOV_NUM_BARS; i++) {
+ int bar = i + PCI_IOV_RESOURCES;
+
+ len = pci_resource_len(pdev, bar);
+ if (!len)
+ continue;
+ pdev->resource[bar].parent = &iov_res;
+ }
+}
+
+void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn)
+{
+ pci_lock_rescan_remove();
+	/* Linux' vfids start at 0 while vfns start at 1 */
+ pci_iov_remove_virtfn(pdev->physfn, vfn - 1);
+ pci_unlock_rescan_remove();
+}
+
+static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, int vfid)
+{
+ int rc;
+
+ rc = pci_iov_sysfs_link(pdev, virtfn, vfid);
+ if (rc)
+ return rc;
+
+ virtfn->is_virtfn = 1;
+ virtfn->multifunction = 0;
+ virtfn->physfn = pci_dev_get(pdev);
+
+ return 0;
+}
+
+int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
+{
+ int i, cand_devfn;
+ struct zpci_dev *zdev;
+ struct pci_dev *pdev;
+	int vfid = vfn - 1; /* Linux' vfids start at 0 while vfns start at 1 */
+ int rc = 0;
+
+ if (!zbus->multifunction)
+ return 0;
+
+ /* If the parent PF for the given VF is also configured in the
+ * instance, it must be on the same zbus.
+ * We can then identify the parent PF by checking what
+ * devfn the VF would have if it belonged to that PF using the PF's
+ * stride and offset. Only if this candidate devfn matches the
+ * actual devfn will we link both functions.
+ */
+ for (i = 0; i < ZPCI_FUNCTIONS_PER_BUS; i++) {
+ zdev = zbus->function[i];
+ if (zdev && zdev->is_physfn) {
+ pdev = pci_get_slot(zbus->bus, zdev->devfn);
+ if (!pdev)
+ continue;
+ cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
+ if (cand_devfn == virtfn->devfn) {
+ rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
+ break;
+ }
+ /* balance pci_get_slot() */
+ pci_dev_put(pdev);
+ }
+ }
+ return rc;
+}
diff --git a/arch/s390/pci/pci_iov.h b/arch/s390/pci/pci_iov.h
new file mode 100644
index 000000000..b2c828003
--- /dev/null
+++ b/arch/s390/pci/pci_iov.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright IBM Corp. 2020
+ *
+ * Author(s):
+ * Niklas Schnelle <schnelle@linux.ibm.com>
+ *
+ */
+
+#ifndef __S390_PCI_IOV_H
+#define __S390_PCI_IOV_H
+
+#ifdef CONFIG_PCI_IOV
+void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn);
+
+void zpci_iov_map_resources(struct pci_dev *pdev);
+
+int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
+
+#else /* CONFIG_PCI_IOV */
+static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
+
+static inline void zpci_iov_map_resources(struct pci_dev *pdev) {}
+
+static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
+{
+ return 0;
+}
+#endif /* CONFIG_PCI_IOV */
+#endif /* __S390_PCI_IOV_H */
diff --git a/arch/s390/pci/pci_irq.c b/arch/s390/pci/pci_irq.c
new file mode 100644
index 000000000..75217fb63
--- /dev/null
+++ b/arch/s390/pci/pci_irq.c
@@ -0,0 +1,495 @@
+// SPDX-License-Identifier: GPL-2.0
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+#include <linux/pci.h>
+#include <linux/msi.h>
+#include <linux/smp.h>
+
+#include <asm/isc.h>
+#include <asm/airq.h>
+
+static enum {FLOATING, DIRECTED} irq_delivery;
+
+#define SIC_IRQ_MODE_ALL 0
+#define SIC_IRQ_MODE_SINGLE 1
+#define SIC_IRQ_MODE_DIRECT 4
+#define SIC_IRQ_MODE_D_ALL 16
+#define SIC_IRQ_MODE_D_SINGLE 17
+#define SIC_IRQ_MODE_SET_CPU 18
+
+/*
+ * summary bit vector
+ * FLOATING - summary bit per function
+ * DIRECTED - summary bit per cpu (only used in fallback path)
+ */
+static struct airq_iv *zpci_sbv;
+
+/*
+ * interrupt bit vectors
+ * FLOATING - interrupt bit vector per function
+ * DIRECTED - interrupt bit vector per cpu
+ */
+static struct airq_iv **zpci_ibv;
+
+/* Modify PCI: Register adapter interruptions */
+static int zpci_set_airq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT);
+ struct zpci_fib fib = {0};
+ u8 status;
+
+ fib.fmt0.isc = PCI_ISC;
+ fib.fmt0.sum = 1; /* enable summary notifications */
+ fib.fmt0.noi = airq_iv_end(zdev->aibv);
+ fib.fmt0.aibv = (unsigned long) zdev->aibv->vector;
+ fib.fmt0.aibvo = 0; /* each zdev has its own interrupt vector */
+ fib.fmt0.aisb = (unsigned long) zpci_sbv->vector + (zdev->aisb/64)*8;
+ fib.fmt0.aisbo = zdev->aisb & 63;
+
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister adapter interruptions */
+static int zpci_clear_airq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3 || (cc == 1 && status == 24))
+ /* Function already gone or IRQs already deregistered. */
+ cc = 0;
+
+ return cc ? -EIO : 0;
+}
+
+/* Modify PCI: Register CPU directed interruptions */
+static int zpci_set_directed_irq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_REG_INT_D);
+ struct zpci_fib fib = {0};
+ u8 status;
+
+ fib.fmt = 1;
+ fib.fmt1.noi = zdev->msi_nr_irqs;
+ fib.fmt1.dibvo = zdev->msi_first_bit;
+
+ return zpci_mod_fc(req, &fib, &status) ? -EIO : 0;
+}
+
+/* Modify PCI: Unregister CPU directed interruptions */
+static int zpci_clear_directed_irq(struct zpci_dev *zdev)
+{
+ u64 req = ZPCI_CREATE_REQ(zdev->fh, 0, ZPCI_MOD_FC_DEREG_INT_D);
+ struct zpci_fib fib = {0};
+ u8 cc, status;
+
+ fib.fmt = 1;
+ cc = zpci_mod_fc(req, &fib, &status);
+ if (cc == 3 || (cc == 1 && status == 24))
+ /* Function already gone or IRQs already deregistered. */
+ cc = 0;
+
+ return cc ? -EIO : 0;
+}
+
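+/* Re-target a directed MSI to the first CPU in the destination mask by
+ * rewriting the CPU address field in the MSI message address.
+ */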
+static int zpci_set_irq_affinity(struct irq_data *data, const struct cpumask *dest,
+ bool force)
+{
+ struct msi_desc *entry = irq_get_msi_desc(data->irq);
+ struct msi_msg msg = entry->msg;
+ int cpu_addr = smp_cpu_get_cpu_address(cpumask_first(dest));
+
+ msg.address_lo &= 0xff0000ff;
+ msg.address_lo |= (cpu_addr << 8);
+ pci_write_msi_msg(data->irq, &msg);
+
+ return IRQ_SET_MASK_OK;
+}
+
+static struct irq_chip zpci_irq_chip = {
+ .name = "PCI-MSI",
+ .irq_unmask = pci_msi_unmask_irq,
+ .irq_mask = pci_msi_mask_irq,
+};
+
+static void zpci_handle_cpu_local_irq(bool rescan)
+{
+ struct airq_iv *dibv = zpci_ibv[smp_processor_id()];
+ unsigned long bit;
+ int irqs_on = 0;
+
+ for (bit = 0;;) {
+ /* Scan the directed IRQ bit vector */
+ bit = airq_iv_scan(dibv, bit, airq_iv_end(dibv));
+ if (bit == -1UL) {
+ if (!rescan || irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC))
+ break;
+ bit = 0;
+ continue;
+ }
+ inc_irq_stat(IRQIO_MSI);
+ generic_handle_irq(airq_iv_get_data(dibv, bit));
+ }
+}
+
+struct cpu_irq_data {
+ call_single_data_t csd;
+ atomic_t scheduled;
+};
+static DEFINE_PER_CPU_SHARED_ALIGNED(struct cpu_irq_data, irq_data);
+
+static void zpci_handle_remote_irq(void *data)
+{
+ atomic_t *scheduled = data;
+
+ do {
+ zpci_handle_cpu_local_irq(false);
+ } while (atomic_dec_return(scheduled));
+}
+
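+/* Fallback path for directed IRQs: the floating summary bit tells us which
+ * CPU's local interrupt bit vector needs scanning; kick that CPU via an
+ * async smp call unless a scan is already scheduled there.
+ */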
+static void zpci_handle_fallback_irq(void)
+{
+ struct cpu_irq_data *cpu_data;
+ unsigned long cpu;
+ int irqs_on = 0;
+
+ for (cpu = 0;;) {
+ cpu = airq_iv_scan(zpci_sbv, cpu, airq_iv_end(zpci_sbv));
+ if (cpu == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+ break;
+ cpu = 0;
+ continue;
+ }
+ cpu_data = &per_cpu(irq_data, cpu);
+ if (atomic_inc_return(&cpu_data->scheduled) > 1)
+ continue;
+
+ cpu_data->csd.func = zpci_handle_remote_irq;
+ cpu_data->csd.info = &cpu_data->scheduled;
+ cpu_data->csd.flags = 0;
+ smp_call_function_single_async(cpu, &cpu_data->csd);
+ }
+}
+
+static void zpci_directed_irq_handler(struct airq_struct *airq, bool floating)
+{
+ if (floating) {
+ inc_irq_stat(IRQIO_PCF);
+ zpci_handle_fallback_irq();
+ } else {
+ inc_irq_stat(IRQIO_PCD);
+ zpci_handle_cpu_local_irq(true);
+ }
+}
+
+static void zpci_floating_irq_handler(struct airq_struct *airq, bool floating)
+{
+ unsigned long si, ai;
+ struct airq_iv *aibv;
+ int irqs_on = 0;
+
+ inc_irq_stat(IRQIO_PCF);
+ for (si = 0;;) {
+ /* Scan adapter summary indicator bit vector */
+ si = airq_iv_scan(zpci_sbv, si, airq_iv_end(zpci_sbv));
+ if (si == -1UL) {
+ if (irqs_on++)
+ /* End of second scan with interrupts on. */
+ break;
+ /* First scan complete, reenable interrupts. */
+ if (zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC))
+ break;
+ si = 0;
+ continue;
+ }
+
+ /* Scan the adapter interrupt vector for this device. */
+ aibv = zpci_ibv[si];
+ for (ai = 0;;) {
+ ai = airq_iv_scan(aibv, ai, airq_iv_end(aibv));
+ if (ai == -1UL)
+ break;
+ inc_irq_stat(IRQIO_MSI);
+ airq_iv_lock(aibv, ai);
+ generic_handle_irq(airq_iv_get_data(aibv, ai));
+ airq_iv_unlock(aibv, ai);
+ }
+ }
+}
+
+int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ unsigned int hwirq, msi_vecs, cpu;
+ unsigned long bit;
+ struct msi_desc *msi;
+ struct msi_msg msg;
+ int cpu_addr;
+ int rc, irq;
+
+ zdev->aisb = -1UL;
+ zdev->msi_first_bit = -1U;
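+	/* multiple MSI (as opposed to MSI-X) is not supported; a positive
+	 * return value tells the PCI core how many vectors could be set up
+	 */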
+ if (type == PCI_CAP_ID_MSI && nvec > 1)
+ return 1;
+ msi_vecs = min_t(unsigned int, nvec, zdev->max_msi);
+
+ if (irq_delivery == DIRECTED) {
+ /* Allocate cpu vector bits */
+ bit = airq_iv_alloc(zpci_ibv[0], msi_vecs);
+ if (bit == -1UL)
+ return -EIO;
+ } else {
+ /* Allocate adapter summary indicator bit */
+ bit = airq_iv_alloc_bit(zpci_sbv);
+ if (bit == -1UL)
+ return -EIO;
+ zdev->aisb = bit;
+
+ /* Create adapter interrupt vector */
+ zdev->aibv = airq_iv_create(msi_vecs, AIRQ_IV_DATA | AIRQ_IV_BITLOCK);
+ if (!zdev->aibv)
+ return -ENOMEM;
+
+ /* Wire up shortcut pointer */
+ zpci_ibv[bit] = zdev->aibv;
+ /* Each function has its own interrupt vector */
+ bit = 0;
+ }
+
+ /* Request MSI interrupts */
+ hwirq = bit;
+ for_each_pci_msi_entry(msi, pdev) {
+ rc = -EIO;
+ if (hwirq - bit >= msi_vecs)
+ break;
+ irq = __irq_alloc_descs(-1, 0, 1, 0, THIS_MODULE,
+ (irq_delivery == DIRECTED) ?
+ msi->affinity : NULL);
+ if (irq < 0)
+ return -ENOMEM;
+ rc = irq_set_msi_desc(irq, msi);
+ if (rc)
+ return rc;
+ irq_set_chip_and_handler(irq, &zpci_irq_chip,
+ handle_percpu_irq);
+ msg.data = hwirq - bit;
+ if (irq_delivery == DIRECTED) {
+ if (msi->affinity)
+ cpu = cpumask_first(&msi->affinity->mask);
+ else
+ cpu = 0;
+ cpu_addr = smp_cpu_get_cpu_address(cpu);
+
+ msg.address_lo = zdev->msi_addr & 0xff0000ff;
+ msg.address_lo |= (cpu_addr << 8);
+
+ for_each_possible_cpu(cpu) {
+ airq_iv_set_data(zpci_ibv[cpu], hwirq, irq);
+ }
+ } else {
+ msg.address_lo = zdev->msi_addr & 0xffffffff;
+ airq_iv_set_data(zdev->aibv, hwirq, irq);
+ }
+ msg.address_hi = zdev->msi_addr >> 32;
+ pci_write_msi_msg(irq, &msg);
+ hwirq++;
+ }
+
+ zdev->msi_first_bit = bit;
+ zdev->msi_nr_irqs = msi_vecs;
+
+ if (irq_delivery == DIRECTED)
+ rc = zpci_set_directed_irq(zdev);
+ else
+ rc = zpci_set_airq(zdev);
+ if (rc)
+ return rc;
+
+ return (msi_vecs == nvec) ? 0 : msi_vecs;
+}
+
+void arch_teardown_msi_irqs(struct pci_dev *pdev)
+{
+ struct zpci_dev *zdev = to_zpci(pdev);
+ struct msi_desc *msi;
+ int rc;
+
+ /* Disable interrupts */
+ if (irq_delivery == DIRECTED)
+ rc = zpci_clear_directed_irq(zdev);
+ else
+ rc = zpci_clear_airq(zdev);
+ if (rc)
+ return;
+
+ /* Release MSI interrupts */
+ for_each_pci_msi_entry(msi, pdev) {
+ if (!msi->irq)
+ continue;
+ if (msi->msi_attrib.is_msix)
+ __pci_msix_desc_mask_irq(msi, 1);
+ else
+ __pci_msi_desc_mask_irq(msi, 1, 1);
+ irq_set_msi_desc(msi->irq, NULL);
+ irq_free_desc(msi->irq);
+ msi->msg.address_lo = 0;
+ msi->msg.address_hi = 0;
+ msi->msg.data = 0;
+ msi->irq = 0;
+ }
+
+ if (zdev->aisb != -1UL) {
+ zpci_ibv[zdev->aisb] = NULL;
+ airq_iv_free_bit(zpci_sbv, zdev->aisb);
+ zdev->aisb = -1UL;
+ }
+ if (zdev->aibv) {
+ airq_iv_release(zdev->aibv);
+ zdev->aibv = NULL;
+ }
+
+ if ((irq_delivery == DIRECTED) && zdev->msi_first_bit != -1U)
+ airq_iv_free(zpci_ibv[0], zdev->msi_first_bit, zdev->msi_nr_irqs);
+}
+
+static struct airq_struct zpci_airq = {
+ .handler = zpci_floating_irq_handler,
+ .isc = PCI_ISC,
+};
+
+static void __init cpu_enable_directed_irq(void *unused)
+{
+ union zpci_sic_iib iib = {{0}};
+
+ iib.cdiib.dibv_addr = (u64) zpci_ibv[smp_processor_id()]->vector;
+
+ __zpci_set_irq_ctrl(SIC_IRQ_MODE_SET_CPU, 0, &iib);
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_D_SINGLE, PCI_ISC);
+}
+
+static int __init zpci_directed_irq_init(void)
+{
+ union zpci_sic_iib iib = {{0}};
+ unsigned int cpu;
+
+ zpci_sbv = airq_iv_create(num_possible_cpus(), 0);
+ if (!zpci_sbv)
+ return -ENOMEM;
+
+ iib.diib.isc = PCI_ISC;
+ iib.diib.nr_cpus = num_possible_cpus();
+ iib.diib.disb_addr = (u64) zpci_sbv->vector;
+ __zpci_set_irq_ctrl(SIC_IRQ_MODE_DIRECT, 0, &iib);
+
+ zpci_ibv = kcalloc(num_possible_cpus(), sizeof(*zpci_ibv),
+ GFP_KERNEL);
+ if (!zpci_ibv)
+ return -ENOMEM;
+
+ for_each_possible_cpu(cpu) {
+ /*
+ * Per CPU IRQ vectors look the same but bit-allocation
+ * is only done on the first vector.
+ */
+ zpci_ibv[cpu] = airq_iv_create(cache_line_size() * BITS_PER_BYTE,
+ AIRQ_IV_DATA |
+ AIRQ_IV_CACHELINE |
+ (!cpu ? AIRQ_IV_ALLOC : 0));
+ if (!zpci_ibv[cpu])
+ return -ENOMEM;
+ }
+ on_each_cpu(cpu_enable_directed_irq, NULL, 1);
+
+ zpci_irq_chip.irq_set_affinity = zpci_set_irq_affinity;
+
+ return 0;
+}
+
+static int __init zpci_floating_irq_init(void)
+{
+ zpci_ibv = kcalloc(ZPCI_NR_DEVICES, sizeof(*zpci_ibv), GFP_KERNEL);
+ if (!zpci_ibv)
+ return -ENOMEM;
+
+ zpci_sbv = airq_iv_create(ZPCI_NR_DEVICES, AIRQ_IV_ALLOC);
+ if (!zpci_sbv)
+ goto out_free;
+
+ return 0;
+
+out_free:
+ kfree(zpci_ibv);
+ return -ENOMEM;
+}
+
+int __init zpci_irq_init(void)
+{
+ int rc;
+
+ irq_delivery = sclp.has_dirq ? DIRECTED : FLOATING;
+ if (s390_pci_force_floating)
+ irq_delivery = FLOATING;
+
+ if (irq_delivery == DIRECTED)
+ zpci_airq.handler = zpci_directed_irq_handler;
+
+ rc = register_adapter_interrupt(&zpci_airq);
+ if (rc)
+ goto out;
+ /* Set summary to 1 to be called every time for the ISC. */
+ *zpci_airq.lsi_ptr = 1;
+
+ switch (irq_delivery) {
+ case FLOATING:
+ rc = zpci_floating_irq_init();
+ break;
+ case DIRECTED:
+ rc = zpci_directed_irq_init();
+ break;
+ }
+
+ if (rc)
+ goto out_airq;
+
+ /*
+ * Enable floating IRQs (with suppression after one IRQ). When using
+ * directed IRQs this enables the fallback path.
+ */
+ zpci_set_irq_ctrl(SIC_IRQ_MODE_SINGLE, PCI_ISC);
+
+ return 0;
+out_airq:
+ unregister_adapter_interrupt(&zpci_airq);
+out:
+ return rc;
+}
+
+void __init zpci_irq_exit(void)
+{
+ unsigned int cpu;
+
+ if (irq_delivery == DIRECTED) {
+ for_each_possible_cpu(cpu) {
+ airq_iv_release(zpci_ibv[cpu]);
+ }
+ }
+ kfree(zpci_ibv);
+ if (zpci_sbv)
+ airq_iv_release(zpci_sbv);
+ unregister_adapter_interrupt(&zpci_airq);
+}
diff --git a/arch/s390/pci/pci_mmio.c b/arch/s390/pci/pci_mmio.c
new file mode 100644
index 000000000..6e7c4762b
--- /dev/null
+++ b/arch/s390/pci/pci_mmio.c
@@ -0,0 +1,328 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Access to PCI I/O memory from user space programs.
+ *
+ * Copyright IBM Corp. 2014
+ * Author(s): Alexey Ishchuk <aishchuk@linux.vnet.ibm.com>
+ */
+#include <linux/kernel.h>
+#include <linux/syscalls.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/pci.h>
+#include <asm/pci_io.h>
+#include <asm/pci_debug.h>
+
+static inline void zpci_err_mmio(u8 cc, u8 status, u64 offset)
+{
+ struct {
+ u64 offset;
+ u8 cc;
+ u8 status;
+ } data = {offset, cc, status};
+
+ zpci_err_hex(&data, sizeof(data));
+}
+
+static inline int __pcistb_mio_inuser(
+ void __iomem *ioaddr, const void __user *src,
+ u64 len, u8 *status)
+{
+ int cc = -ENXIO;
+
+ asm volatile (
+ " sacf 256\n"
+ "0: .insn rsy,0xeb00000000d4,%[len],%[ioaddr],%[src]\n"
+ "1: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "2: sacf 768\n"
+ EX_TABLE(0b, 2b) EX_TABLE(1b, 2b)
+ : [cc] "+d" (cc), [len] "+d" (len)
+ : [ioaddr] "a" (ioaddr), [src] "Q" (*((u8 __force *)src))
+ : "cc", "memory");
+ *status = len >> 24 & 0xff;
+ return cc;
+}
+
+static inline int __pcistg_mio_inuser(
+ void __iomem *ioaddr, const void __user *src,
+ u64 ulen, u8 *status)
+{
+ register u64 addr asm("2") = (u64 __force) ioaddr;
+ register u64 len asm("3") = ulen;
+ int cc = -ENXIO;
+ u64 val = 0;
+ u64 cnt = ulen;
+ u8 tmp;
+
+ /*
+	 * copy 0 < @len <= 8 bytes from @src into the rightmost bytes of
+ * a register, then store it to PCI at @ioaddr while in secondary
+ * address space. pcistg then uses the user mappings.
+ */
+ asm volatile (
+ " sacf 256\n"
+ "0: llgc %[tmp],0(%[src])\n"
+ "4: sllg %[val],%[val],8\n"
+ " aghi %[src],1\n"
+ " ogr %[val],%[tmp]\n"
+ " brctg %[cnt],0b\n"
+ "1: .insn rre,0xb9d40000,%[val],%[ioaddr]\n"
+ "2: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ "3: sacf 768\n"
+ EX_TABLE(0b, 3b) EX_TABLE(4b, 3b) EX_TABLE(1b, 3b) EX_TABLE(2b, 3b)
+ :
+ [src] "+a" (src), [cnt] "+d" (cnt),
+ [val] "+d" (val), [tmp] "=d" (tmp),
+ [len] "+d" (len), [cc] "+d" (cc),
+ [ioaddr] "+a" (addr)
+ :: "cc", "memory");
+ *status = len >> 24 & 0xff;
+
+ /* did we read everything from user memory? */
+ if (!cc && cnt != 0)
+ cc = -EFAULT;
+
+ return cc;
+}
+
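+/* Copy to PCI I/O memory in device-sized chunks: pcistb for blocks larger
+ * than 8 bytes, pcistg otherwise.
+ */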
+static inline int __memcpy_toio_inuser(void __iomem *dst,
+ const void __user *src, size_t n)
+{
+ int size, rc = 0;
+ u8 status = 0;
+ mm_segment_t old_fs;
+
+ if (!src)
+ return -EINVAL;
+
+ old_fs = enable_sacf_uaccess();
+ while (n > 0) {
+ size = zpci_get_max_io_size((u64 __force) dst,
+ (u64 __force) src, n,
+ ZPCI_MAX_WRITE_SIZE);
+ if (size > 8) /* main path */
+ rc = __pcistb_mio_inuser(dst, src, size, &status);
+ else
+ rc = __pcistg_mio_inuser(dst, src, size, &status);
+ if (rc)
+ break;
+ src += size;
+ dst += size;
+ n -= size;
+ }
+ disable_sacf_uaccess(old_fs);
+ if (rc)
+ zpci_err_mmio(rc, status, (__force u64) dst);
+ return rc;
+}
+
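+/* Resolve a user address to its page frame number, checking that the VMA
+ * grants the required access.
+ */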
+static long get_pfn(unsigned long user_addr, unsigned long access,
+ unsigned long *pfn)
+{
+ struct vm_area_struct *vma;
+ long ret;
+
+ mmap_read_lock(current->mm);
+ ret = -EINVAL;
+ vma = find_vma(current->mm, user_addr);
+ if (!vma || user_addr < vma->vm_start)
+ goto out;
+ ret = -EACCES;
+ if (!(vma->vm_flags & access))
+ goto out;
+ ret = follow_pfn(vma, user_addr, pfn);
+out:
+ mmap_read_unlock(current->mm);
+ return ret;
+}
+
+SYSCALL_DEFINE3(s390_pci_mmio_write, unsigned long, mmio_addr,
+ const void __user *, user_buffer, size_t, length)
+{
+ u8 local_buf[64];
+ void __iomem *io_addr;
+ void *buf;
+ unsigned long pfn;
+ long ret;
+
+ if (!zpci_is_enabled())
+ return -ENODEV;
+
+ if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
+ return -EINVAL;
+
+ /*
+ * We only support write access to MIO capable devices if we are on
+ * a MIO enabled system. Otherwise we would have to check for every
+ * address if it is a special ZPCI_ADDR and would have to do
+ * a get_pfn() which we don't need for MIO capable devices. Currently
+ * ISM devices are the only devices without MIO support and there is no
+ * known need for accessing these from userspace.
+ */
+ if (static_branch_likely(&have_mio)) {
+ ret = __memcpy_toio_inuser((void __iomem *) mmio_addr,
+ user_buffer,
+ length);
+ return ret;
+ }
+
+ if (length > 64) {
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ } else
+ buf = local_buf;
+
+ ret = get_pfn(mmio_addr, VM_WRITE, &pfn);
+ if (ret)
+ goto out;
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) |
+ (mmio_addr & ~PAGE_MASK));
+
+ ret = -EFAULT;
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE)
+ goto out;
+
+ if (copy_from_user(buf, user_buffer, length))
+ goto out;
+
+ ret = zpci_memcpy_toio(io_addr, buf, length);
+out:
+ if (buf != local_buf)
+ kfree(buf);
+ return ret;
+}
+
+static inline int __pcilg_mio_inuser(
+ void __user *dst, const void __iomem *ioaddr,
+ u64 ulen, u8 *status)
+{
+ register u64 addr asm("2") = (u64 __force) ioaddr;
+ register u64 len asm("3") = ulen;
+ u64 cnt = ulen;
+ int shift = ulen * 8;
+ int cc = -ENXIO;
+ u64 val, tmp;
+
+ /*
+ * read 0 < @len <= 8 bytes from the PCI memory mapped at @ioaddr (in
+	 * user space) into a register using pcilg, then store these bytes at
+ * user address @dst
+ */
+ asm volatile (
+ " sacf 256\n"
+ "0: .insn rre,0xb9d60000,%[val],%[ioaddr]\n"
+ "1: ipm %[cc]\n"
+ " srl %[cc],28\n"
+ " ltr %[cc],%[cc]\n"
+ " jne 4f\n"
+ "2: ahi %[shift],-8\n"
+ " srlg %[tmp],%[val],0(%[shift])\n"
+ "3: stc %[tmp],0(%[dst])\n"
+ "5: aghi %[dst],1\n"
+ " brctg %[cnt],2b\n"
+ "4: sacf 768\n"
+ EX_TABLE(0b, 4b) EX_TABLE(1b, 4b) EX_TABLE(3b, 4b) EX_TABLE(5b, 4b)
+ :
+ [cc] "+d" (cc), [val] "=d" (val), [len] "+d" (len),
+ [dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
+ [shift] "+d" (shift)
+ :
+ [ioaddr] "a" (addr)
+ : "cc", "memory");
+
+ /* did we write everything to the user space buffer? */
+ if (!cc && cnt != 0)
+ cc = -EFAULT;
+
+ *status = len >> 24 & 0xff;
+ return cc;
+}
+
+static inline int __memcpy_fromio_inuser(void __user *dst,
+ const void __iomem *src,
+ unsigned long n)
+{
+ int size, rc = 0;
+ u8 status;
+ mm_segment_t old_fs;
+
+ old_fs = enable_sacf_uaccess();
+ while (n > 0) {
+ size = zpci_get_max_io_size((u64 __force) src,
+ (u64 __force) dst, n,
+ ZPCI_MAX_READ_SIZE);
+ rc = __pcilg_mio_inuser(dst, src, size, &status);
+ if (rc)
+ break;
+ src += size;
+ dst += size;
+ n -= size;
+ }
+ disable_sacf_uaccess(old_fs);
+ if (rc)
+ zpci_err_mmio(rc, status, (__force u64) dst);
+ return rc;
+}
+
+SYSCALL_DEFINE3(s390_pci_mmio_read, unsigned long, mmio_addr,
+ void __user *, user_buffer, size_t, length)
+{
+ u8 local_buf[64];
+ void __iomem *io_addr;
+ void *buf;
+ unsigned long pfn;
+ long ret;
+
+ if (!zpci_is_enabled())
+ return -ENODEV;
+
+ if (length <= 0 || PAGE_SIZE - (mmio_addr & ~PAGE_MASK) < length)
+ return -EINVAL;
+
+ /*
+ * We only support read access to MIO capable devices if we are on
+ * a MIO enabled system. Otherwise we would have to check for every
+ * address if it is a special ZPCI_ADDR and would have to do
+ * a get_pfn() which we don't need for MIO capable devices. Currently
+ * ISM devices are the only devices without MIO support and there is no
+ * known need for accessing these from userspace.
+ */
+ if (static_branch_likely(&have_mio)) {
+ ret = __memcpy_fromio_inuser(
+ user_buffer, (const void __iomem *)mmio_addr,
+ length);
+ return ret;
+ }
+
+ if (length > 64) {
+ buf = kmalloc(length, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+ } else {
+ buf = local_buf;
+ }
+
+ ret = get_pfn(mmio_addr, VM_READ, &pfn);
+ if (ret)
+ goto out;
+ io_addr = (void __iomem *)((pfn << PAGE_SHIFT) | (mmio_addr & ~PAGE_MASK));
+
+ if ((unsigned long) io_addr < ZPCI_IOMAP_ADDR_BASE) {
+ ret = -EFAULT;
+ goto out;
+ }
+ ret = zpci_memcpy_fromio(buf, io_addr, length);
+ if (ret)
+ goto out;
+ if (copy_to_user(user_buffer, buf, length))
+ ret = -EFAULT;
+
+out:
+ if (buf != local_buf)
+ kfree(buf);
+ return ret;
+}
diff --git a/arch/s390/pci/pci_sysfs.c b/arch/s390/pci/pci_sysfs.c
new file mode 100644
index 000000000..5c028bee9
--- /dev/null
+++ b/arch/s390/pci/pci_sysfs.c
@@ -0,0 +1,174 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright IBM Corp. 2012
+ *
+ * Author(s):
+ * Jan Glauber <jang@linux.vnet.ibm.com>
+ */
+
+#define KMSG_COMPONENT "zpci"
+#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/stat.h>
+#include <linux/pci.h>
+
+#include "../../../drivers/pci/pci.h"
+
+#include <asm/sclp.h>
+
+#define zpci_attr(name, fmt, member) \
+static ssize_t name##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+{ \
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev)); \
+ \
+ return sprintf(buf, fmt, zdev->member); \
+} \
+static DEVICE_ATTR_RO(name)
+
+zpci_attr(function_id, "0x%08x\n", fid);
+zpci_attr(function_handle, "0x%08x\n", fh);
+zpci_attr(pchid, "0x%04x\n", pchid);
+zpci_attr(pfgid, "0x%02x\n", pfgid);
+zpci_attr(vfn, "0x%04x\n", vfn);
+zpci_attr(pft, "0x%02x\n", pft);
+zpci_attr(port, "%d\n", port);
+zpci_attr(uid, "0x%x\n", uid);
+zpci_attr(segment0, "0x%02x\n", pfip[0]);
+zpci_attr(segment1, "0x%02x\n", pfip[1]);
+zpci_attr(segment2, "0x%02x\n", pfip[2]);
+zpci_attr(segment3, "0x%02x\n", pfip[3]);
+
+static ssize_t mio_enabled_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct zpci_dev *zdev = to_zpci(to_pci_dev(dev));
+
+ return sprintf(buf, zpci_use_mio(zdev) ? "1\n" : "0\n");
+}
+static DEVICE_ATTR_RO(mio_enabled);
+
+static ssize_t recover_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct kernfs_node *kn;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ int ret = 0;
+
+ /* Can't use device_remove_self() here as that would lead us to lock
+	 * the pci_rescan_remove_lock while holding the device's kernfs lock.
+	 * This would create a possible deadlock with disable_slot() which is
+	 * not directly protected by the device's kernfs lock but takes it
+ * during the device removal which happens under
+ * pci_rescan_remove_lock.
+ *
+ * This is analogous to sdev_store_delete() in
+ * drivers/scsi/scsi_sysfs.c
+ */
+ kn = sysfs_break_active_protection(&dev->kobj, &attr->attr);
+ WARN_ON_ONCE(!kn);
+	/* device_remove_file() serializes concurrent calls, ignoring all but
+ * the first
+ */
+ device_remove_file(dev, attr);
+
+ /* A concurrent call to recover_store() may slip between
+ * sysfs_break_active_protection() and the sysfs file removal.
+ * Once it unblocks from pci_lock_rescan_remove() the original pdev
+ * will already be removed.
+ */
+ pci_lock_rescan_remove();
+ if (pci_dev_is_added(pdev)) {
+ pci_stop_and_remove_bus_device(pdev);
+ ret = zpci_disable_device(zdev);
+ if (ret)
+ goto out;
+
+ ret = zpci_enable_device(zdev);
+ if (ret)
+ goto out;
+ pci_rescan_bus(zdev->zbus->bus);
+ }
+out:
+ pci_unlock_rescan_remove();
+ if (kn)
+ sysfs_unbreak_active_protection(kn);
+ return ret ? ret : count;
+}
+static DEVICE_ATTR_WO(recover);
+
+static ssize_t util_string_read(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = to_zpci(pdev);
+
+ return memory_read_from_buffer(buf, count, &off, zdev->util_str,
+ sizeof(zdev->util_str));
+}
+static BIN_ATTR_RO(util_string, CLP_UTIL_STR_LEN);
+
+static ssize_t report_error_write(struct file *filp, struct kobject *kobj,
+ struct bin_attribute *attr, char *buf,
+ loff_t off, size_t count)
+{
+ struct zpci_report_error_header *report = (void *) buf;
+ struct device *dev = kobj_to_dev(kobj);
+ struct pci_dev *pdev = to_pci_dev(dev);
+ struct zpci_dev *zdev = to_zpci(pdev);
+ int ret;
+
+ if (off || (count < sizeof(*report)))
+ return -EINVAL;
+
+ ret = sclp_pci_report(report, zdev->fh, zdev->fid);
+
+ return ret ? ret : count;
+}
+static BIN_ATTR(report_error, S_IWUSR, NULL, report_error_write, PAGE_SIZE);
+
+static struct bin_attribute *zpci_bin_attrs[] = {
+ &bin_attr_util_string,
+ &bin_attr_report_error,
+ NULL,
+};
+
+static struct attribute *zpci_dev_attrs[] = {
+ &dev_attr_function_id.attr,
+ &dev_attr_function_handle.attr,
+ &dev_attr_pchid.attr,
+ &dev_attr_pfgid.attr,
+ &dev_attr_pft.attr,
+ &dev_attr_port.attr,
+ &dev_attr_vfn.attr,
+ &dev_attr_uid.attr,
+ &dev_attr_recover.attr,
+ &dev_attr_mio_enabled.attr,
+ NULL,
+};
+static struct attribute_group zpci_attr_group = {
+ .attrs = zpci_dev_attrs,
+ .bin_attrs = zpci_bin_attrs,
+};
+
+static struct attribute *pfip_attrs[] = {
+ &dev_attr_segment0.attr,
+ &dev_attr_segment1.attr,
+ &dev_attr_segment2.attr,
+ &dev_attr_segment3.attr,
+ NULL,
+};
+static struct attribute_group pfip_attr_group = {
+ .name = "pfip",
+ .attrs = pfip_attrs,
+};
+
+const struct attribute_group *zpci_attr_groups[] = {
+ &zpci_attr_group,
+ &pfip_attr_group,
+ NULL,
+};