Diffstat
-rw-r--r--  arch/x86/platform/intel-mid/Makefile          |   2
-rw-r--r--  arch/x86/platform/intel-mid/intel-mid.c       | 124
-rw-r--r--  arch/x86/platform/intel-mid/pwr.c             | 485
-rw-r--r--  arch/x86/platform/intel-quark/Makefile        |   3
-rw-r--r--  arch/x86/platform/intel-quark/imr.c           | 597
-rw-r--r--  arch/x86/platform/intel-quark/imr_selftest.c  | 129
-rw-r--r--  arch/x86/platform/intel/Makefile              |   2
-rw-r--r--  arch/x86/platform/intel/iosf_mbi.c            | 571
8 files changed, 1913 insertions(+), 0 deletions(-)
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile new file mode 100644 index 000000000..ddfc08783 --- /dev/null +++ b/arch/x86/platform/intel-mid/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o pwr.o diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c new file mode 100644 index 000000000..f4592dc7a --- /dev/null +++ b/arch/x86/platform/intel-mid/intel-mid.c @@ -0,0 +1,124 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel MID platform setup code + * + * (C) Copyright 2008, 2012, 2021 Intel Corporation + * Author: Jacob Pan (jacob.jun.pan@intel.com) + * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com> + */ + +#define pr_fmt(fmt) "intel_mid: " fmt + +#include <linux/init.h> +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/regulator/machine.h> +#include <linux/scatterlist.h> +#include <linux/irq.h> +#include <linux/export.h> +#include <linux/notifier.h> + +#include <asm/setup.h> +#include <asm/mpspec_def.h> +#include <asm/hw_irq.h> +#include <asm/apic.h> +#include <asm/io_apic.h> +#include <asm/intel-mid.h> +#include <asm/io.h> +#include <asm/i8259.h> +#include <asm/intel_scu_ipc.h> +#include <asm/reboot.h> + +#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */ +#define IPCMSG_COLD_RESET 0xF1 + +static void intel_mid_power_off(void) +{ + /* Shut down South Complex via PWRMU */ + intel_mid_pwr_power_off(); + + /* Only for Tangier, the rest will ignore this command */ + intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_OFF, 1); +}; + +static void intel_mid_reboot(void) +{ + intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_RESET, 0); +} + +static void __init intel_mid_time_init(void) +{ + /* Lapic only, no apbt */ + x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock; + x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock; +} + +static void intel_mid_arch_setup(void) +{ + switch (boot_cpu_data.x86_model) { + case 0x3C: + case 0x4A: + x86_platform.legacy.rtc = 1; + break; + default: + break; + } + + /* + * Intel MID platforms are using explicitly defined regulators. + * + * Let the regulator core know that we do not have any additional + * regulators left. This lets it substitute unprovided regulators with + * dummy ones: + */ + regulator_has_full_constraints(); +} + +/* + * Moorestown does not have external NMI source nor port 0x61 to report + * NMI status. The possible NMI sources are from pmu as a result of NMI + * watchdog or lock debug. Reading io port 0x61 results in 0xff which + * misled NMI handler. + */ +static unsigned char intel_mid_get_nmi_reason(void) +{ + return 0; +} + +/* + * Moorestown specific x86_init function overrides and early setup + * calls. + */ +void __init x86_intel_mid_early_setup(void) +{ + x86_init.resources.probe_roms = x86_init_noop; + x86_init.resources.reserve_resources = x86_init_noop; + + x86_init.timers.timer_init = intel_mid_time_init; + x86_init.timers.setup_percpu_clockev = x86_init_noop; + + x86_init.irqs.pre_vector_init = x86_init_noop; + + x86_init.oem.arch_setup = intel_mid_arch_setup; + + x86_platform.get_nmi_reason = intel_mid_get_nmi_reason; + + x86_init.pci.arch_init = intel_mid_pci_init; + x86_init.pci.fixup_irqs = x86_init_noop; + + legacy_pic = &null_legacy_pic; + + /* + * Do nothing for now as everything needed done in + * x86_intel_mid_early_setup() below. 
+ */ + x86_init.acpi.reduced_hw_early_init = x86_init_noop; + + pm_power_off = intel_mid_power_off; + machine_ops.emergency_restart = intel_mid_reboot; + + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = x86_init_uint_noop; + set_bit(MP_BUS_ISA, mp_bus_not_pci); +} diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c new file mode 100644 index 000000000..27288d8d3 --- /dev/null +++ b/arch/x86/platform/intel-mid/pwr.c @@ -0,0 +1,485 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel MID Power Management Unit (PWRMU) device driver + * + * Copyright (C) 2016, Intel Corporation + * + * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com> + * + * Intel MID Power Management Unit device driver handles the South Complex PCI + * devices such as GPDMA, SPI, I2C, PWM, and so on. By default PCI core + * modifies bits in PMCSR register in the PCI configuration space. This is not + * enough on some SoCs like Intel Tangier. In such case PCI core sets a new + * power state of the device in question through a PM hook registered in struct + * pci_platform_pm_ops (see drivers/pci/pci-mid.c). + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/delay.h> +#include <linux/errno.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/mutex.h> +#include <linux/pci.h> + +#include <asm/intel-mid.h> + +/* Registers */ +#define PM_STS 0x00 +#define PM_CMD 0x04 +#define PM_ICS 0x08 +#define PM_WKC(x) (0x10 + (x) * 4) +#define PM_WKS(x) (0x18 + (x) * 4) +#define PM_SSC(x) (0x20 + (x) * 4) +#define PM_SSS(x) (0x30 + (x) * 4) + +/* Bits in PM_STS */ +#define PM_STS_BUSY (1 << 8) + +/* Bits in PM_CMD */ +#define PM_CMD_CMD(x) ((x) << 0) +#define PM_CMD_IOC (1 << 8) +#define PM_CMD_CM_NOP (0 << 9) +#define PM_CMD_CM_IMMEDIATE (1 << 9) +#define PM_CMD_CM_DELAY (2 << 9) +#define PM_CMD_CM_TRIGGER (3 << 9) + +/* System states */ +#define PM_CMD_SYS_STATE_S5 (5 << 16) + +/* Trigger variants */ +#define PM_CMD_CFG_TRIGGER_NC (3 << 19) + +/* Message to wait for TRIGGER_NC case */ +#define TRIGGER_NC_MSG_2 (2 << 22) + +/* List of commands */ +#define CMD_SET_CFG 0x01 + +/* Bits in PM_ICS */ +#define PM_ICS_INT_STATUS(x) ((x) & 0xff) +#define PM_ICS_IE (1 << 8) +#define PM_ICS_IP (1 << 9) +#define PM_ICS_SW_INT_STS (1 << 10) + +/* List of interrupts */ +#define INT_INVALID 0 +#define INT_CMD_COMPLETE 1 +#define INT_CMD_ERR 2 +#define INT_WAKE_EVENT 3 +#define INT_LSS_POWER_ERR 4 +#define INT_S0iX_MSG_ERR 5 +#define INT_NO_C6 6 +#define INT_TRIGGER_ERR 7 +#define INT_INACTIVITY 8 + +/* South Complex devices */ +#define LSS_MAX_SHARED_DEVS 4 +#define LSS_MAX_DEVS 64 + +#define LSS_WS_BITS 1 /* wake state width */ +#define LSS_PWS_BITS 2 /* power state width */ + +/* Supported device IDs */ +#define PCI_DEVICE_ID_PENWELL 0x0828 +#define PCI_DEVICE_ID_TANGIER 0x11a1 + +struct mid_pwr_dev { + struct pci_dev *pdev; + pci_power_t state; +}; + +struct mid_pwr { + struct device *dev; + void __iomem *regs; + int irq; + bool available; + + struct mutex lock; + struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS]; +}; + +static struct mid_pwr *midpwr; + +static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg) +{ + return readl(pwr->regs + PM_SSS(reg)); +} + +static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value) +{ + writel(value, pwr->regs + PM_SSC(reg)); +} + +static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value) 
+{ + writel(value, pwr->regs + PM_WKC(reg)); +} + +static void mid_pwr_interrupt_disable(struct mid_pwr *pwr) +{ + writel(~PM_ICS_IE, pwr->regs + PM_ICS); +} + +static bool mid_pwr_is_busy(struct mid_pwr *pwr) +{ + return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY); +} + +/* Wait 500ms that the latest PWRMU command finished */ +static int mid_pwr_wait(struct mid_pwr *pwr) +{ + unsigned int count = 500000; + bool busy; + + do { + busy = mid_pwr_is_busy(pwr); + if (!busy) + return 0; + udelay(1); + } while (--count); + + return -EBUSY; +} + +static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd) +{ + writel(PM_CMD_CMD(cmd) | PM_CMD_CM_IMMEDIATE, pwr->regs + PM_CMD); + return mid_pwr_wait(pwr); +} + +static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new) +{ + int curstate; + u32 power; + int ret; + + /* Check if the device is already in desired state */ + power = mid_pwr_get_state(pwr, reg); + curstate = (power >> bit) & 3; + if (curstate == new) + return 0; + + /* Update the power state */ + mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit)); + + /* Send command to SCU */ + ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG); + if (ret) + return ret; + + /* Check if the device is already in desired state */ + power = mid_pwr_get_state(pwr, reg); + curstate = (power >> bit) & 3; + if (curstate != new) + return -EAGAIN; + + return 0; +} + +static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss, + struct pci_dev *pdev, + pci_power_t state) +{ + pci_power_t weakest = PCI_D3hot; + unsigned int j; + + /* Find device in cache or first free cell */ + for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) { + if (lss[j].pdev == pdev || !lss[j].pdev) + break; + } + + /* Store the desired state in cache */ + if (j < LSS_MAX_SHARED_DEVS) { + lss[j].pdev = pdev; + lss[j].state = state; + } else { + dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n"); + weakest = state; + } + + /* Find the power state we may use */ + for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) { + if (lss[j].state < weakest) + weakest = lss[j].state; + } + + return weakest; +} + +static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev, + pci_power_t state, int id, int reg, int bit) +{ + const char *name; + int ret; + + state = __find_weakest_power_state(pwr->lss[id], pdev, state); + name = pci_power_name(state); + + ret = __update_power_state(pwr, reg, bit, (__force int)state); + if (ret) { + dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret); + return ret; + } + + dev_vdbg(&pdev->dev, "Set power state %s\n", name); + return 0; +} + +static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev, + pci_power_t state) +{ + int id, reg, bit; + int ret; + + id = intel_mid_pwr_get_lss_id(pdev); + if (id < 0) + return id; + + reg = (id * LSS_PWS_BITS) / 32; + bit = (id * LSS_PWS_BITS) % 32; + + /* We support states between PCI_D0 and PCI_D3hot */ + if (state < PCI_D0) + state = PCI_D0; + if (state > PCI_D3hot) + state = PCI_D3hot; + + mutex_lock(&pwr->lock); + ret = __set_power_state(pwr, pdev, state, id, reg, bit); + mutex_unlock(&pwr->lock); + return ret; +} + +int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state) +{ + struct mid_pwr *pwr = midpwr; + int ret = 0; + + might_sleep(); + + if (pwr && pwr->available) + ret = mid_pwr_set_power_state(pwr, pdev, state); + dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret); + + return 0; +} + +pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev) +{ + struct mid_pwr 
*pwr = midpwr; + int id, reg, bit; + u32 power; + + if (!pwr || !pwr->available) + return PCI_UNKNOWN; + + id = intel_mid_pwr_get_lss_id(pdev); + if (id < 0) + return PCI_UNKNOWN; + + reg = (id * LSS_PWS_BITS) / 32; + bit = (id * LSS_PWS_BITS) % 32; + power = mid_pwr_get_state(pwr, reg); + return (__force pci_power_t)((power >> bit) & 3); +} + +void intel_mid_pwr_power_off(void) +{ + struct mid_pwr *pwr = midpwr; + u32 cmd = PM_CMD_SYS_STATE_S5 | + PM_CMD_CMD(CMD_SET_CFG) | + PM_CMD_CM_TRIGGER | + PM_CMD_CFG_TRIGGER_NC | + TRIGGER_NC_MSG_2; + + /* Send command to SCU */ + writel(cmd, pwr->regs + PM_CMD); + mid_pwr_wait(pwr); +} + +int intel_mid_pwr_get_lss_id(struct pci_dev *pdev) +{ + int vndr; + u8 id; + + /* + * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of + * Vendor capability. + */ + vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR); + if (!vndr) + return -EINVAL; + + /* Read the Logical SubSystem ID byte */ + pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id); + if (!(id & INTEL_MID_PWR_LSS_TYPE)) + return -ENODEV; + + id &= ~INTEL_MID_PWR_LSS_TYPE; + if (id >= LSS_MAX_DEVS) + return -ERANGE; + + return id; +} + +static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id) +{ + struct mid_pwr *pwr = dev_id; + u32 ics; + + ics = readl(pwr->regs + PM_ICS); + if (!(ics & PM_ICS_IP)) + return IRQ_NONE; + + writel(ics | PM_ICS_IP, pwr->regs + PM_ICS); + + dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics)); + return IRQ_HANDLED; +} + +struct mid_pwr_device_info { + int (*set_initial_state)(struct mid_pwr *pwr); +}; + +static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id) +{ + struct mid_pwr_device_info *info = (void *)id->driver_data; + struct device *dev = &pdev->dev; + struct mid_pwr *pwr; + int ret; + + ret = pcim_enable_device(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "error: could not enable device\n"); + return ret; + } + + ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev)); + if (ret) { + dev_err(&pdev->dev, "I/O memory remapping failed\n"); + return ret; + } + + pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL); + if (!pwr) + return -ENOMEM; + + pwr->dev = dev; + pwr->regs = pcim_iomap_table(pdev)[0]; + pwr->irq = pdev->irq; + + mutex_init(&pwr->lock); + + /* Disable interrupts */ + mid_pwr_interrupt_disable(pwr); + + if (info && info->set_initial_state) { + ret = info->set_initial_state(pwr); + if (ret) + dev_warn(dev, "Can't set initial state: %d\n", ret); + } + + ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler, + IRQF_NO_SUSPEND, pci_name(pdev), pwr); + if (ret) + return ret; + + pwr->available = true; + midpwr = pwr; + + pci_set_drvdata(pdev, pwr); + return 0; +} + +static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states) +{ + unsigned int i, j; + int ret; + + /* + * Enable wake events. + * + * PWRMU supports up to 32 sources for wake up the system. Ungate them + * all here. + */ + mid_pwr_set_wake(pwr, 0, 0xffffffff); + mid_pwr_set_wake(pwr, 1, 0xffffffff); + + /* + * Power off South Complex devices. + * + * There is a map (see a note below) of 64 devices with 2 bits per each + * on 32-bit HW registers. The following calls set all devices to one + * known initial state, i.e. PCI_D3hot. This is done in conjunction + * with PMCSR setting in arch/x86/pci/intel_mid_pci.c. + * + * NOTE: The actual device mapping is provided by a platform at run + * time using vendor capability of PCI configuration space. 
+ */ + mid_pwr_set_state(pwr, 0, states[0]); + mid_pwr_set_state(pwr, 1, states[1]); + mid_pwr_set_state(pwr, 2, states[2]); + mid_pwr_set_state(pwr, 3, states[3]); + + /* Send command to SCU */ + ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG); + if (ret) + return ret; + + for (i = 0; i < LSS_MAX_DEVS; i++) { + for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) + pwr->lss[i][j].state = PCI_D3hot; + } + + return 0; +} + +static int pnw_set_initial_state(struct mid_pwr *pwr) +{ + /* On Penwell SRAM must stay powered on */ + static const u32 states[] = { + 0xf00fffff, /* PM_SSC(0) */ + 0xffffffff, /* PM_SSC(1) */ + 0xffffffff, /* PM_SSC(2) */ + 0xffffffff, /* PM_SSC(3) */ + }; + return mid_set_initial_state(pwr, states); +} + +static int tng_set_initial_state(struct mid_pwr *pwr) +{ + static const u32 states[] = { + 0xffffffff, /* PM_SSC(0) */ + 0xffffffff, /* PM_SSC(1) */ + 0xffffffff, /* PM_SSC(2) */ + 0xffffffff, /* PM_SSC(3) */ + }; + return mid_set_initial_state(pwr, states); +} + +static const struct mid_pwr_device_info pnw_info = { + .set_initial_state = pnw_set_initial_state, +}; + +static const struct mid_pwr_device_info tng_info = { + .set_initial_state = tng_set_initial_state, +}; + +/* This table should be in sync with the one in drivers/pci/pci-mid.c */ +static const struct pci_device_id mid_pwr_pci_ids[] = { + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&pnw_info }, + { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&tng_info }, + {} +}; + +static struct pci_driver mid_pwr_pci_driver = { + .name = "intel_mid_pwr", + .probe = mid_pwr_probe, + .id_table = mid_pwr_pci_ids, +}; + +builtin_pci_driver(mid_pwr_pci_driver); diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile new file mode 100644 index 000000000..ed77cb952 --- /dev/null +++ b/arch/x86/platform/intel-quark/Makefile @@ -0,0 +1,3 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_INTEL_IMR) += imr.o +obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c new file mode 100644 index 000000000..d3d456925 --- /dev/null +++ b/arch/x86/platform/intel-quark/imr.c @@ -0,0 +1,597 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * imr.c -- Intel Isolated Memory Region driver + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> + * + * IMR registers define an isolated region of memory that can + * be masked to prohibit certain system agents from accessing memory. + * When a device behind a masked port performs an access - snooped or + * not, an IMR may optionally prevent that transaction from changing + * the state of memory or from getting correct data in response to the + * operation. + * + * Write data will be dropped and reads will return 0xFFFFFFFF, the + * system will reset and system BIOS will print out an error message to + * inform the user that an IMR has been violated. + * + * This code is based on the Linux MTRR code and reference code from + * Intel's Quark BSP EFI, Linux and grub code. + * + * See quark-x1000-datasheet.pdf for register definitions. 
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <asm-generic/sections.h> +#include <asm/cpu_device_id.h> +#include <asm/imr.h> +#include <asm/iosf_mbi.h> +#include <asm/io.h> + +#include <linux/debugfs.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/types.h> + +struct imr_device { + bool init; + struct mutex lock; + int max_imr; + int reg_base; +}; + +static struct imr_device imr_dev; + +/* + * IMR read/write mask control registers. + * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for + * bit definitions. + * + * addr_hi + * 31 Lock bit + * 30:24 Reserved + * 23:2 1 KiB aligned lo address + * 1:0 Reserved + * + * addr_hi + * 31:24 Reserved + * 23:2 1 KiB aligned hi address + * 1:0 Reserved + */ +#define IMR_LOCK BIT(31) + +struct imr_regs { + u32 addr_lo; + u32 addr_hi; + u32 rmask; + u32 wmask; +}; + +#define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32)) +#define IMR_SHIFT 8 +#define imr_to_phys(x) ((x) << IMR_SHIFT) +#define phys_to_imr(x) ((x) >> IMR_SHIFT) + +/** + * imr_is_enabled - true if an IMR is enabled false otherwise. + * + * Determines if an IMR is enabled based on address range and read/write + * mask. An IMR set with an address range set to zero and a read/write + * access mask set to all is considered to be disabled. An IMR in any + * other state - for example set to zero but without read/write access + * all is considered to be enabled. This definition of disabled is how + * firmware switches off an IMR and is maintained in kernel for + * consistency. + * + * @imr: pointer to IMR descriptor. + * @return: true if IMR enabled false if disabled. + */ +static inline int imr_is_enabled(struct imr_regs *imr) +{ + return !(imr->rmask == IMR_READ_ACCESS_ALL && + imr->wmask == IMR_WRITE_ACCESS_ALL && + imr_to_phys(imr->addr_lo) == 0 && + imr_to_phys(imr->addr_hi) == 0); +} + +/** + * imr_read - read an IMR at a given index. + * + * Requires caller to hold imr mutex. + * + * @idev: pointer to imr_device structure. + * @imr_id: IMR entry to read. + * @imr: IMR structure representing address and access masks. + * @return: 0 on success or error code passed from mbi_iosf on failure. + */ +static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) +{ + u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; + int ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi); + if (ret) + return ret; + + ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask); + if (ret) + return ret; + + return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask); +} + +/** + * imr_write - write an IMR at a given index. + * + * Requires caller to hold imr mutex. + * Note lock bits need to be written independently of address bits. + * + * @idev: pointer to imr_device structure. + * @imr_id: IMR entry to write. + * @imr: IMR structure representing address and access masks. + * @return: 0 on success or error code passed from mbi_iosf on failure. 
+ */ +static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr) +{ + unsigned long flags; + u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base; + int ret; + + local_irq_save(flags); + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask); + if (ret) + goto failed; + + ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask); + if (ret) + goto failed; + + local_irq_restore(flags); + return 0; +failed: + /* + * If writing to the IOSF failed then we're in an unknown state, + * likely a very bad state. An IMR in an invalid state will almost + * certainly lead to a memory access violation. + */ + local_irq_restore(flags); + WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n", + imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK); + + return ret; +} + +/** + * imr_dbgfs_state_show - print state of IMR registers. + * + * @s: pointer to seq_file for output. + * @unused: unused parameter. + * @return: 0 on success or error code passed from mbi_iosf on failure. + */ +static int imr_dbgfs_state_show(struct seq_file *s, void *unused) +{ + phys_addr_t base; + phys_addr_t end; + int i; + struct imr_device *idev = s->private; + struct imr_regs imr; + size_t size; + int ret = -ENODEV; + + mutex_lock(&idev->lock); + + for (i = 0; i < idev->max_imr; i++) { + + ret = imr_read(idev, i, &imr); + if (ret) + break; + + /* + * Remember to add IMR_ALIGN bytes to size to indicate the + * inherent IMR_ALIGN size bytes contained in the masked away + * lower ten bits. + */ + if (imr_is_enabled(&imr)) { + base = imr_to_phys(imr.addr_lo); + end = imr_to_phys(imr.addr_hi) + IMR_MASK; + size = end - base + 1; + } else { + base = 0; + end = 0; + size = 0; + } + seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx " + "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i, + &base, &end, size, imr.rmask, imr.wmask, + imr_is_enabled(&imr) ? "enabled " : "disabled", + imr.addr_lo & IMR_LOCK ? "locked" : "unlocked"); + } + + mutex_unlock(&idev->lock); + return ret; +} +DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state); + +/** + * imr_debugfs_register - register debugfs hooks. + * + * @idev: pointer to imr_device structure. + */ +static void imr_debugfs_register(struct imr_device *idev) +{ + debugfs_create_file("imr_state", 0444, NULL, idev, + &imr_dbgfs_state_fops); +} + +/** + * imr_check_params - check passed address range IMR alignment and non-zero size + * + * @base: base address of intended IMR. + * @size: size of intended IMR. + * @return: zero on valid range -EINVAL on unaligned base/size. + */ +static int imr_check_params(phys_addr_t base, size_t size) +{ + if ((base & IMR_MASK) || (size & IMR_MASK)) { + pr_err("base %pa size 0x%08zx must align to 1KiB\n", + &base, size); + return -EINVAL; + } + if (size == 0) + return -EINVAL; + + return 0; +} + +/** + * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends. + * + * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the + * value in the register. We need to subtract IMR_ALIGN bytes from input sizes + * as a result. + * + * @size: input size bytes. + * @return: reduced size. + */ +static inline size_t imr_raw_size(size_t size) +{ + return size - IMR_ALIGN; +} + +/** + * imr_address_overlap - detects an address overlap. 
+ * + * @addr: address to check against an existing IMR. + * @imr: imr being checked. + * @return: true for overlap false for no overlap. + */ +static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr) +{ + return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi); +} + +/** + * imr_add_range - add an Isolated Memory Region. + * + * @base: physical base address of region aligned to 1KiB. + * @size: physical size of region in bytes must be aligned to 1KiB. + * @read_mask: read access mask. + * @write_mask: write access mask. + * @return: zero on success or negative value indicating error. + */ +int imr_add_range(phys_addr_t base, size_t size, + unsigned int rmask, unsigned int wmask) +{ + phys_addr_t end; + unsigned int i; + struct imr_device *idev = &imr_dev; + struct imr_regs imr; + size_t raw_size; + int reg; + int ret; + + if (WARN_ONCE(idev->init == false, "driver not initialized")) + return -ENODEV; + + ret = imr_check_params(base, size); + if (ret) + return ret; + + /* Tweak the size value. */ + raw_size = imr_raw_size(size); + end = base + raw_size; + + /* + * Check for reserved IMR value common to firmware, kernel and grub + * indicating a disabled IMR. + */ + imr.addr_lo = phys_to_imr(base); + imr.addr_hi = phys_to_imr(end); + imr.rmask = rmask; + imr.wmask = wmask; + if (!imr_is_enabled(&imr)) + return -ENOTSUPP; + + mutex_lock(&idev->lock); + + /* + * Find a free IMR while checking for an existing overlapping range. + * Note there's no restriction in silicon to prevent IMR overlaps. + * For the sake of simplicity and ease in defining/debugging an IMR + * memory map we exclude IMR overlaps. + */ + reg = -1; + for (i = 0; i < idev->max_imr; i++) { + ret = imr_read(idev, i, &imr); + if (ret) + goto failed; + + /* Find overlap @ base or end of requested range. */ + ret = -EINVAL; + if (imr_is_enabled(&imr)) { + if (imr_address_overlap(base, &imr)) + goto failed; + if (imr_address_overlap(end, &imr)) + goto failed; + } else { + reg = i; + } + } + + /* Error out if we have no free IMR entries. */ + if (reg == -1) { + ret = -ENOMEM; + goto failed; + } + + pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n", + reg, &base, &end, raw_size, rmask, wmask); + + /* Enable IMR at specified range and access mask. */ + imr.addr_lo = phys_to_imr(base); + imr.addr_hi = phys_to_imr(end); + imr.rmask = rmask; + imr.wmask = wmask; + + ret = imr_write(idev, reg, &imr); + if (ret < 0) { + /* + * In the highly unlikely event iosf_mbi_write failed + * attempt to rollback the IMR setup skipping the trapping + * of further IOSF write failures. + */ + imr.addr_lo = 0; + imr.addr_hi = 0; + imr.rmask = IMR_READ_ACCESS_ALL; + imr.wmask = IMR_WRITE_ACCESS_ALL; + imr_write(idev, reg, &imr); + } +failed: + mutex_unlock(&idev->lock); + return ret; +} +EXPORT_SYMBOL_GPL(imr_add_range); + +/** + * __imr_remove_range - delete an Isolated Memory Region. + * + * This function allows you to delete an IMR by its index specified by reg or + * by address range specified by base and size respectively. If you specify an + * index on its own the base and size parameters are ignored. + * imr_remove_range(0, base, size); delete IMR at index 0 base/size ignored. + * imr_remove_range(-1, base, size); delete IMR from base to base+size. + * + * @reg: imr index to remove. + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes aligned to 1 KiB. 
+ * @return: -EINVAL on invalid range or out or range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +static int __imr_remove_range(int reg, phys_addr_t base, size_t size) +{ + phys_addr_t end; + bool found = false; + unsigned int i; + struct imr_device *idev = &imr_dev; + struct imr_regs imr; + size_t raw_size; + int ret = 0; + + if (WARN_ONCE(idev->init == false, "driver not initialized")) + return -ENODEV; + + /* + * Validate address range if deleting by address, else we are + * deleting by index where base and size will be ignored. + */ + if (reg == -1) { + ret = imr_check_params(base, size); + if (ret) + return ret; + } + + /* Tweak the size value. */ + raw_size = imr_raw_size(size); + end = base + raw_size; + + mutex_lock(&idev->lock); + + if (reg >= 0) { + /* If a specific IMR is given try to use it. */ + ret = imr_read(idev, reg, &imr); + if (ret) + goto failed; + + if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) { + ret = -ENODEV; + goto failed; + } + found = true; + } else { + /* Search for match based on address range. */ + for (i = 0; i < idev->max_imr; i++) { + ret = imr_read(idev, i, &imr); + if (ret) + goto failed; + + if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) + continue; + + if ((imr_to_phys(imr.addr_lo) == base) && + (imr_to_phys(imr.addr_hi) == end)) { + found = true; + reg = i; + break; + } + } + } + + if (!found) { + ret = -ENODEV; + goto failed; + } + + pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size); + + /* Tear down the IMR. */ + imr.addr_lo = 0; + imr.addr_hi = 0; + imr.rmask = IMR_READ_ACCESS_ALL; + imr.wmask = IMR_WRITE_ACCESS_ALL; + + ret = imr_write(idev, reg, &imr); + +failed: + mutex_unlock(&idev->lock); + return ret; +} + +/** + * imr_remove_range - delete an Isolated Memory Region by address + * + * This function allows you to delete an IMR by an address range specified + * by base and size respectively. + * imr_remove_range(base, size); delete IMR from base to base+size. + * + * @base: physical base address of region aligned to 1 KiB. + * @size: physical size of region in bytes aligned to 1 KiB. + * @return: -EINVAL on invalid range or out or range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +int imr_remove_range(phys_addr_t base, size_t size) +{ + return __imr_remove_range(-1, base, size); +} +EXPORT_SYMBOL_GPL(imr_remove_range); + +/** + * imr_clear - delete an Isolated Memory Region by index + * + * This function allows you to delete an IMR by an address range specified + * by the index of the IMR. Useful for initial sanitization of the IMR + * address map. + * imr_ge(base, size); delete IMR from base to base+size. + * + * @reg: imr index to remove. + * @return: -EINVAL on invalid range or out or range id + * -ENODEV if reg is valid but no IMR exists or is locked + * 0 on success. + */ +static inline int imr_clear(int reg) +{ + return __imr_remove_range(reg, 0, 0); +} + +/** + * imr_fixup_memmap - Tear down IMRs used during bootup. + * + * BIOS and Grub both setup IMRs around compressed kernel, initrd memory + * that need to be removed before the kernel hands out one of the IMR + * encased addresses to a downstream DMA agent such as the SD or Ethernet. + * IMRs on Galileo are setup to immediately reset the system on violation. + * As a result if you're running a root filesystem from SD - you'll need + * the boot-time IMRs torn down or you'll find seemingly random resets when + * using your filesystem. 
+ * + * @idev: pointer to imr_device structure. + * @return: + */ +static void __init imr_fixup_memmap(struct imr_device *idev) +{ + phys_addr_t base = virt_to_phys(&_text); + size_t size = virt_to_phys(&__end_rodata) - base; + unsigned long start, end; + int i; + int ret; + + /* Tear down all existing unlocked IMRs. */ + for (i = 0; i < idev->max_imr; i++) + imr_clear(i); + + start = (unsigned long)_text; + end = (unsigned long)__end_rodata - 1; + + /* + * Setup an unlocked IMR around the physical extent of the kernel + * from the beginning of the .text section to the end of the + * .rodata section as one physically contiguous block. + * + * We don't round up @size since it is already PAGE_SIZE aligned. + * See vmlinux.lds.S for details. + */ + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU); + if (ret < 0) { + pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n", + size / 1024, start, end); + } else { + pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n", + size / 1024, start, end); + } + +} + +static const struct x86_cpu_id imr_ids[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL), + {} +}; + +/** + * imr_init - entry point for IMR driver. + * + * return: -ENODEV for no IMR support 0 if good to go. + */ +static int __init imr_init(void) +{ + struct imr_device *idev = &imr_dev; + + if (!x86_match_cpu(imr_ids) || !iosf_mbi_available()) + return -ENODEV; + + idev->max_imr = QUARK_X1000_IMR_MAX; + idev->reg_base = QUARK_X1000_IMR_REGBASE; + idev->init = true; + + mutex_init(&idev->lock); + imr_debugfs_register(idev); + imr_fixup_memmap(idev); + return 0; +} +device_initcall(imr_init); diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c new file mode 100644 index 000000000..761f3689f --- /dev/null +++ b/arch/x86/platform/intel-quark/imr_selftest.c @@ -0,0 +1,129 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * imr_selftest.c -- Intel Isolated Memory Region self-test driver + * + * Copyright(c) 2013 Intel Corporation. + * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie> + * + * IMR self test. The purpose of this module is to run a set of tests on the + * IMR API to validate it's sanity. We check for overlapping, reserved + * addresses and setup/teardown sanity. + * + */ + +#include <asm-generic/sections.h> +#include <asm/cpu_device_id.h> +#include <asm/imr.h> +#include <asm/io.h> + +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/types.h> + +#define SELFTEST KBUILD_MODNAME ": " +/** + * imr_self_test_result - Print result string for self test. + * + * @res: result code - true if test passed false otherwise. + * @fmt: format string. + * ... variadic argument list. + */ +static __printf(2, 3) +void __init imr_self_test_result(int res, const char *fmt, ...) +{ + va_list vlist; + + /* Print pass/fail. */ + if (res) + pr_info(SELFTEST "pass "); + else + pr_info(SELFTEST "fail "); + + /* Print variable string. */ + va_start(vlist, fmt); + vprintk(fmt, vlist); + va_end(vlist); + + /* Optional warning. */ + WARN(res == 0, "test failed"); +} +#undef SELFTEST + +/** + * imr_self_test + * + * Verify IMR self_test with some simple tests to verify overlap, + * zero sized allocations and 1 KiB sized areas. + * + */ +static void __init imr_self_test(void) +{ + phys_addr_t base = virt_to_phys(&_text); + size_t size = virt_to_phys(&__end_rodata) - base; + const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n"; + int ret; + + /* Test zero zero. 
*/ + ret = imr_add_range(0, 0, 0, 0); + imr_self_test_result(ret < 0, "zero sized IMR\n"); + + /* Test exact overlap. */ + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test overlap with base inside of existing. */ + base += size - IMR_ALIGN; + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test overlap with end inside of existing. */ + base -= size + IMR_ALIGN * 2; + ret = imr_add_range(base, size, IMR_CPU, IMR_CPU); + imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size)); + + /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */ + ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL, + IMR_WRITE_ACCESS_ALL); + imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n"); + + /* Test that a 1 KiB IMR @ zero with CPU only will work. */ + ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU); + imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n"); + if (ret >= 0) { + ret = imr_remove_range(0, IMR_ALIGN); + imr_self_test_result(ret == 0, "teardown - cpu-access\n"); + } + + /* Test 2 KiB works. */ + size = IMR_ALIGN * 2; + ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL); + imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n"); + if (ret >= 0) { + ret = imr_remove_range(0, size); + imr_self_test_result(ret == 0, "teardown 2KiB\n"); + } +} + +static const struct x86_cpu_id imr_ids[] __initconst = { + X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL), + {} +}; + +/** + * imr_self_test_init - entry point for IMR driver. + * + * return: -ENODEV for no IMR support 0 if good to go. + */ +static int __init imr_self_test_init(void) +{ + if (x86_match_cpu(imr_ids)) + imr_self_test(); + return 0; +} + +/** + * imr_self_test_exit - exit point for IMR code. + * + * return: + */ +device_initcall(imr_self_test_init); diff --git a/arch/x86/platform/intel/Makefile b/arch/x86/platform/intel/Makefile new file mode 100644 index 000000000..dbee3b00f --- /dev/null +++ b/arch/x86/platform/intel/Makefile @@ -0,0 +1,2 @@ +# SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c new file mode 100644 index 000000000..fdd49d70b --- /dev/null +++ b/arch/x86/platform/intel/iosf_mbi.c @@ -0,0 +1,571 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * IOSF-SB MailBox Interface Driver + * Copyright (c) 2013, Intel Corporation. + * + * The IOSF-SB is a fabric bus available on Atom based SOC's that uses a + * mailbox interface (MBI) to communicate with multiple devices. This + * driver implements access to this interface for those platforms that can + * enumerate the device using PCI. 
+ */ + +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/init.h> +#include <linux/spinlock.h> +#include <linux/pci.h> +#include <linux/debugfs.h> +#include <linux/capability.h> +#include <linux/pm_qos.h> +#include <linux/wait.h> + +#include <asm/iosf_mbi.h> + +#define PCI_DEVICE_ID_INTEL_BAYTRAIL 0x0F00 +#define PCI_DEVICE_ID_INTEL_BRASWELL 0x2280 +#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0958 +#define PCI_DEVICE_ID_INTEL_TANGIER 0x1170 + +static struct pci_dev *mbi_pdev; +static DEFINE_SPINLOCK(iosf_mbi_lock); + +/**************** Generic iosf_mbi access helpers ****************/ + +static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset) +{ + return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE; +} + +static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr) +{ + int result; + + if (!mbi_pdev) + return -ENODEV; + + if (mcrx) { + result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, + mcrx); + if (result < 0) + goto fail_read; + } + + result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); + if (result < 0) + goto fail_read; + + result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); + if (result < 0) + goto fail_read; + + return 0; + +fail_read: + dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); + return result; +} + +static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr) +{ + int result; + + if (!mbi_pdev) + return -ENODEV; + + result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr); + if (result < 0) + goto fail_write; + + if (mcrx) { + result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET, + mcrx); + if (result < 0) + goto fail_write; + } + + result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr); + if (result < 0) + goto fail_write; + + return 0; + +fail_write: + dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result); + return result; +} + +int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr) +{ + u32 mcr, mcrx; + unsigned long flags; + int ret; + + /* Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr); + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_read); + +int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr) +{ + u32 mcr, mcrx; + unsigned long flags; + int ret; + + /* Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr); + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_write); + +int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask) +{ + u32 mcr, mcrx; + u32 value; + unsigned long flags; + int ret; + + /* Access to the GFX unit is handled by GPU code */ + if (port == BT_MBI_UNIT_GFX) { + WARN_ON(1); + return -EPERM; + } + + mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO); + mcrx = offset & MBI_MASK_HI; + + spin_lock_irqsave(&iosf_mbi_lock, flags); + + /* Read current mdr value */ + ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value); + if (ret < 0) { + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + return ret; + } + + /* 
Apply mask */ + value &= ~mask; + mdr &= mask; + value |= mdr; + + /* Write back */ + ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value); + + spin_unlock_irqrestore(&iosf_mbi_lock, flags); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_modify); + +bool iosf_mbi_available(void) +{ + /* Mbi isn't hot-pluggable. No remove routine is provided */ + return mbi_pdev; +} +EXPORT_SYMBOL(iosf_mbi_available); + +/* + **************** P-Unit/kernel shared I2C bus arbitration **************** + * + * Some Bay Trail and Cherry Trail devices have the P-Unit and us (the kernel) + * share a single I2C bus to the PMIC. Below are helpers to arbitrate the + * accesses between the kernel and the P-Unit. + * + * See arch/x86/include/asm/iosf_mbi.h for kernel-doc text for each function. + */ + +#define SEMAPHORE_TIMEOUT 500 +#define PUNIT_SEMAPHORE_BYT 0x7 +#define PUNIT_SEMAPHORE_CHT 0x10e +#define PUNIT_SEMAPHORE_BIT BIT(0) +#define PUNIT_SEMAPHORE_ACQUIRE BIT(1) + +static DEFINE_MUTEX(iosf_mbi_pmic_access_mutex); +static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier); +static DECLARE_WAIT_QUEUE_HEAD(iosf_mbi_pmic_access_waitq); +static u32 iosf_mbi_pmic_punit_access_count; +static u32 iosf_mbi_pmic_i2c_access_count; +static u32 iosf_mbi_sem_address; +static unsigned long iosf_mbi_sem_acquired; +static struct pm_qos_request iosf_mbi_pm_qos; + +void iosf_mbi_punit_acquire(void) +{ + /* Wait for any I2C PMIC accesses from in kernel drivers to finish. */ + mutex_lock(&iosf_mbi_pmic_access_mutex); + while (iosf_mbi_pmic_i2c_access_count != 0) { + mutex_unlock(&iosf_mbi_pmic_access_mutex); + wait_event(iosf_mbi_pmic_access_waitq, + iosf_mbi_pmic_i2c_access_count == 0); + mutex_lock(&iosf_mbi_pmic_access_mutex); + } + /* + * We do not need to do anything to allow the PUNIT to safely access + * the PMIC, other then block in kernel accesses to the PMIC. + */ + iosf_mbi_pmic_punit_access_count++; + mutex_unlock(&iosf_mbi_pmic_access_mutex); +} +EXPORT_SYMBOL(iosf_mbi_punit_acquire); + +void iosf_mbi_punit_release(void) +{ + bool do_wakeup; + + mutex_lock(&iosf_mbi_pmic_access_mutex); + iosf_mbi_pmic_punit_access_count--; + do_wakeup = iosf_mbi_pmic_punit_access_count == 0; + mutex_unlock(&iosf_mbi_pmic_access_mutex); + + if (do_wakeup) + wake_up(&iosf_mbi_pmic_access_waitq); +} +EXPORT_SYMBOL(iosf_mbi_punit_release); + +static int iosf_mbi_get_sem(u32 *sem) +{ + int ret; + + ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ, + iosf_mbi_sem_address, sem); + if (ret) { + dev_err(&mbi_pdev->dev, "Error P-Unit semaphore read failed\n"); + return ret; + } + + *sem &= PUNIT_SEMAPHORE_BIT; + return 0; +} + +static void iosf_mbi_reset_semaphore(void) +{ + if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ, + iosf_mbi_sem_address, 0, PUNIT_SEMAPHORE_BIT)) + dev_err(&mbi_pdev->dev, "Error P-Unit semaphore reset failed\n"); + + cpu_latency_qos_update_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE); + + blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier, + MBI_PMIC_BUS_ACCESS_END, NULL); +} + +/* + * This function blocks P-Unit accesses to the PMIC I2C bus, so that kernel + * I2C code, such as e.g. a fuel-gauge driver, can access it safely. + * + * This function may be called by I2C controller code while an I2C driver has + * already blocked P-Unit accesses because it wants them blocked over multiple + * i2c-transfers, for e.g. read-modify-write of an I2C client register. 
+ * + * To allow safe PMIC i2c bus accesses this function takes the following steps: + * + * 1) Some code sends request to the P-Unit which make it access the PMIC + * I2C bus. Testing has shown that the P-Unit does not check its internal + * PMIC bus semaphore for these requests. Callers of these requests call + * iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these + * functions increase/decrease iosf_mbi_pmic_punit_access_count, so first + * we wait for iosf_mbi_pmic_punit_access_count to become 0. + * + * 2) Check iosf_mbi_pmic_i2c_access_count, if access has already + * been blocked by another caller, we only need to increment + * iosf_mbi_pmic_i2c_access_count and we can skip the other steps. + * + * 3) Some code makes such P-Unit requests from atomic contexts where it + * cannot call iosf_mbi_punit_acquire() as that may sleep. + * As the second step we call a notifier chain which allows any code + * needing P-Unit resources from atomic context to acquire them before + * we take control over the PMIC I2C bus. + * + * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC + * if this happens while the kernel itself is accessing the PMIC I2C bus + * the SoC hangs. + * As the third step we call cpu_latency_qos_update_request() to disallow the + * CPU to enter C6 or C7. + * + * 5) The P-Unit has a PMIC bus semaphore which we can request to stop + * autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it. + * As the fourth and final step we request this semaphore and wait for our + * request to be acknowledged. + */ +int iosf_mbi_block_punit_i2c_access(void) +{ + unsigned long start, end; + int ret = 0; + u32 sem; + + if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address)) + return -ENXIO; + + mutex_lock(&iosf_mbi_pmic_access_mutex); + + while (iosf_mbi_pmic_punit_access_count != 0) { + mutex_unlock(&iosf_mbi_pmic_access_mutex); + wait_event(iosf_mbi_pmic_access_waitq, + iosf_mbi_pmic_punit_access_count == 0); + mutex_lock(&iosf_mbi_pmic_access_mutex); + } + + if (iosf_mbi_pmic_i2c_access_count > 0) + goto success; + + blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier, + MBI_PMIC_BUS_ACCESS_BEGIN, NULL); + + /* + * Disallow the CPU to enter C6 or C7 state, entering these states + * requires the P-Unit to talk to the PMIC and if this happens while + * we're holding the semaphore, the SoC hangs. 
+ */ + cpu_latency_qos_update_request(&iosf_mbi_pm_qos, 0); + + /* host driver writes to side band semaphore register */ + ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE, + iosf_mbi_sem_address, PUNIT_SEMAPHORE_ACQUIRE); + if (ret) { + dev_err(&mbi_pdev->dev, "Error P-Unit semaphore request failed\n"); + goto error; + } + + /* host driver waits for bit 0 to be set in semaphore register */ + start = jiffies; + end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT); + do { + ret = iosf_mbi_get_sem(&sem); + if (!ret && sem) { + iosf_mbi_sem_acquired = jiffies; + dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n", + jiffies_to_msecs(jiffies - start)); + goto success; + } + + usleep_range(1000, 2000); + } while (time_before(jiffies, end)); + + ret = -ETIMEDOUT; + dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n"); +error: + iosf_mbi_reset_semaphore(); + if (!iosf_mbi_get_sem(&sem)) + dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem); +success: + if (!WARN_ON(ret)) + iosf_mbi_pmic_i2c_access_count++; + + mutex_unlock(&iosf_mbi_pmic_access_mutex); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access); + +void iosf_mbi_unblock_punit_i2c_access(void) +{ + bool do_wakeup = false; + + mutex_lock(&iosf_mbi_pmic_access_mutex); + iosf_mbi_pmic_i2c_access_count--; + if (iosf_mbi_pmic_i2c_access_count == 0) { + iosf_mbi_reset_semaphore(); + dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n", + jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired)); + do_wakeup = true; + } + mutex_unlock(&iosf_mbi_pmic_access_mutex); + + if (do_wakeup) + wake_up(&iosf_mbi_pmic_access_waitq); +} +EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access); + +int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb) +{ + int ret; + + /* Wait for the bus to go inactive before registering */ + iosf_mbi_punit_acquire(); + ret = blocking_notifier_chain_register( + &iosf_mbi_pmic_bus_access_notifier, nb); + iosf_mbi_punit_release(); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier); + +int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked( + struct notifier_block *nb) +{ + iosf_mbi_assert_punit_acquired(); + + return blocking_notifier_chain_unregister( + &iosf_mbi_pmic_bus_access_notifier, nb); +} +EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked); + +int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb) +{ + int ret; + + /* Wait for the bus to go inactive before unregistering */ + iosf_mbi_punit_acquire(); + ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb); + iosf_mbi_punit_release(); + + return ret; +} +EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier); + +void iosf_mbi_assert_punit_acquired(void) +{ + WARN_ON(iosf_mbi_pmic_punit_access_count == 0); +} +EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired); + +/**************** iosf_mbi debug code ****************/ + +#ifdef CONFIG_IOSF_MBI_DEBUG +static u32 dbg_mdr; +static u32 dbg_mcr; +static u32 dbg_mcrx; + +static int mcr_get(void *data, u64 *val) +{ + *val = *(u32 *)data; + return 0; +} + +static int mcr_set(void *data, u64 val) +{ + u8 command = ((u32)val & 0xFF000000) >> 24, + port = ((u32)val & 0x00FF0000) >> 16, + offset = ((u32)val & 0x0000FF00) >> 8; + int err; + + *(u32 *)data = val; + + if (!capable(CAP_SYS_RAWIO)) + return -EACCES; + + if (command & 1u) + err = iosf_mbi_write(port, + command, + dbg_mcrx | offset, + dbg_mdr); + else + err = iosf_mbi_read(port, + command, + dbg_mcrx | offset, + &dbg_mdr); + + 
return err; +} +DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set , "%llx\n"); + +static struct dentry *iosf_dbg; + +static void iosf_sideband_debug_init(void) +{ + iosf_dbg = debugfs_create_dir("iosf_sb", NULL); + + /* mdr */ + debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr); + + /* mcrx */ + debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx); + + /* mcr - initiates mailbox transaction */ + debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops); +} + +static void iosf_debugfs_init(void) +{ + iosf_sideband_debug_init(); +} + +static void iosf_debugfs_remove(void) +{ + debugfs_remove_recursive(iosf_dbg); +} +#else +static inline void iosf_debugfs_init(void) { } +static inline void iosf_debugfs_remove(void) { } +#endif /* CONFIG_IOSF_MBI_DEBUG */ + +static int iosf_mbi_probe(struct pci_dev *pdev, + const struct pci_device_id *dev_id) +{ + int ret; + + ret = pci_enable_device(pdev); + if (ret < 0) { + dev_err(&pdev->dev, "error: could not enable device\n"); + return ret; + } + + mbi_pdev = pci_dev_get(pdev); + iosf_mbi_sem_address = dev_id->driver_data; + + return 0; +} + +static const struct pci_device_id iosf_mbi_pci_ids[] = { + { PCI_DEVICE_DATA(INTEL, BAYTRAIL, PUNIT_SEMAPHORE_BYT) }, + { PCI_DEVICE_DATA(INTEL, BRASWELL, PUNIT_SEMAPHORE_CHT) }, + { PCI_DEVICE_DATA(INTEL, QUARK_X1000, 0) }, + { PCI_DEVICE_DATA(INTEL, TANGIER, 0) }, + { 0, }, +}; +MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids); + +static struct pci_driver iosf_mbi_pci_driver = { + .name = "iosf_mbi_pci", + .probe = iosf_mbi_probe, + .id_table = iosf_mbi_pci_ids, +}; + +static int __init iosf_mbi_init(void) +{ + iosf_debugfs_init(); + + cpu_latency_qos_add_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE); + + return pci_register_driver(&iosf_mbi_pci_driver); +} + +static void __exit iosf_mbi_exit(void) +{ + iosf_debugfs_remove(); + + pci_unregister_driver(&iosf_mbi_pci_driver); + pci_dev_put(mbi_pdev); + mbi_pdev = NULL; + + cpu_latency_qos_remove_request(&iosf_mbi_pm_qos); +} + +module_init(iosf_mbi_init); +module_exit(iosf_mbi_exit); + +MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>"); +MODULE_DESCRIPTION("IOSF Mailbox Interface accessor"); +MODULE_LICENSE("GPL v2"); |