author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit    2c3c1048746a4622d8c89a29670120dc8fab93c4
tree      848558de17fb3008cdf4d861b01ac7781903ce39  /arch/x86/platform
parent    Initial commit.
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/platform')
-rw-r--r--  arch/x86/platform/Makefile | 14
-rw-r--r--  arch/x86/platform/atom/Makefile | 2
-rw-r--r--  arch/x86/platform/atom/punit_atom_debug.c | 156
-rw-r--r--  arch/x86/platform/ce4100/Makefile | 2
-rw-r--r--  arch/x86/platform/ce4100/ce4100.c | 156
-rw-r--r--  arch/x86/platform/ce4100/falconfalls.dts | 430
-rw-r--r--  arch/x86/platform/efi/Makefile | 6
-rw-r--r--  arch/x86/platform/efi/efi.c | 901
-rw-r--r--  arch/x86/platform/efi/efi_32.c | 142
-rw-r--r--  arch/x86/platform/efi/efi_64.c | 863
-rw-r--r--  arch/x86/platform/efi/efi_stub_32.S | 60
-rw-r--r--  arch/x86/platform/efi/efi_stub_64.S | 27
-rw-r--r--  arch/x86/platform/efi/efi_thunk_64.S | 98
-rw-r--r--  arch/x86/platform/efi/quirks.c | 773
-rw-r--r--  arch/x86/platform/geode/Makefile | 4
-rw-r--r--  arch/x86/platform/geode/alix.c | 202
-rw-r--r--  arch/x86/platform/geode/geos.c | 125
-rw-r--r--  arch/x86/platform/geode/net5501.c | 152
-rw-r--r--  arch/x86/platform/intel-mid/Makefile | 2
-rw-r--r--  arch/x86/platform/intel-mid/intel-mid.c | 124
-rw-r--r--  arch/x86/platform/intel-mid/pwr.c | 485
-rw-r--r--  arch/x86/platform/intel-quark/Makefile | 3
-rw-r--r--  arch/x86/platform/intel-quark/imr.c | 597
-rw-r--r--  arch/x86/platform/intel-quark/imr_selftest.c | 129
-rw-r--r--  arch/x86/platform/intel/Makefile | 2
-rw-r--r--  arch/x86/platform/intel/iosf_mbi.c | 571
-rw-r--r--  arch/x86/platform/iris/Makefile | 2
-rw-r--r--  arch/x86/platform/iris/iris.c | 122
-rw-r--r--  arch/x86/platform/olpc/Makefile | 6
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-pm.c | 188
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-rtc.c | 80
-rw-r--r--  arch/x86/platform/olpc/olpc-xo1-sci.c | 629
-rw-r--r--  arch/x86/platform/olpc/olpc-xo15-sci.c | 231
-rw-r--r--  arch/x86/platform/olpc/olpc.c | 321
-rw-r--r--  arch/x86/platform/olpc/olpc_dt.c | 326
-rw-r--r--  arch/x86/platform/olpc/olpc_ofw.c | 121
-rw-r--r--  arch/x86/platform/olpc/xo1-wakeup.S | 126
-rw-r--r--  arch/x86/platform/pvh/Makefile | 5
-rw-r--r--  arch/x86/platform/pvh/enlighten.c | 137
-rw-r--r--  arch/x86/platform/pvh/head.S | 166
-rw-r--r--  arch/x86/platform/scx200/Makefile | 3
-rw-r--r--  arch/x86/platform/scx200/scx200_32.c | 130
-rw-r--r--  arch/x86/platform/ts5500/Makefile | 2
-rw-r--r--  arch/x86/platform/ts5500/ts5500.c | 341
-rw-r--r--  arch/x86/platform/uv/Makefile | 2
-rw-r--r--  arch/x86/platform/uv/bios_uv.c | 269
-rw-r--r--  arch/x86/platform/uv/uv_irq.c | 217
-rw-r--r--  arch/x86/platform/uv/uv_nmi.c | 1096
-rw-r--r--  arch/x86/platform/uv/uv_time.c | 393
49 files changed, 10939 insertions, 0 deletions
diff --git a/arch/x86/platform/Makefile b/arch/x86/platform/Makefile
new file mode 100644
index 000000000..3ed03a255
--- /dev/null
+++ b/arch/x86/platform/Makefile
@@ -0,0 +1,14 @@
+# SPDX-License-Identifier: GPL-2.0
+# Platform specific code goes here
+obj-y += atom/
+obj-y += ce4100/
+obj-y += efi/
+obj-y += geode/
+obj-y += iris/
+obj-y += intel/
+obj-y += intel-mid/
+obj-y += intel-quark/
+obj-y += olpc/
+obj-y += scx200/
+obj-y += ts5500/
+obj-y += uv/
diff --git a/arch/x86/platform/atom/Makefile b/arch/x86/platform/atom/Makefile
new file mode 100644
index 000000000..e06bbecd6
--- /dev/null
+++ b/arch/x86/platform/atom/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_PUNIT_ATOM_DEBUG) += punit_atom_debug.o
diff --git a/arch/x86/platform/atom/punit_atom_debug.c b/arch/x86/platform/atom/punit_atom_debug.c
new file mode 100644
index 000000000..f8ed5f66c
--- /dev/null
+++ b/arch/x86/platform/atom/punit_atom_debug.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel SOC Punit device state debug driver
+ * Punit controls power management for North Complex devices (Graphics
+ * blocks, Image Signal Processing, video processing, display, DSP etc.)
+ *
+ * Copyright (c) 2015, Intel Corporation.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/io.h>
+#include <asm/cpu_device_id.h>
+#include <asm/intel-family.h>
+#include <asm/iosf_mbi.h>
+
+/* Subsystem config/status Video processor */
+#define VED_SS_PM0 0x32
+/* Subsystem config/status ISP (Image Signal Processor) */
+#define ISP_SS_PM0 0x39
+/* Subsystem config/status Input/output controller */
+#define MIO_SS_PM 0x3B
+/* Shift bits for getting status for video, isp and i/o */
+#define SSS_SHIFT 24
+
+/* Power gate status reg */
+#define PWRGT_STATUS 0x61
+/* Shift bits for getting status for graphics rendering */
+#define RENDER_POS 0
+/* Shift bits for getting status for media control */
+#define MEDIA_POS 2
+/* Shift bits for getting status for Valley View/Baytrail display */
+#define VLV_DISPLAY_POS 6
+
+/* Subsystem config/status display for Cherry Trail SOC */
+#define CHT_DSP_SSS 0x36
+/* Shift bits for getting status for display */
+#define CHT_DSP_SSS_POS 16
+
+struct punit_device {
+ char *name;
+ int reg;
+ int sss_pos;
+};
+
+static const struct punit_device punit_device_tng[] = {
+ { "DISPLAY", CHT_DSP_SSS, SSS_SHIFT },
+ { "VED", VED_SS_PM0, SSS_SHIFT },
+ { "ISP", ISP_SS_PM0, SSS_SHIFT },
+ { "MIO", MIO_SS_PM, SSS_SHIFT },
+ { NULL }
+};
+
+static const struct punit_device punit_device_byt[] = {
+ { "GFX RENDER", PWRGT_STATUS, RENDER_POS },
+ { "GFX MEDIA", PWRGT_STATUS, MEDIA_POS },
+ { "DISPLAY", PWRGT_STATUS, VLV_DISPLAY_POS },
+ { "VED", VED_SS_PM0, SSS_SHIFT },
+ { "ISP", ISP_SS_PM0, SSS_SHIFT },
+ { "MIO", MIO_SS_PM, SSS_SHIFT },
+ { NULL }
+};
+
+static const struct punit_device punit_device_cht[] = {
+ { "GFX RENDER", PWRGT_STATUS, RENDER_POS },
+ { "GFX MEDIA", PWRGT_STATUS, MEDIA_POS },
+ { "DISPLAY", CHT_DSP_SSS, CHT_DSP_SSS_POS },
+ { "VED", VED_SS_PM0, SSS_SHIFT },
+ { "ISP", ISP_SS_PM0, SSS_SHIFT },
+ { "MIO", MIO_SS_PM, SSS_SHIFT },
+ { NULL }
+};
+
+static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};
+
+static int punit_dev_state_show(struct seq_file *seq_file, void *unused)
+{
+ u32 punit_pwr_status;
+ struct punit_device *punit_devp = seq_file->private;
+ int index;
+ int status;
+
+ seq_puts(seq_file, "\n\nPUNIT NORTH COMPLEX DEVICES :\n");
+ while (punit_devp->name) {
+ status = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ punit_devp->reg, &punit_pwr_status);
+ if (status) {
+ seq_printf(seq_file, "%9s : Read Failed\n",
+ punit_devp->name);
+ } else {
+ index = (punit_pwr_status >> punit_devp->sss_pos) & 3;
+ seq_printf(seq_file, "%9s : %s\n", punit_devp->name,
+ dstates[index]);
+ }
+ punit_devp++;
+ }
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(punit_dev_state);
+
+static struct dentry *punit_dbg_file;
+
+static void punit_dbgfs_register(struct punit_device *punit_device)
+{
+ punit_dbg_file = debugfs_create_dir("punit_atom", NULL);
+
+ debugfs_create_file("dev_power_state", 0444, punit_dbg_file,
+ punit_device, &punit_dev_state_fops);
+}
+
+static void punit_dbgfs_unregister(void)
+{
+ debugfs_remove_recursive(punit_dbg_file);
+}
+
+#define X86_MATCH(model, data) \
+ X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
+ X86_FEATURE_MWAIT, data)
+
+static const struct x86_cpu_id intel_punit_cpu_ids[] = {
+ X86_MATCH(ATOM_SILVERMONT, &punit_device_byt),
+ X86_MATCH(ATOM_SILVERMONT_MID, &punit_device_tng),
+ X86_MATCH(ATOM_AIRMONT, &punit_device_cht),
+ {}
+};
+MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
+
+static int __init punit_atom_debug_init(void)
+{
+ const struct x86_cpu_id *id;
+
+ id = x86_match_cpu(intel_punit_cpu_ids);
+ if (!id)
+ return -ENODEV;
+
+ punit_dbgfs_register((struct punit_device *)id->driver_data);
+
+ return 0;
+}
+
+static void __exit punit_atom_debug_exit(void)
+{
+ punit_dbgfs_unregister();
+}
+
+module_init(punit_atom_debug_init);
+module_exit(punit_atom_debug_exit);
+
+MODULE_AUTHOR("Kumar P, Mahesh <mahesh.kumar.p@intel.com>");
+MODULE_AUTHOR("Srinivas Pandruvada <srinivas.pandruvada@linux.intel.com>");
+MODULE_DESCRIPTION("Driver for debugging Punit device states");
+MODULE_LICENSE("GPL v2");
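
For reference, the decode that punit_dev_state_show() performs on each status register reduces to extracting a 2-bit D-state field and indexing dstates[]. A minimal standalone sketch, using the register layout from the #defines above (the raw register value here is a made-up example):

#include <stdio.h>

#define VLV_DISPLAY_POS	6
static const char * const dstates[] = {"D0", "D0i1", "D0i2", "D0i3"};

int main(void)
{
	unsigned int pwrgt_status = 0x00000080;	/* hypothetical raw value */
	/* each device's state is a 2-bit field: 0 = D0 ... 3 = D0i3 */
	unsigned int index = (pwrgt_status >> VLV_DISPLAY_POS) & 3;

	printf("DISPLAY : %s\n", dstates[index]);	/* prints "D0i2" */
	return 0;
}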
diff --git a/arch/x86/platform/ce4100/Makefile b/arch/x86/platform/ce4100/Makefile
new file mode 100644
index 000000000..7b7f37dc8
--- /dev/null
+++ b/arch/x86/platform/ce4100/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_X86_INTEL_CE) += ce4100.o
diff --git a/arch/x86/platform/ce4100/ce4100.c b/arch/x86/platform/ce4100/ce4100.c
new file mode 100644
index 000000000..40745664d
--- /dev/null
+++ b/arch/x86/platform/ce4100/ce4100.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel CE4100 platform specific setup code
+ *
+ * (C) Copyright 2010 Intel Corporation
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/irq.h>
+#include <linux/reboot.h>
+#include <linux/serial_reg.h>
+#include <linux/serial_8250.h>
+
+#include <asm/ce4100.h>
+#include <asm/prom.h>
+#include <asm/setup.h>
+#include <asm/i8259.h>
+#include <asm/io.h>
+#include <asm/io_apic.h>
+#include <asm/emergency-restart.h>
+
+/*
+ * The CE4100 platform has an internal 8051 Microcontroller which is
+ * responsible for signaling to the external Power Management Unit the
+ * intention to reset, reboot or power off the system. This 8051 device has
+ * its command register mapped at I/O port 0xcf9 and the value 0x4 is used
+ * to power off the system.
+ */
+static void ce4100_power_off(void)
+{
+ outb(0x4, 0xcf9);
+}
+
+#ifdef CONFIG_SERIAL_8250
+
+static unsigned int mem_serial_in(struct uart_port *p, int offset)
+{
+ offset = offset << p->regshift;
+ return readl(p->membase + offset);
+}
+
+/*
+ * The UART Tx interrupts are not set under some conditions and therefore serial
+ * transmission hangs. This is a silicon issue and has not been root caused. The
+ * workaround checks the UART_LSR_THRE and UART_LSR_TEMT bits of the LSR
+ * register in the interrupt handler; if at least one of these two bits is set,
+ * the transmit request is processed. Without this workaround, serial
+ * transmission may hang. This workaround is for errata number 9 in
+ * Errata - B step.
+ */
+
+static unsigned int ce4100_mem_serial_in(struct uart_port *p, int offset)
+{
+ unsigned int ret, ier, lsr;
+
+ if (offset == UART_IIR) {
+ offset = offset << p->regshift;
+ ret = readl(p->membase + offset);
+ if (ret & UART_IIR_NO_INT) {
+ /* see if the TX interrupt should really have been set */
+ ier = mem_serial_in(p, UART_IER);
+ /* see if the UART's XMIT interrupt is enabled */
+ if (ier & UART_IER_THRI) {
+ lsr = mem_serial_in(p, UART_LSR);
+ /* now check to see if the UART should be
+ generating an interrupt (but isn't) */
+ if (lsr & (UART_LSR_THRE | UART_LSR_TEMT))
+ ret &= ~UART_IIR_NO_INT;
+ }
+ }
+ } else
+ ret = mem_serial_in(p, offset);
+ return ret;
+}
+
+static void ce4100_mem_serial_out(struct uart_port *p, int offset, int value)
+{
+ offset = offset << p->regshift;
+ writel(value, p->membase + offset);
+}
+
+static void ce4100_serial_fixup(int port, struct uart_port *up,
+ u32 *capabilities)
+{
+#ifdef CONFIG_EARLY_PRINTK
+ /*
+ * Override the legacy port configuration that comes from
+ * asm/serial.h. Using the ioport driver and then switching to the
+ * PCI memory-mapped driver hangs the IOAPIC.
+ */
+ if (up->iotype != UPIO_MEM32) {
+ up->uartclk = 14745600;
+ up->mapbase = 0xdffe0200;
+ set_fixmap_nocache(FIX_EARLYCON_MEM_BASE,
+ up->mapbase & PAGE_MASK);
+ up->membase =
+ (void __iomem *)__fix_to_virt(FIX_EARLYCON_MEM_BASE);
+ up->membase += up->mapbase & ~PAGE_MASK;
+ up->mapbase += port * 0x100;
+ up->membase += port * 0x100;
+ up->iotype = UPIO_MEM32;
+ up->regshift = 2;
+ up->irq = 4;
+ }
+#endif
+ up->iobase = 0;
+ up->serial_in = ce4100_mem_serial_in;
+ up->serial_out = ce4100_mem_serial_out;
+
+ *capabilities |= (1 << 12);
+}
+
+static __init void sdv_serial_fixup(void)
+{
+ serial8250_set_isa_configurator(ce4100_serial_fixup);
+}
+
+#else
+static inline void sdv_serial_fixup(void) { }
+#endif
+
+static void __init sdv_arch_setup(void)
+{
+ sdv_serial_fixup();
+}
+
+static void sdv_pci_init(void)
+{
+ x86_of_pci_init();
+}
+
+/*
+ * CE4100 specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_ce4100_early_setup(void)
+{
+ x86_init.oem.arch_setup = sdv_arch_setup;
+ x86_init.resources.probe_roms = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.setup_ioapic_ids = setup_ioapic_ids_from_mpc_nocheck;
+ x86_init.pci.init = ce4100_pci_init;
+ x86_init.pci.init_irq = sdv_pci_init;
+
+ /*
+ * By default, the reboot method is ACPI, which the CE4100 bootloader
+ * (CEFDK) supports via the FADT.ResetReg Address and ResetValue.
+ * However, the bootloader issues a system power off instead of a
+ * reboot. By using BOOT_KBD we ensure a proper system reboot as
+ * expected.
+ */
+ reboot_type = BOOT_KBD;
+
+ pm_power_off = ce4100_power_off;
+}
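
The errata workaround in ce4100_mem_serial_in() reduces to a single predicate: the IIR claims no interrupt is pending while a TX interrupt is enabled and the transmitter is in fact empty. A self-contained restatement (bit values mirror linux/serial_reg.h; this is an illustration, not kernel code):

#include <stdbool.h>

#define UART_IER_THRI	0x02	/* TX holding register empty irq enabled */
#define UART_LSR_THRE	0x20	/* TX holding register empty */
#define UART_LSR_TEMT	0x40	/* transmitter completely empty */
#define UART_IIR_NO_INT	0x01	/* IIR: no interrupt pending */

/* True when the silicon failed to raise a TX interrupt it owed us. */
static bool tx_irq_was_lost(unsigned int iir, unsigned int ier,
			    unsigned int lsr)
{
	return (iir & UART_IIR_NO_INT) &&
	       (ier & UART_IER_THRI) &&
	       (lsr & (UART_LSR_THRE | UART_LSR_TEMT));
}

When this holds, the driver clears UART_IIR_NO_INT from the value it returns, so the 8250 core services the transmit as if the interrupt had fired.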
diff --git a/arch/x86/platform/ce4100/falconfalls.dts b/arch/x86/platform/ce4100/falconfalls.dts
new file mode 100644
index 000000000..65fa3d866
--- /dev/null
+++ b/arch/x86/platform/ce4100/falconfalls.dts
@@ -0,0 +1,430 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CE4100 on Falcon Falls
+ *
+ * (c) Copyright 2010 Intel Corporation
+ */
+/dts-v1/;
+/ {
+ model = "intel,falconfalls";
+ compatible = "intel,falconfalls";
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "intel,ce4100";
+ reg = <0>;
+ lapic = <&lapic0>;
+ };
+ };
+
+ soc@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "intel,ce4100-cp";
+ ranges;
+
+ ioapic1: interrupt-controller@fec00000 {
+ #interrupt-cells = <2>;
+ compatible = "intel,ce4100-ioapic";
+ interrupt-controller;
+ reg = <0xfec00000 0x1000>;
+ };
+
+ timer@fed00000 {
+ compatible = "intel,ce4100-hpet";
+ reg = <0xfed00000 0x200>;
+ };
+
+ lapic0: interrupt-controller@fee00000 {
+ compatible = "intel,ce4100-lapic";
+ reg = <0xfee00000 0x1000>;
+ };
+
+ pci@3fc {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ compatible = "intel,ce4100-pci", "pci";
+ device_type = "pci";
+ bus-range = <0 0>;
+ ranges = <0x2000000 0 0xbffff000 0xbffff000 0 0x1000
+ 0x2000000 0 0xdffe0000 0xdffe0000 0 0x1000
+ 0x0000000 0 0x0 0x0 0 0x100>;
+
+ /* Secondary IO-APIC */
+ ioapic2: interrupt-controller@0,1 {
+ #interrupt-cells = <2>;
+ compatible = "intel,ce4100-ioapic";
+ interrupt-controller;
+ reg = <0x100 0x0 0x0 0x0 0x0>;
+ assigned-addresses = <0x02000000 0x0 0xbffff000 0x0 0x1000>;
+ };
+
+ pci@1,0 {
+ #address-cells = <3>;
+ #size-cells = <2>;
+ compatible = "intel,ce4100-pci", "pci";
+ device_type = "pci";
+ bus-range = <1 1>;
+ reg = <0x0800 0x0 0x0 0x0 0x0>;
+ ranges = <0x2000000 0 0xdffe0000 0x2000000 0 0xdffe0000 0 0x1000>;
+
+ interrupt-parent = <&ioapic2>;
+
+ display@2,0 {
+ compatible = "pci8086,2e5b.2",
+ "pci8086,2e5b",
+ "pciclass038000",
+ "pciclass0380";
+
+ reg = <0x11000 0x0 0x0 0x0 0x0>;
+ interrupts = <0 1>;
+ };
+
+ multimedia@3,0 {
+ compatible = "pci8086,2e5c.2",
+ "pci8086,2e5c",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x11800 0x0 0x0 0x0 0x0>;
+ interrupts = <2 1>;
+ };
+
+ multimedia@4,0 {
+ compatible = "pci8086,2e5d.2",
+ "pci8086,2e5d",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x12000 0x0 0x0 0x0 0x0>;
+ interrupts = <4 1>;
+ };
+
+ multimedia@4,1 {
+ compatible = "pci8086,2e5e.2",
+ "pci8086,2e5e",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x12100 0x0 0x0 0x0 0x0>;
+ interrupts = <5 1>;
+ };
+
+ sound@6,0 {
+ compatible = "pci8086,2e5f.2",
+ "pci8086,2e5f",
+ "pciclass040100",
+ "pciclass0401";
+
+ reg = <0x13000 0x0 0x0 0x0 0x0>;
+ interrupts = <6 1>;
+ };
+
+ sound@6,1 {
+ compatible = "pci8086,2e5f.2",
+ "pci8086,2e5f",
+ "pciclass040100",
+ "pciclass0401";
+
+ reg = <0x13100 0x0 0x0 0x0 0x0>;
+ interrupts = <7 1>;
+ };
+
+ sound@6,2 {
+ compatible = "pci8086,2e60.2",
+ "pci8086,2e60",
+ "pciclass040100",
+ "pciclass0401";
+
+ reg = <0x13200 0x0 0x0 0x0 0x0>;
+ interrupts = <8 1>;
+ };
+
+ display@8,0 {
+ compatible = "pci8086,2e61.2",
+ "pci8086,2e61",
+ "pciclass038000",
+ "pciclass0380";
+
+ reg = <0x14000 0x0 0x0 0x0 0x0>;
+ interrupts = <9 1>;
+ };
+
+ display@8,1 {
+ compatible = "pci8086,2e62.2",
+ "pci8086,2e62",
+ "pciclass038000",
+ "pciclass0380";
+
+ reg = <0x14100 0x0 0x0 0x0 0x0>;
+ interrupts = <10 1>;
+ };
+
+ multimedia@8,2 {
+ compatible = "pci8086,2e63.2",
+ "pci8086,2e63",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x14200 0x0 0x0 0x0 0x0>;
+ interrupts = <11 1>;
+ };
+
+ entertainment-encryption@9,0 {
+ compatible = "pci8086,2e64.2",
+ "pci8086,2e64",
+ "pciclass101000",
+ "pciclass1010";
+
+ reg = <0x14800 0x0 0x0 0x0 0x0>;
+ interrupts = <12 1>;
+ };
+
+ localbus@a,0 {
+ compatible = "pci8086,2e65.2",
+ "pci8086,2e65",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x15000 0x0 0x0 0x0 0x0>;
+ };
+
+ serial@b,0 {
+ compatible = "pci8086,2e66.2",
+ "pci8086,2e66",
+ "pciclass070003",
+ "pciclass0700";
+
+ reg = <0x15800 0x0 0x0 0x0 0x0>;
+ interrupts = <14 1>;
+ };
+
+ pcigpio: gpio@b,1 {
+ #gpio-cells = <2>;
+ #interrupt-cells = <2>;
+ compatible = "pci8086,2e67.2",
+ "pci8086,2e67",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x15900 0x0 0x0 0x0 0x0>;
+ interrupts = <15 1>;
+ interrupt-controller;
+ gpio-controller;
+ intel,muxctl = <0>;
+ };
+
+ i2c-controller@b,2 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "pci8086,2e68.2",
+ "pci8086,2e68",
+ "pciclass,ff0000",
+ "pciclass,ff00";
+
+ reg = <0x15a00 0x0 0x0 0x0 0x0>;
+ interrupts = <16 1>;
+ ranges = <0 0 0x02000000 0 0xdffe0500 0x100
+ 1 0 0x02000000 0 0xdffe0600 0x100
+ 2 0 0x02000000 0 0xdffe0700 0x100>;
+
+ i2c@0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <0 0 0x100>;
+ };
+
+ i2c@1 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <1 0 0x100>;
+
+ gpio@26 {
+ #gpio-cells = <2>;
+ compatible = "nxp,pcf8575";
+ reg = <0x26>;
+ gpio-controller;
+ };
+ };
+
+ i2c@2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "intel,ce4100-i2c-controller";
+ reg = <2 0 0x100>;
+
+ gpio@26 {
+ #gpio-cells = <2>;
+ compatible = "nxp,pcf8575";
+ reg = <0x26>;
+ gpio-controller;
+ };
+ };
+ };
+
+ smard-card@b,3 {
+ compatible = "pci8086,2e69.2",
+ "pci8086,2e69",
+ "pciclass070500",
+ "pciclass0705";
+
+ reg = <0x15b00 0x0 0x0 0x0 0x0>;
+ interrupts = <15 1>;
+ };
+
+ spi-controller@b,4 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible =
+ "pci8086,2e6a.2",
+ "pci8086,2e6a",
+ "pciclass,ff0000",
+ "pciclass,ff00";
+
+ reg = <0x15c00 0x0 0x0 0x0 0x0>;
+ interrupts = <15 1>;
+
+ dac@0 {
+ compatible = "ti,pcm1755";
+ reg = <0>;
+ spi-max-frequency = <115200>;
+ };
+
+ dac@1 {
+ compatible = "ti,pcm1609a";
+ reg = <1>;
+ spi-max-frequency = <115200>;
+ };
+
+ eeprom@2 {
+ compatible = "atmel,at93c46";
+ reg = <2>;
+ spi-max-frequency = <115200>;
+ };
+ };
+
+ multimedia@b,7 {
+ compatible = "pci8086,2e6d.2",
+ "pci8086,2e6d",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x15f00 0x0 0x0 0x0 0x0>;
+ };
+
+ ethernet@c,0 {
+ compatible = "pci8086,2e6e.2",
+ "pci8086,2e6e",
+ "pciclass020000",
+ "pciclass0200";
+
+ reg = <0x16000 0x0 0x0 0x0 0x0>;
+ interrupts = <21 1>;
+ };
+
+ clock@c,1 {
+ compatible = "pci8086,2e6f.2",
+ "pci8086,2e6f",
+ "pciclassff0000",
+ "pciclassff00";
+
+ reg = <0x16100 0x0 0x0 0x0 0x0>;
+ interrupts = <3 1>;
+ };
+
+ usb@d,0 {
+ compatible = "pci8086,2e70.2",
+ "pci8086,2e70",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x16800 0x0 0x0 0x0 0x0>;
+ interrupts = <22 1>;
+ };
+
+ usb@d,1 {
+ compatible = "pci8086,2e70.2",
+ "pci8086,2e70",
+ "pciclass0c0320",
+ "pciclass0c03";
+
+ reg = <0x16900 0x0 0x0 0x0 0x0>;
+ interrupts = <22 1>;
+ };
+
+ sata@e,0 {
+ compatible = "pci8086,2e71.0",
+ "pci8086,2e71",
+ "pciclass010601",
+ "pciclass0106";
+
+ reg = <0x17000 0x0 0x0 0x0 0x0>;
+ interrupts = <23 1>;
+ };
+
+ flash@f,0 {
+ compatible = "pci8086,701.1",
+ "pci8086,701",
+ "pciclass050100",
+ "pciclass0501";
+
+ reg = <0x17800 0x0 0x0 0x0 0x0>;
+ interrupts = <13 1>;
+ };
+
+ entertainment-encryption@10,0 {
+ compatible = "pci8086,702.1",
+ "pci8086,702",
+ "pciclass101000",
+ "pciclass1010";
+
+ reg = <0x18000 0x0 0x0 0x0 0x0>;
+ };
+
+ co-processor@11,0 {
+ compatible = "pci8086,703.1",
+ "pci8086,703",
+ "pciclass0b4000",
+ "pciclass0b40";
+
+ reg = <0x18800 0x0 0x0 0x0 0x0>;
+ interrupts = <1 1>;
+ };
+
+ multimedia@12,0 {
+ compatible = "pci8086,704.0",
+ "pci8086,704",
+ "pciclass048000",
+ "pciclass0480";
+
+ reg = <0x19000 0x0 0x0 0x0 0x0>;
+ };
+ };
+
+ isa@1f,0 {
+ #address-cells = <2>;
+ #size-cells = <1>;
+ compatible = "isa";
+ reg = <0xf800 0x0 0x0 0x0 0x0>;
+ ranges = <1 0 0 0 0 0x100>;
+
+ rtc@70 {
+ compatible = "intel,ce4100-rtc", "motorola,mc146818";
+ interrupts = <8 3>;
+ interrupt-parent = <&ioapic1>;
+ ctrl-reg = <2>;
+ freq-reg = <0x26>;
+ reg = <1 0x70 2>;
+ };
+ };
+ };
+ };
+};
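
The compatible strings above are what Linux drivers bind against; the pci8086,NNNN forms are matched through the PCI/OF glue set up by x86_of_pci_init(), while named controllers match directly. As an illustrative sketch (not code from this tree; the driver and its name are hypothetical), a platform driver for the "intel,ce4100-i2c-controller" nodes would carry a match table like:

#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

static const struct of_device_id ce4100_i2c_of_match[] = {
	{ .compatible = "intel,ce4100-i2c-controller" },
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, ce4100_i2c_of_match);

static struct platform_driver ce4100_i2c_driver = {
	.driver = {
		.name		= "ce4100-i2c",	/* hypothetical name */
		.of_match_table	= ce4100_i2c_of_match,
	},
	/* .probe and .remove omitted in this sketch */
};
module_platform_driver(ce4100_i2c_driver);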
diff --git a/arch/x86/platform/efi/Makefile b/arch/x86/platform/efi/Makefile
new file mode 100644
index 000000000..a50245157
--- /dev/null
+++ b/arch/x86/platform/efi/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+KASAN_SANITIZE := n
+GCOV_PROFILE := n
+
+obj-$(CONFIG_EFI) += quirks.o efi.o efi_$(BITS).o efi_stub_$(BITS).o
+obj-$(CONFIG_EFI_MIXED) += efi_thunk_$(BITS).o
diff --git a/arch/x86/platform/efi/efi.c b/arch/x86/platform/efi/efi.c
new file mode 100644
index 000000000..ebc98a68c
--- /dev/null
+++ b/arch/x86/platform/efi/efi.c
@@ -0,0 +1,901 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common EFI (Extensible Firmware Interface) support functions
+ * Based on Extensible Firmware Interface Specification version 1.0
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ * Copyright (C) 2005-2008 Intel Co.
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Bibo Mao <bibo.mao@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ * Huang Ying <ying.huang@intel.com>
+ * Copyright (C) 2013 SuSE Labs
+ * Borislav Petkov <bp@suse.de> - runtime services VA mapping
+ *
+ * Copied from efi_32.c to eliminate the duplicated code between EFI
+ * 32/64 support code. --ying 2007-10-26
+ *
+ * Not all EFI Runtime Services are implemented yet, as EFI only
+ * supports physical mode addressing on SoftSDV. This is to be fixed
+ * in a future version. --drummond 1999-07-20
+ *
+ * Implemented EFI runtime services and virtual mode calls. --davidm
+ *
+ * Goutham Rao: <goutham.rao@intel.com>
+ * Skip non-WB memory and ignore empty memory ranges.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/time.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <linux/bcd.h>
+
+#include <asm/setup.h>
+#include <asm/efi.h>
+#include <asm/e820/api.h>
+#include <asm/time.h>
+#include <asm/tlbflush.h>
+#include <asm/x86_init.h>
+#include <asm/uv/uv.h>
+
+static unsigned long efi_systab_phys __initdata;
+static unsigned long prop_phys = EFI_INVALID_TABLE_ADDR;
+static unsigned long uga_phys = EFI_INVALID_TABLE_ADDR;
+static unsigned long efi_runtime, efi_nr_tables;
+
+unsigned long efi_fw_vendor, efi_config_table;
+
+static const efi_config_table_type_t arch_tables[] __initconst = {
+ {EFI_PROPERTIES_TABLE_GUID, &prop_phys, "PROP" },
+ {UGA_IO_PROTOCOL_GUID, &uga_phys, "UGA" },
+#ifdef CONFIG_X86_UV
+ {UV_SYSTEM_TABLE_GUID, &uv_systab_phys, "UVsystab" },
+#endif
+ {},
+};
+
+static const unsigned long * const efi_tables[] = {
+ &efi.acpi,
+ &efi.acpi20,
+ &efi.smbios,
+ &efi.smbios3,
+ &uga_phys,
+#ifdef CONFIG_X86_UV
+ &uv_systab_phys,
+#endif
+ &efi_fw_vendor,
+ &efi_runtime,
+ &efi_config_table,
+ &efi.esrt,
+ &prop_phys,
+ &efi_mem_attr_table,
+#ifdef CONFIG_EFI_RCI2_TABLE
+ &rci2_table_phys,
+#endif
+ &efi.tpm_log,
+ &efi.tpm_final_log,
+ &efi_rng_seed,
+#ifdef CONFIG_LOAD_UEFI_KEYS
+ &efi.mokvar_table,
+#endif
+#ifdef CONFIG_EFI_COCO_SECRET
+ &efi.coco_secret,
+#endif
+};
+
+u64 efi_setup; /* efi setup_data physical address */
+
+static int add_efi_memmap __initdata;
+static int __init setup_add_efi_memmap(char *arg)
+{
+ add_efi_memmap = 1;
+ return 0;
+}
+early_param("add_efi_memmap", setup_add_efi_memmap);
+
+/*
+ * Tell the kernel about the EFI memory map. This might include
+ * more than the max 128 entries that can fit in the passed in e820
+ * legacy (zeropage) memory map, but the kernel's e820 table can hold
+ * E820_MAX_ENTRIES.
+ */
+
+static void __init do_add_efi_memmap(void)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+ int e820_type;
+
+ switch (md->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ if (efi_soft_reserve_enabled()
+ && (md->attribute & EFI_MEMORY_SP))
+ e820_type = E820_TYPE_SOFT_RESERVED;
+ else if (md->attribute & EFI_MEMORY_WB)
+ e820_type = E820_TYPE_RAM;
+ else
+ e820_type = E820_TYPE_RESERVED;
+ break;
+ case EFI_ACPI_RECLAIM_MEMORY:
+ e820_type = E820_TYPE_ACPI;
+ break;
+ case EFI_ACPI_MEMORY_NVS:
+ e820_type = E820_TYPE_NVS;
+ break;
+ case EFI_UNUSABLE_MEMORY:
+ e820_type = E820_TYPE_UNUSABLE;
+ break;
+ case EFI_PERSISTENT_MEMORY:
+ e820_type = E820_TYPE_PMEM;
+ break;
+ default:
+ /*
+ * EFI_RESERVED_TYPE EFI_RUNTIME_SERVICES_CODE
+ * EFI_RUNTIME_SERVICES_DATA EFI_MEMORY_MAPPED_IO
+ * EFI_MEMORY_MAPPED_IO_PORT_SPACE EFI_PAL_CODE
+ */
+ e820_type = E820_TYPE_RESERVED;
+ break;
+ }
+
+ e820__range_add(start, size, e820_type);
+ }
+ e820__update_table(e820_table);
+}
+
+/*
+ * Given add_efi_memmap defaults to 0 and there is no alternative
+ * e820 mechanism for soft-reserved memory, import the full EFI memory
+ * map if soft reservations are present and enabled. Otherwise, the
+ * mechanism to disable the kernel's consideration of EFI_MEMORY_SP is
+ * the efi=nosoftreserve option.
+ */
+static bool do_efi_soft_reserve(void)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return false;
+
+ if (!efi_soft_reserve_enabled())
+ return false;
+
+ for_each_efi_memory_desc(md)
+ if (md->type == EFI_CONVENTIONAL_MEMORY &&
+ (md->attribute & EFI_MEMORY_SP))
+ return true;
+ return false;
+}
+
+int __init efi_memblock_x86_reserve_range(void)
+{
+ struct efi_info *e = &boot_params.efi_info;
+ struct efi_memory_map_data data;
+ phys_addr_t pmap;
+ int rv;
+
+ if (efi_enabled(EFI_PARAVIRT))
+ return 0;
+
+ /* Can't handle firmware tables above 4GB on i386 */
+ if (IS_ENABLED(CONFIG_X86_32) && e->efi_memmap_hi > 0) {
+ pr_err("Memory map is above 4GB, disabling EFI.\n");
+ return -EINVAL;
+ }
+ pmap = (phys_addr_t)(e->efi_memmap | ((u64)e->efi_memmap_hi << 32));
+
+ data.phys_map = pmap;
+ data.size = e->efi_memmap_size;
+ data.desc_size = e->efi_memdesc_size;
+ data.desc_version = e->efi_memdesc_version;
+
+ rv = efi_memmap_init_early(&data);
+ if (rv)
+ return rv;
+
+ if (add_efi_memmap || do_efi_soft_reserve())
+ do_add_efi_memmap();
+
+ efi_fake_memmap_early();
+
+ WARN(efi.memmap.desc_version != 1,
+ "Unexpected EFI_MEMORY_DESCRIPTOR version %ld",
+ efi.memmap.desc_version);
+
+ memblock_reserve(pmap, efi.memmap.nr_map * efi.memmap.desc_size);
+ set_bit(EFI_PRESERVE_BS_REGIONS, &efi.flags);
+
+ return 0;
+}
+
+#define OVERFLOW_ADDR_SHIFT (64 - EFI_PAGE_SHIFT)
+#define OVERFLOW_ADDR_MASK (U64_MAX << OVERFLOW_ADDR_SHIFT)
+#define U64_HIGH_BIT (~(U64_MAX >> 1))
+
+static bool __init efi_memmap_entry_valid(const efi_memory_desc_t *md, int i)
+{
+ u64 end = (md->num_pages << EFI_PAGE_SHIFT) + md->phys_addr - 1;
+ u64 end_hi = 0;
+ char buf[64];
+
+ if (md->num_pages == 0) {
+ end = 0;
+ } else if (md->num_pages > EFI_PAGES_MAX ||
+ EFI_PAGES_MAX - md->num_pages <
+ (md->phys_addr >> EFI_PAGE_SHIFT)) {
+ end_hi = (md->num_pages & OVERFLOW_ADDR_MASK)
+ >> OVERFLOW_ADDR_SHIFT;
+
+ if ((md->phys_addr & U64_HIGH_BIT) && !(end & U64_HIGH_BIT))
+ end_hi += 1;
+ } else {
+ return true;
+ }
+
+ pr_warn_once(FW_BUG "Invalid EFI memory map entries:\n");
+
+ if (end_hi) {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%llx%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end_hi, end);
+ } else {
+ pr_warn("mem%02u: %s range=[0x%016llx-0x%016llx] (invalid)\n",
+ i, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr, end);
+ }
+ return false;
+}
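
A worked example of the overflow branch above, assuming EFI_PAGE_SHIFT is 12 (so OVERFLOW_ADDR_SHIFT is 52), with a deliberately bogus descriptor:

#include <stdint.h>
#include <stdio.h>

#define EFI_PAGE_SHIFT		12
#define OVERFLOW_ADDR_SHIFT	(64 - EFI_PAGE_SHIFT)	/* 52 */
#define OVERFLOW_ADDR_MASK	(UINT64_MAX << OVERFLOW_ADDR_SHIFT)

int main(void)
{
	/* Hypothetical bogus entry: 2^64 - 1 pages starting at 0x1000. */
	uint64_t phys = 0x1000, num_pages = UINT64_MAX;

	uint64_t end = (num_pages << EFI_PAGE_SHIFT) + phys - 1;	/* wraps */
	uint64_t end_hi = (num_pages & OVERFLOW_ADDR_MASK) >> OVERFLOW_ADDR_SHIFT;

	/* True end is 2^76 - 1; end_hi:end reconstructs it across two u64s. */
	printf("range=[0x%016llx-0x%llx%016llx]\n",
	       (unsigned long long)phys,
	       (unsigned long long)end_hi, (unsigned long long)end);
	return 0;
}

This prints range=[0x0000000000001000-0xfffffffffffffffffff]: end_hi (0xfff) carries exactly the bits that were shifted out of the 64-bit end, matching the pr_warn() format in efi_memmap_entry_valid().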
+
+static void __init efi_clean_memmap(void)
+{
+ efi_memory_desc_t *out = efi.memmap.map;
+ const efi_memory_desc_t *in = out;
+ const efi_memory_desc_t *end = efi.memmap.map_end;
+ int i, n_removal;
+
+ for (i = n_removal = 0; in < end; i++) {
+ if (efi_memmap_entry_valid(in, i)) {
+ if (out != in)
+ memcpy(out, in, efi.memmap.desc_size);
+ out = (void *)out + efi.memmap.desc_size;
+ } else {
+ n_removal++;
+ }
+ in = (void *)in + efi.memmap.desc_size;
+ }
+
+ if (n_removal > 0) {
+ struct efi_memory_map_data data = {
+ .phys_map = efi.memmap.phys_map,
+ .desc_version = efi.memmap.desc_version,
+ .desc_size = efi.memmap.desc_size,
+ .size = efi.memmap.desc_size * (efi.memmap.nr_map - n_removal),
+ .flags = 0,
+ };
+
+ pr_warn("Removing %d invalid memory map entries.\n", n_removal);
+ efi_memmap_install(&data);
+ }
+}
+
+void __init efi_print_memmap(void)
+{
+ efi_memory_desc_t *md;
+ int i = 0;
+
+ for_each_efi_memory_desc(md) {
+ char buf[64];
+
+ pr_info("mem%02u: %s range=[0x%016llx-0x%016llx] (%lluMB)\n",
+ i++, efi_md_typeattr_format(buf, sizeof(buf), md),
+ md->phys_addr,
+ md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT) - 1,
+ (md->num_pages >> (20 - EFI_PAGE_SHIFT)));
+ }
+}
+
+static int __init efi_systab_init(unsigned long phys)
+{
+ int size = efi_enabled(EFI_64BIT) ? sizeof(efi_system_table_64_t)
+ : sizeof(efi_system_table_32_t);
+ const efi_table_hdr_t *hdr;
+ bool over4g = false;
+ void *p;
+ int ret;
+
+ hdr = p = early_memremap_ro(phys, size);
+ if (p == NULL) {
+ pr_err("Couldn't map the system table!\n");
+ return -ENOMEM;
+ }
+
+ ret = efi_systab_check_header(hdr, 1);
+ if (ret) {
+ early_memunmap(p, size);
+ return ret;
+ }
+
+ if (efi_enabled(EFI_64BIT)) {
+ const efi_system_table_64_t *systab64 = p;
+
+ efi_runtime = systab64->runtime;
+ over4g = systab64->runtime > U32_MAX;
+
+ if (efi_setup) {
+ struct efi_setup_data *data;
+
+ data = early_memremap_ro(efi_setup, sizeof(*data));
+ if (!data) {
+ early_memunmap(p, size);
+ return -ENOMEM;
+ }
+
+ efi_fw_vendor = (unsigned long)data->fw_vendor;
+ efi_config_table = (unsigned long)data->tables;
+
+ over4g |= data->fw_vendor > U32_MAX ||
+ data->tables > U32_MAX;
+
+ early_memunmap(data, sizeof(*data));
+ } else {
+ efi_fw_vendor = systab64->fw_vendor;
+ efi_config_table = systab64->tables;
+
+ over4g |= systab64->fw_vendor > U32_MAX ||
+ systab64->tables > U32_MAX;
+ }
+ efi_nr_tables = systab64->nr_tables;
+ } else {
+ const efi_system_table_32_t *systab32 = p;
+
+ efi_fw_vendor = systab32->fw_vendor;
+ efi_runtime = systab32->runtime;
+ efi_config_table = systab32->tables;
+ efi_nr_tables = systab32->nr_tables;
+ }
+
+ efi.runtime_version = hdr->revision;
+
+ efi_systab_report_header(hdr, efi_fw_vendor);
+ early_memunmap(p, size);
+
+ if (IS_ENABLED(CONFIG_X86_32) && over4g) {
+ pr_err("EFI data located above 4GB, disabling EFI.\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __init efi_config_init(const efi_config_table_type_t *arch_tables)
+{
+ void *config_tables;
+ int sz, ret;
+
+ if (efi_nr_tables == 0)
+ return 0;
+
+ if (efi_enabled(EFI_64BIT))
+ sz = sizeof(efi_config_table_64_t);
+ else
+ sz = sizeof(efi_config_table_32_t);
+
+ /*
+ * Let's see what config tables the firmware passed to us.
+ */
+ config_tables = early_memremap(efi_config_table, efi_nr_tables * sz);
+ if (config_tables == NULL) {
+ pr_err("Could not map Configuration table!\n");
+ return -ENOMEM;
+ }
+
+ ret = efi_config_parse_tables(config_tables, efi_nr_tables,
+ arch_tables);
+
+ early_memunmap(config_tables, efi_nr_tables * sz);
+ return ret;
+}
+
+void __init efi_init(void)
+{
+ if (IS_ENABLED(CONFIG_X86_32) &&
+ (boot_params.efi_info.efi_systab_hi ||
+ boot_params.efi_info.efi_memmap_hi)) {
+ pr_info("Table located above 4GB, disabling EFI.\n");
+ return;
+ }
+
+ efi_systab_phys = boot_params.efi_info.efi_systab |
+ ((__u64)boot_params.efi_info.efi_systab_hi << 32);
+
+ if (efi_systab_init(efi_systab_phys))
+ return;
+
+ if (efi_reuse_config(efi_config_table, efi_nr_tables))
+ return;
+
+ if (efi_config_init(arch_tables))
+ return;
+
+ /*
+ * Note: We currently don't support runtime services on an EFI
+ * that doesn't match the kernel 32/64-bit mode.
+ */
+
+ if (!efi_runtime_supported())
+ pr_err("No EFI runtime due to 32/64-bit mismatch with kernel\n");
+
+ if (!efi_runtime_supported() || efi_runtime_disabled()) {
+ efi_memmap_unmap();
+ return;
+ }
+
+ /* Parse the EFI Properties table if it exists */
+ if (prop_phys != EFI_INVALID_TABLE_ADDR) {
+ efi_properties_table_t *tbl;
+
+ tbl = early_memremap_ro(prop_phys, sizeof(*tbl));
+ if (tbl == NULL) {
+ pr_err("Could not map Properties table!\n");
+ } else {
+ if (tbl->memory_protection_attribute &
+ EFI_PROPERTIES_RUNTIME_MEMORY_PROTECTION_NON_EXECUTABLE_PE_DATA)
+ set_bit(EFI_NX_PE_DATA, &efi.flags);
+
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+ }
+
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ efi_clean_memmap();
+
+ if (efi_enabled(EFI_DBG))
+ efi_print_memmap();
+}
+
+/* Merge contiguous regions of the same type and attribute */
+static void __init efi_merge_regions(void)
+{
+ efi_memory_desc_t *md, *prev_md = NULL;
+
+ for_each_efi_memory_desc(md) {
+ u64 prev_size;
+
+ if (!prev_md) {
+ prev_md = md;
+ continue;
+ }
+
+ if (prev_md->type != md->type ||
+ prev_md->attribute != md->attribute) {
+ prev_md = md;
+ continue;
+ }
+
+ prev_size = prev_md->num_pages << EFI_PAGE_SHIFT;
+
+ if (md->phys_addr == (prev_md->phys_addr + prev_size)) {
+ prev_md->num_pages += md->num_pages;
+ md->type = EFI_RESERVED_TYPE;
+ md->attribute = 0;
+ continue;
+ }
+ prev_md = md;
+ }
+}
+
+static void *realloc_pages(void *old_memmap, int old_shift)
+{
+ void *ret;
+
+ ret = (void *)__get_free_pages(GFP_KERNEL, old_shift + 1);
+ if (!ret)
+ goto out;
+
+ /*
+ * A first-time allocation doesn't have anything to copy.
+ */
+ if (!old_memmap)
+ return ret;
+
+ memcpy(ret, old_memmap, PAGE_SIZE << old_shift);
+
+out:
+ free_pages((unsigned long)old_memmap, old_shift);
+ return ret;
+}
+
+/*
+ * Iterate the EFI memory map in reverse order because the regions
+ * will be mapped top-down. The end result is the same as if we had
+ * mapped things forward, but doesn't require us to change the
+ * existing implementation of efi_map_region().
+ */
+static inline void *efi_map_next_entry_reverse(void *entry)
+{
+ /* Initial call */
+ if (!entry)
+ return efi.memmap.map_end - efi.memmap.desc_size;
+
+ entry -= efi.memmap.desc_size;
+ if (entry < efi.memmap.map)
+ return NULL;
+
+ return entry;
+}
+
+/*
+ * efi_map_next_entry - Return the next EFI memory map descriptor
+ * @entry: Previous EFI memory map descriptor
+ *
+ * This is a helper function to iterate over the EFI memory map, which
+ * we do in different orders depending on the current configuration.
+ *
+ * To begin traversing the memory map @entry must be %NULL.
+ *
+ * Returns %NULL when we reach the end of the memory map.
+ */
+static void *efi_map_next_entry(void *entry)
+{
+ if (efi_enabled(EFI_64BIT)) {
+ /*
+ * Starting in UEFI v2.5 the EFI_PROPERTIES_TABLE
+ * config table feature requires us to map all entries
+ * in the same order as they appear in the EFI memory
+ * map. That is to say, entry N must have a lower
+ * virtual address than entry N+1. This is because the
+ * firmware toolchain leaves relative references in
+ * the code/data sections, which are split and become
+ * separate EFI memory regions. Mapping things
+ * out-of-order leads to the firmware accessing
+ * unmapped addresses.
+ *
+ * Since we need to map things this way whether or not
+ * the kernel actually makes use of
+ * EFI_PROPERTIES_TABLE, let's just switch to this
+ * scheme by default for 64-bit.
+ */
+ return efi_map_next_entry_reverse(entry);
+ }
+
+ /* Initial call */
+ if (!entry)
+ return efi.memmap.map;
+
+ entry += efi.memmap.desc_size;
+ if (entry >= efi.memmap.map_end)
+ return NULL;
+
+ return entry;
+}
+
+static bool should_map_region(efi_memory_desc_t *md)
+{
+ /*
+ * Runtime regions always require runtime mappings (obviously).
+ */
+ if (md->attribute & EFI_MEMORY_RUNTIME)
+ return true;
+
+ /*
+ * 32-bit EFI doesn't suffer from the bug that requires us to
+ * reserve boot services regions, and mixed mode support
+ * doesn't exist for 32-bit kernels.
+ */
+ if (IS_ENABLED(CONFIG_X86_32))
+ return false;
+
+ /*
+ * EFI specific purpose memory may be reserved by default
+ * depending on kernel config and boot options.
+ */
+ if (md->type == EFI_CONVENTIONAL_MEMORY &&
+ efi_soft_reserve_enabled() &&
+ (md->attribute & EFI_MEMORY_SP))
+ return false;
+
+ /*
+ * Map all of RAM so that we can access arguments in the 1:1
+ * mapping when making EFI runtime calls.
+ */
+ if (efi_is_mixed()) {
+ if (md->type == EFI_CONVENTIONAL_MEMORY ||
+ md->type == EFI_LOADER_DATA ||
+ md->type == EFI_LOADER_CODE)
+ return true;
+ }
+
+ /*
+ * Map boot services regions as a workaround for buggy
+ * firmware that accesses them even when they shouldn't.
+ *
+ * See efi_{reserve,free}_boot_services().
+ */
+ if (md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA)
+ return true;
+
+ return false;
+}
+
+/*
+ * Map the EFI memory ranges of the runtime services and update new_memmap with
+ * virtual addresses.
+ */
+static void * __init efi_map_regions(int *count, int *pg_shift)
+{
+ void *p, *new_memmap = NULL;
+ unsigned long left = 0;
+ unsigned long desc_size;
+ efi_memory_desc_t *md;
+
+ desc_size = efi.memmap.desc_size;
+
+ p = NULL;
+ while ((p = efi_map_next_entry(p))) {
+ md = p;
+
+ if (!should_map_region(md))
+ continue;
+
+ efi_map_region(md);
+
+ if (left < desc_size) {
+ new_memmap = realloc_pages(new_memmap, *pg_shift);
+ if (!new_memmap)
+ return NULL;
+
+ left += PAGE_SIZE << *pg_shift;
+ (*pg_shift)++;
+ }
+
+ memcpy(new_memmap + (*count * desc_size), md, desc_size);
+
+ left -= desc_size;
+ (*count)++;
+ }
+
+ return new_memmap;
+}
+
+static void __init kexec_enter_virtual_mode(void)
+{
+#ifdef CONFIG_KEXEC_CORE
+ efi_memory_desc_t *md;
+ unsigned int num_pages;
+
+ /*
+ * We don't do virtual mode, since we don't do runtime services, on
+ * non-native EFI.
+ */
+ if (efi_is_mixed()) {
+ efi_memmap_unmap();
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
+ if (efi_alloc_page_tables()) {
+ pr_err("Failed to allocate EFI page tables\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
+ /*
+ * Map EFI regions which were passed via setup_data. The virt_addr is a
+ * fixed address which was used in the first kernel of a kexec boot.
+ */
+ for_each_efi_memory_desc(md)
+ efi_map_region_fixed(md); /* FIXME: add error handling */
+
+ /*
+ * Unregister the early EFI memmap from efi_init() and install
+ * the new EFI memory map.
+ */
+ efi_memmap_unmap();
+
+ if (efi_memmap_init_late(efi.memmap.phys_map,
+ efi.memmap.desc_size * efi.memmap.nr_map)) {
+ pr_err("Failed to remap late EFI memory map\n");
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
+ num_pages = ALIGN(efi.memmap.nr_map * efi.memmap.desc_size, PAGE_SIZE);
+ num_pages >>= PAGE_SHIFT;
+
+ if (efi_setup_page_tables(efi.memmap.phys_map, num_pages)) {
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ return;
+ }
+
+ efi_sync_low_kernel_mappings();
+ efi_native_runtime_setup();
+#endif
+}
+
+/*
+ * This function will switch the EFI runtime services to virtual mode.
+ * Essentially, we look through the EFI memmap and map every region that
+ * has the runtime attribute bit set in its memory descriptor into the
+ * efi_pgd page table.
+ *
+ * The new method does a pagetable switch in a preemption-safe manner
+ * so that we're in a different address space when calling a runtime
+ * function. For function arguments passing we do copy the PUDs of the
+ * kernel page table into efi_pgd prior to each call.
+ *
+ * Specifically for kexec boot, the EFI runtime maps of the previous kernel
+ * should be passed in via setup_data. In that case the runtime ranges will
+ * be mapped to the same virtual addresses as in the first kernel, see
+ * kexec_enter_virtual_mode().
+ */
+static void __init __efi_enter_virtual_mode(void)
+{
+ int count = 0, pg_shift = 0;
+ void *new_memmap = NULL;
+ efi_status_t status;
+ unsigned long pa;
+
+ if (efi_alloc_page_tables()) {
+ pr_err("Failed to allocate EFI page tables\n");
+ goto err;
+ }
+
+ efi_merge_regions();
+ new_memmap = efi_map_regions(&count, &pg_shift);
+ if (!new_memmap) {
+ pr_err("Error reallocating memory, EFI runtime non-functional!\n");
+ goto err;
+ }
+
+ pa = __pa(new_memmap);
+
+ /*
+ * Unregister the early EFI memmap from efi_init() and install
+ * the new EFI memory map that we are about to pass to the
+ * firmware via SetVirtualAddressMap().
+ */
+ efi_memmap_unmap();
+
+ if (efi_memmap_init_late(pa, efi.memmap.desc_size * count)) {
+ pr_err("Failed to remap late EFI memory map\n");
+ goto err;
+ }
+
+ if (efi_enabled(EFI_DBG)) {
+ pr_info("EFI runtime memory map:\n");
+ efi_print_memmap();
+ }
+
+ if (efi_setup_page_tables(pa, 1 << pg_shift))
+ goto err;
+
+ efi_sync_low_kernel_mappings();
+
+ status = efi_set_virtual_address_map(efi.memmap.desc_size * count,
+ efi.memmap.desc_size,
+ efi.memmap.desc_version,
+ (efi_memory_desc_t *)pa,
+ efi_systab_phys);
+ if (status != EFI_SUCCESS) {
+ pr_err("Unable to switch EFI into virtual mode (status=%lx)!\n",
+ status);
+ goto err;
+ }
+
+ efi_check_for_embedded_firmwares();
+ efi_free_boot_services();
+
+ if (!efi_is_mixed())
+ efi_native_runtime_setup();
+ else
+ efi_thunk_runtime_setup();
+
+ /*
+ * Apply more restrictive page table mapping attributes now that
+ * SVAM() has been called and the firmware has performed all
+ * necessary relocation fixups for the new virtual addresses.
+ */
+ efi_runtime_update_mappings();
+
+ /* clean DUMMY object */
+ efi_delete_dummy_variable();
+ return;
+
+err:
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+}
+
+void __init efi_enter_virtual_mode(void)
+{
+ if (efi_enabled(EFI_PARAVIRT))
+ return;
+
+ efi.runtime = (efi_runtime_services_t *)efi_runtime;
+
+ if (efi_setup)
+ kexec_enter_virtual_mode();
+ else
+ __efi_enter_virtual_mode();
+
+ efi_dump_pagetable();
+}
+
+bool efi_is_table_address(unsigned long phys_addr)
+{
+ unsigned int i;
+
+ if (phys_addr == EFI_INVALID_TABLE_ADDR)
+ return false;
+
+ for (i = 0; i < ARRAY_SIZE(efi_tables); i++)
+ if (*(efi_tables[i]) == phys_addr)
+ return true;
+
+ return false;
+}
+
+char *efi_systab_show_arch(char *str)
+{
+ if (uga_phys != EFI_INVALID_TABLE_ADDR)
+ str += sprintf(str, "UGA=0x%lx\n", uga_phys);
+ return str;
+}
+
+#define EFI_FIELD(var) efi_ ## var
+
+#define EFI_ATTR_SHOW(name) \
+static ssize_t name##_show(struct kobject *kobj, \
+ struct kobj_attribute *attr, char *buf) \
+{ \
+ return sprintf(buf, "0x%lx\n", EFI_FIELD(name)); \
+}
+
+EFI_ATTR_SHOW(fw_vendor);
+EFI_ATTR_SHOW(runtime);
+EFI_ATTR_SHOW(config_table);
+
+struct kobj_attribute efi_attr_fw_vendor = __ATTR_RO(fw_vendor);
+struct kobj_attribute efi_attr_runtime = __ATTR_RO(runtime);
+struct kobj_attribute efi_attr_config_table = __ATTR_RO(config_table);
+
+umode_t efi_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n)
+{
+ if (attr == &efi_attr_fw_vendor.attr) {
+ if (efi_enabled(EFI_PARAVIRT) ||
+ efi_fw_vendor == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ } else if (attr == &efi_attr_runtime.attr) {
+ if (efi_runtime == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ } else if (attr == &efi_attr_config_table.attr) {
+ if (efi_config_table == EFI_INVALID_TABLE_ADDR)
+ return 0;
+ }
+ return attr->mode;
+}
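
For clarity, EFI_ATTR_SHOW(fw_vendor) above expands (through EFI_FIELD) into a sysfs show callback over the efi_fw_vendor variable, roughly:

static ssize_t fw_vendor_show(struct kobject *kobj,
			      struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "0x%lx\n", efi_fw_vendor);
}

__ATTR_RO(fw_vendor) then pairs that callback with a read-only (0444) attribute named "fw_vendor", and efi_attr_is_visible() hides the file when the corresponding table address is invalid.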
diff --git a/arch/x86/platform/efi/efi_32.c b/arch/x86/platform/efi/efi_32.c
new file mode 100644
index 000000000..e06a19942
--- /dev/null
+++ b/arch/x86/platform/efi/efi_32.c
@@ -0,0 +1,142 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Extensible Firmware Interface
+ *
+ * Based on Extensible Firmware Interface Specification version 1.0
+ *
+ * Copyright (C) 1999 VA Linux Systems
+ * Copyright (C) 1999 Walt Drummond <drummond@valinux.com>
+ * Copyright (C) 1999-2002 Hewlett-Packard Co.
+ * David Mosberger-Tang <davidm@hpl.hp.com>
+ * Stephane Eranian <eranian@hpl.hp.com>
+ *
+ * Not all EFI Runtime Services are implemented yet, as EFI only
+ * supports physical mode addressing on SoftSDV. This is to be fixed
+ * in a future version. --drummond 1999-07-20
+ *
+ * Implemented EFI runtime services and virtual mode calls. --davidm
+ *
+ * Goutham Rao: <goutham.rao@intel.com>
+ * Skip non-WB memory and ignore empty memory ranges.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/ioport.h>
+#include <linux/efi.h>
+#include <linux/pgtable.h>
+
+#include <asm/io.h>
+#include <asm/desc.h>
+#include <asm/page.h>
+#include <asm/set_memory.h>
+#include <asm/tlbflush.h>
+#include <asm/efi.h>
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ u64 start_pfn, end_pfn, end;
+ unsigned long size;
+ void *va;
+
+ start_pfn = PFN_DOWN(md->phys_addr);
+ size = md->num_pages << PAGE_SHIFT;
+ end = md->phys_addr + size;
+ end_pfn = PFN_UP(end);
+
+ if (pfn_range_is_mapped(start_pfn, end_pfn)) {
+ va = __va(md->phys_addr);
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ set_memory_uc((unsigned long)va, md->num_pages);
+ } else {
+ va = ioremap_cache(md->phys_addr, size);
+ }
+
+ md->virt_addr = (unsigned long)va;
+ if (!va)
+ pr_err("ioremap of 0x%llX failed!\n", md->phys_addr);
+}
+
+/*
+ * To let EFI call a runtime service in physical addressing mode, we need a
+ * prolog/epilog around the invocation to claim the EFI runtime service
+ * handler exclusively and to duplicate a memory mapping in low memory space,
+ * say 0 - 3G.
+ */
+
+int __init efi_alloc_page_tables(void)
+{
+ return 0;
+}
+
+void efi_sync_low_kernel_mappings(void) {}
+
+void __init efi_dump_pagetable(void)
+{
+#ifdef CONFIG_EFI_PGT_DUMP
+ ptdump_walk_pgd_level(NULL, &init_mm);
+#endif
+}
+
+int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+ return 0;
+}
+
+void __init efi_map_region_fixed(efi_memory_desc_t *md) {}
+void __init parse_efi_setup(u64 phys_addr, u32 data_len) {}
+
+efi_status_t efi_call_svam(efi_runtime_services_t * const *,
+ u32, u32, u32, void *, u32);
+
+efi_status_t __init efi_set_virtual_address_map(unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map,
+ unsigned long systab_phys)
+{
+ const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
+ struct desc_ptr gdt_descr;
+ efi_status_t status;
+ unsigned long flags;
+ pgd_t *save_pgd;
+
+ /* Current pgd is swapper_pg_dir, we'll restore it later: */
+ save_pgd = swapper_pg_dir;
+ load_cr3(initial_page_table);
+ __flush_tlb_all();
+
+ gdt_descr.address = get_cpu_gdt_paddr(0);
+ gdt_descr.size = GDT_SIZE - 1;
+ load_gdt(&gdt_descr);
+
+ /* Disable interrupts around EFI calls: */
+ local_irq_save(flags);
+ status = efi_call_svam(&systab->runtime,
+ memory_map_size, descriptor_size,
+ descriptor_version, virtual_map,
+ __pa(&efi.runtime));
+ local_irq_restore(flags);
+
+ load_fixmap_gdt(0);
+ load_cr3(save_pgd);
+ __flush_tlb_all();
+
+ return status;
+}
+
+void __init efi_runtime_update_mappings(void)
+{
+ if (__supported_pte_mask & _PAGE_NX) {
+ efi_memory_desc_t *md;
+
+ /* Make EFI runtime service code area executable */
+ for_each_efi_memory_desc(md) {
+ if (md->type != EFI_RUNTIME_SERVICES_CODE)
+ continue;
+
+ set_memory_x(md->virt_addr, md->num_pages);
+ }
+ }
+}
diff --git a/arch/x86/platform/efi/efi_64.c b/arch/x86/platform/efi/efi_64.c
new file mode 100644
index 000000000..601908bdb
--- /dev/null
+++ b/arch/x86/platform/efi/efi_64.c
@@ -0,0 +1,863 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * x86_64 specific EFI support functions
+ * Based on Extensible Firmware Interface Specification version 1.0
+ *
+ * Copyright (C) 2005-2008 Intel Co.
+ * Fenghua Yu <fenghua.yu@intel.com>
+ * Bibo Mao <bibo.mao@intel.com>
+ * Chandramouli Narayanan <mouli@linux.intel.com>
+ * Huang Ying <ying.huang@intel.com>
+ *
+ * Code to convert EFI to E820 map has been implemented in elilo bootloader
+ * based on a EFI patch by Edgar Hucek. Based on the E820 map, the page table
+ * is setup appropriately for EFI runtime code.
+ * - mouli 06/14/2007.
+ *
+ */
+
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/memblock.h>
+#include <linux/ioport.h>
+#include <linux/mc146818rtc.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/uaccess.h>
+#include <linux/io.h>
+#include <linux/reboot.h>
+#include <linux/slab.h>
+#include <linux/ucs2_string.h>
+#include <linux/cc_platform.h>
+#include <linux/sched/task.h>
+
+#include <asm/setup.h>
+#include <asm/page.h>
+#include <asm/e820/api.h>
+#include <asm/tlbflush.h>
+#include <asm/proto.h>
+#include <asm/efi.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/realmode.h>
+#include <asm/time.h>
+#include <asm/pgalloc.h>
+#include <asm/sev.h>
+
+/*
+ * We allocate runtime services regions top-down, starting from -4G, i.e.
+ * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
+ */
+static u64 efi_va = EFI_VA_START;
+static struct mm_struct *efi_prev_mm;
+
+/*
+ * We need our own copy of the higher levels of the page tables
+ * because we want to avoid inserting EFI region mappings (EFI_VA_END
+ * to EFI_VA_START) into the standard kernel page tables. Everything
+ * else can be shared, see efi_sync_low_kernel_mappings().
+ *
+ * We don't want the pgd on the pgd_list and cannot use pgd_alloc() for the
+ * allocation.
+ */
+int __init efi_alloc_page_tables(void)
+{
+ pgd_t *pgd, *efi_pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ gfp_t gfp_mask;
+
+ gfp_mask = GFP_KERNEL | __GFP_ZERO;
+ efi_pgd = (pgd_t *)__get_free_pages(gfp_mask, PGD_ALLOCATION_ORDER);
+ if (!efi_pgd)
+ goto fail;
+
+ pgd = efi_pgd + pgd_index(EFI_VA_END);
+ p4d = p4d_alloc(&init_mm, pgd, EFI_VA_END);
+ if (!p4d)
+ goto free_pgd;
+
+ pud = pud_alloc(&init_mm, p4d, EFI_VA_END);
+ if (!pud)
+ goto free_p4d;
+
+ efi_mm.pgd = efi_pgd;
+ mm_init_cpumask(&efi_mm);
+ init_new_context(NULL, &efi_mm);
+
+ return 0;
+
+free_p4d:
+ if (pgtable_l5_enabled())
+ free_page((unsigned long)pgd_page_vaddr(*pgd));
+free_pgd:
+ free_pages((unsigned long)efi_pgd, PGD_ALLOCATION_ORDER);
+fail:
+ return -ENOMEM;
+}
+
+/*
+ * Add low kernel mappings for passing arguments to EFI functions.
+ */
+void efi_sync_low_kernel_mappings(void)
+{
+ unsigned num_entries;
+ pgd_t *pgd_k, *pgd_efi;
+ p4d_t *p4d_k, *p4d_efi;
+ pud_t *pud_k, *pud_efi;
+ pgd_t *efi_pgd = efi_mm.pgd;
+
+ pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
+ pgd_k = pgd_offset_k(PAGE_OFFSET);
+
+ num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
+ memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
+
+ pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
+ pgd_k = pgd_offset_k(EFI_VA_END);
+ p4d_efi = p4d_offset(pgd_efi, 0);
+ p4d_k = p4d_offset(pgd_k, 0);
+
+ num_entries = p4d_index(EFI_VA_END);
+ memcpy(p4d_efi, p4d_k, sizeof(p4d_t) * num_entries);
+
+ /*
+ * We share all the PUD entries apart from those that map the
+ * EFI regions. Copy around them.
+ */
+ BUILD_BUG_ON((EFI_VA_START & ~PUD_MASK) != 0);
+ BUILD_BUG_ON((EFI_VA_END & ~PUD_MASK) != 0);
+
+ p4d_efi = p4d_offset(pgd_efi, EFI_VA_END);
+ p4d_k = p4d_offset(pgd_k, EFI_VA_END);
+ pud_efi = pud_offset(p4d_efi, 0);
+ pud_k = pud_offset(p4d_k, 0);
+
+ num_entries = pud_index(EFI_VA_END);
+ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+
+ pud_efi = pud_offset(p4d_efi, EFI_VA_START);
+ pud_k = pud_offset(p4d_k, EFI_VA_START);
+
+ num_entries = PTRS_PER_PUD - pud_index(EFI_VA_START);
+ memcpy(pud_efi, pud_k, sizeof(pud_t) * num_entries);
+}
+
+/*
+ * Wrapper for slow_virt_to_phys() that handles NULL addresses.
+ */
+static inline phys_addr_t
+virt_to_phys_or_null_size(void *va, unsigned long size)
+{
+ phys_addr_t pa;
+
+ if (!va)
+ return 0;
+
+ if (virt_addr_valid(va))
+ return virt_to_phys(va);
+
+ pa = slow_virt_to_phys(va);
+
+ /* check if the object crosses a page boundary */
+ if (WARN_ON((pa ^ (pa + size - 1)) & PAGE_MASK))
+ return 0;
+
+ return pa;
+}
+
+#define virt_to_phys_or_null(addr) \
+ virt_to_phys_or_null_size((addr), sizeof(*(addr)))
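
The WARN_ON in virt_to_phys_or_null_size() detects objects straddling a page boundary: if pa and pa + size - 1 differ in any bit above the page offset, the XOR intersects PAGE_MASK. A standalone illustration (4 KiB pages assumed):

#include <stdbool.h>
#include <stdint.h>

#define PAGE_MASK (~(uint64_t)0xfff)	/* 4 KiB pages assumed */

static bool crosses_page(uint64_t pa, uint64_t size)
{
	/* First and last byte live in different pages iff any bit
	 * above the page offset differs between them. */
	return ((pa ^ (pa + size - 1)) & PAGE_MASK) != 0;
}

/* crosses_page(0x1ff8, 8)  == false: bytes 0x1ff8..0x1fff, one page  */
/* crosses_page(0x1ff8, 16) == true:  bytes 0x1ff8..0x2007, two pages */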
+
+int __init efi_setup_page_tables(unsigned long pa_memmap, unsigned num_pages)
+{
+ extern const u8 __efi64_thunk_ret_tramp[];
+ unsigned long pfn, text, pf, rodata, tramp;
+ struct page *page;
+ unsigned npages;
+ pgd_t *pgd = efi_mm.pgd;
+
+ /*
+ * It can happen that the physical address of new_memmap lands in memory
+ * which is not mapped in the EFI page table. Therefore we need to go
+ * and ident-map those pages containing the map before calling
+ * phys_efi_set_virtual_address_map().
+ */
+ pfn = pa_memmap >> PAGE_SHIFT;
+ pf = _PAGE_NX | _PAGE_RW | _PAGE_ENC;
+ if (kernel_map_pages_in_pgd(pgd, pfn, pa_memmap, num_pages, pf)) {
+ pr_err("Error ident-mapping new memmap (0x%lx)!\n", pa_memmap);
+ return 1;
+ }
+
+ /*
+ * Certain firmware versions are way too sentimental and still believe
+ * they are exclusive and unquestionable owners of the first physical page,
+ * even though they explicitly mark it as EFI_CONVENTIONAL_MEMORY
+ * (but then write-access it later during SetVirtualAddressMap()).
+ *
+ * Create a 1:1 mapping for this page, to avoid triple faults during early
+ * boot with such firmware. We are free to hand this page to the BIOS,
+ * as trim_bios_range() will reserve the first page and isolate it away
+ * from memory allocators anyway.
+ */
+ if (kernel_map_pages_in_pgd(pgd, 0x0, 0x0, 1, pf)) {
+ pr_err("Failed to create 1:1 mapping for the first page!\n");
+ return 1;
+ }
+
+ /*
+ * When SEV-ES is active, the GHCB as set by the kernel will be used
+ * by firmware. Create a 1:1 unencrypted mapping for each GHCB.
+ */
+ if (sev_es_efi_map_ghcbs(pgd)) {
+ pr_err("Failed to create 1:1 mapping for the GHCBs!\n");
+ return 1;
+ }
+
+ /*
+ * When making calls to the firmware everything needs to be 1:1
+ * mapped and addressable with 32-bit pointers. Map the kernel
+ * text and allocate a new stack because we can't rely on the
+ * stack pointer being < 4GB.
+ */
+ if (!efi_is_mixed())
+ return 0;
+
+ page = alloc_page(GFP_KERNEL|__GFP_DMA32);
+ if (!page) {
+ pr_err("Unable to allocate EFI runtime stack < 4GB\n");
+ return 1;
+ }
+
+ efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */
+
+ npages = (_etext - _text) >> PAGE_SHIFT;
+ text = __pa(_text);
+
+ if (kernel_unmap_pages_in_pgd(pgd, text, npages)) {
+ pr_err("Failed to unmap kernel text 1:1 mapping\n");
+ return 1;
+ }
+
+ npages = (__end_rodata - __start_rodata) >> PAGE_SHIFT;
+ rodata = __pa(__start_rodata);
+ pfn = rodata >> PAGE_SHIFT;
+
+ pf = _PAGE_NX | _PAGE_ENC;
+ if (kernel_map_pages_in_pgd(pgd, pfn, rodata, npages, pf)) {
+ pr_err("Failed to map kernel rodata 1:1\n");
+ return 1;
+ }
+
+ tramp = __pa(__efi64_thunk_ret_tramp);
+ pfn = tramp >> PAGE_SHIFT;
+
+ pf = _PAGE_ENC;
+ if (kernel_map_pages_in_pgd(pgd, pfn, tramp, 1, pf)) {
+ pr_err("Failed to map mixed mode return trampoline\n");
+ return 1;
+ }
+
+ return 0;
+}
+
+static void __init __map_region(efi_memory_desc_t *md, u64 va)
+{
+ unsigned long flags = _PAGE_RW;
+ unsigned long pfn;
+ pgd_t *pgd = efi_mm.pgd;
+
+ /*
+ * EFI_RUNTIME_SERVICES_CODE regions typically cover PE/COFF
+ * executable images in memory that consist of both R-X and
+ * RW- sections, so we cannot apply read-only or non-exec
+ * permissions just yet. However, modern EFI systems provide
+ * a memory attributes table that describes those sections
+ * with the appropriate restricted permissions, which are
+ * applied in efi_runtime_update_mappings() below. All other
+ * regions can be mapped non-executable at this point, with
+ * the exception of boot services code regions, but those will
+ * be unmapped again entirely in efi_free_boot_services().
+ */
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_RUNTIME_SERVICES_CODE)
+ flags |= _PAGE_NX;
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ flags |= _PAGE_PCD;
+
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT) &&
+ md->type != EFI_MEMORY_MAPPED_IO)
+ flags |= _PAGE_ENC;
+
+ pfn = md->phys_addr >> PAGE_SHIFT;
+ if (kernel_map_pages_in_pgd(pgd, pfn, va, md->num_pages, flags))
+ pr_warn("Error mapping PA 0x%llx -> VA 0x%llx!\n",
+ md->phys_addr, va);
+}
+
+void __init efi_map_region(efi_memory_desc_t *md)
+{
+ unsigned long size = md->num_pages << PAGE_SHIFT;
+ u64 pa = md->phys_addr;
+
+ /*
+ * Make sure the 1:1 mappings are present as a catch-all for b0rked
+ * firmware which doesn't update all internal pointers after switching
+ * to virtual mode and would otherwise crap on us.
+ */
+ __map_region(md, md->phys_addr);
+
+ /*
+ * Enforce the 1:1 mapping as the default virtual address when
+ * booting in EFI mixed mode, because even though we may be
+ * running a 64-bit kernel, the firmware may only be 32-bit.
+ */
+ if (efi_is_mixed()) {
+ md->virt_addr = md->phys_addr;
+ return;
+ }
+
+ efi_va -= size;
+
+ /* Is PA 2M-aligned? */
+ if (!(pa & (PMD_SIZE - 1))) {
+ efi_va &= PMD_MASK;
+ } else {
+ u64 pa_offset = pa & (PMD_SIZE - 1);
+ u64 prev_va = efi_va;
+
+ /* get us the same offset within this 2M page */
+ efi_va = (efi_va & PMD_MASK) + pa_offset;
+
+ if (efi_va > prev_va)
+ efi_va -= PMD_SIZE;
+ }
+
+ if (efi_va < EFI_VA_END) {
+ pr_warn(FW_WARN "VA address range overflow!\n");
+ return;
+ }
+
+ /* Do the VA map */
+ __map_region(md, efi_va);
+ md->virt_addr = efi_va;
+}
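+
+/*
+ * Worked example for the offset logic above (made-up addresses): with
+ * efi_va = 0xffffffffc0000000 after the size subtraction and a region at
+ * pa = 0x7f234000, the offset within the 2M page is 0x34000.  Re-adding it
+ * yields 0xffffffffc0034000, which is above the previous cursor, so one
+ * PMD_SIZE step down gives 0xffffffffbfe34000.  PA and VA now share the
+ * same offset within their 2M pages, which keeps the door open for
+ * large-page mappings in kernel_map_pages_in_pgd().
+ */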
+
+/*
+ * The kexec kernel uses efi_map_region_fixed() to map EFI runtime memory
+ * ranges. md->virt_addr is the original virtual address that the first
+ * kernel had already mapped before kexec.
+ */
+void __init efi_map_region_fixed(efi_memory_desc_t *md)
+{
+ __map_region(md, md->phys_addr);
+ __map_region(md, md->virt_addr);
+}
+
+void __init parse_efi_setup(u64 phys_addr, u32 data_len)
+{
+ efi_setup = phys_addr + sizeof(struct setup_data);
+}
+
+static int __init efi_update_mappings(efi_memory_desc_t *md, unsigned long pf)
+{
+ unsigned long pfn;
+ pgd_t *pgd = efi_mm.pgd;
+ int err1, err2;
+
+ /* Update the 1:1 mapping */
+ pfn = md->phys_addr >> PAGE_SHIFT;
+ err1 = kernel_map_pages_in_pgd(pgd, pfn, md->phys_addr, md->num_pages, pf);
+ if (err1) {
+ pr_err("Error while updating 1:1 mapping PA 0x%llx -> VA 0x%llx!\n",
+ md->phys_addr, md->virt_addr);
+ }
+
+ err2 = kernel_map_pages_in_pgd(pgd, pfn, md->virt_addr, md->num_pages, pf);
+ if (err2) {
+ pr_err("Error while updating VA mapping PA 0x%llx -> VA 0x%llx!\n",
+ md->phys_addr, md->virt_addr);
+ }
+
+ return err1 || err2;
+}
+
+static int __init efi_update_mem_attr(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+ unsigned long pf = 0;
+
+ if (md->attribute & EFI_MEMORY_XP)
+ pf |= _PAGE_NX;
+
+ if (!(md->attribute & EFI_MEMORY_RO))
+ pf |= _PAGE_RW;
+
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ pf |= _PAGE_ENC;
+
+ return efi_update_mappings(md, pf);
+}
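+
+/*
+ * For example, a descriptor carrying EFI_MEMORY_XP | EFI_MEMORY_RO ends up
+ * with pf = _PAGE_NX (read-only, non-executable), while one with neither
+ * attribute gets pf = _PAGE_RW (writable, executable), plus _PAGE_ENC on
+ * memory-encrypted guests in both cases.
+ */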
+
+void __init efi_runtime_update_mappings(void)
+{
+ efi_memory_desc_t *md;
+
+ /*
+ * Use the EFI Memory Attribute Table for mapping permissions if it
+ * exists, since it is intended to supersede EFI_PROPERTIES_TABLE.
+ */
+ if (efi_enabled(EFI_MEM_ATTR)) {
+ efi_memattr_apply_permissions(NULL, efi_update_mem_attr);
+ return;
+ }
+
+ /*
+ * EFI_MEMORY_ATTRIBUTES_TABLE is intended to replace
+ * EFI_PROPERTIES_TABLE. So, use EFI_PROPERTIES_TABLE to update
+ * permissions only if EFI_MEMORY_ATTRIBUTES_TABLE is not
+ * published by the firmware. Even if we find a buggy implementation of
+ * EFI_MEMORY_ATTRIBUTES_TABLE, don't fall back to
+ * EFI_PROPERTIES_TABLE, because of the same reason.
+ */
+
+ if (!efi_enabled(EFI_NX_PE_DATA))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long pf = 0;
+
+ if (!(md->attribute & EFI_MEMORY_RUNTIME))
+ continue;
+
+ if (!(md->attribute & EFI_MEMORY_WB))
+ pf |= _PAGE_PCD;
+
+ if ((md->attribute & EFI_MEMORY_XP) ||
+ (md->type == EFI_RUNTIME_SERVICES_DATA))
+ pf |= _PAGE_NX;
+
+ if (!(md->attribute & EFI_MEMORY_RO) &&
+ (md->type != EFI_RUNTIME_SERVICES_CODE))
+ pf |= _PAGE_RW;
+
+ if (cc_platform_has(CC_ATTR_GUEST_MEM_ENCRYPT))
+ pf |= _PAGE_ENC;
+
+ efi_update_mappings(md, pf);
+ }
+}
+
+void __init efi_dump_pagetable(void)
+{
+#ifdef CONFIG_EFI_PGT_DUMP
+ ptdump_walk_pgd_level(NULL, &efi_mm);
+#endif
+}
+
+/*
+ * Makes the calling thread switch to/from the efi_mm context. Can be used
+ * from both kernel-thread and user context. Preemption must remain disabled
+ * while the efi_mm is borrowed. mmgrab()/mmdrop() is not used because the mm
+ * cannot change under us. Callers must ensure there are no concurrent calls
+ * to this function.
+ */
+void efi_enter_mm(void)
+{
+ efi_prev_mm = current->active_mm;
+ current->active_mm = &efi_mm;
+ switch_mm(efi_prev_mm, &efi_mm, NULL);
+}
+
+void efi_leave_mm(void)
+{
+ current->active_mm = efi_prev_mm;
+ switch_mm(&efi_mm, efi_prev_mm, NULL);
+}
+
+static DEFINE_SPINLOCK(efi_runtime_lock);
+
+/*
+ * DS and ES contain user values. We need to save them.
+ * The 32-bit EFI code needs a valid DS, ES, and SS. There's no
+ * need to save the old SS: __KERNEL_DS is always acceptable.
+ */
+#define __efi_thunk(func, ...) \
+({ \
+ unsigned short __ds, __es; \
+ efi_status_t ____s; \
+ \
+ savesegment(ds, __ds); \
+ savesegment(es, __es); \
+ \
+ loadsegment(ss, __KERNEL_DS); \
+ loadsegment(ds, __KERNEL_DS); \
+ loadsegment(es, __KERNEL_DS); \
+ \
+ ____s = efi64_thunk(efi.runtime->mixed_mode.func, __VA_ARGS__); \
+ \
+ loadsegment(ds, __ds); \
+ loadsegment(es, __es); \
+ \
+ ____s ^= (____s & BIT(31)) | (____s & BIT_ULL(31)) << 32; \
+ ____s; \
+})
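+
+/*
+ * The final XOR above widens a 32-bit EFI error code into its 64-bit form:
+ * for an error, bit 31 is set, so the mask is BIT(31) | BIT(63) and the XOR
+ * clears bit 31 while setting bit 63.  E.g. a 32-bit EFI_UNSUPPORTED
+ * (0x80000003) becomes 0x8000000000000003; success codes, with bit 31
+ * clear, pass through unchanged.
+ */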
+
+/*
+ * Switch to the EFI page tables early so that we can access the 1:1
+ * runtime services mappings which are not mapped in any other page
+ * tables.
+ *
+ * Also, disable interrupts because the IDT points to 64-bit handlers,
+ * which aren't going to function correctly when we switch to 32-bit.
+ */
+#define efi_thunk(func...) \
+({ \
+ efi_status_t __s; \
+ \
+ arch_efi_call_virt_setup(); \
+ \
+ __s = __efi_thunk(func); \
+ \
+ arch_efi_call_virt_teardown(); \
+ \
+ __s; \
+})
+
+static efi_status_t __init __no_sanitize_address
+efi_thunk_set_virtual_address_map(unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map)
+{
+ efi_status_t status;
+ unsigned long flags;
+
+ efi_sync_low_kernel_mappings();
+ local_irq_save(flags);
+
+ efi_enter_mm();
+
+ status = __efi_thunk(set_virtual_address_map, memory_map_size,
+ descriptor_size, descriptor_version, virtual_map);
+
+ efi_leave_mm();
+ local_irq_restore(flags);
+
+ return status;
+}
+
+static efi_status_t efi_thunk_get_time(efi_time_t *tm, efi_time_cap_t *tc)
+{
+ return EFI_UNSUPPORTED;
+}
+
+static efi_status_t efi_thunk_set_time(efi_time_t *tm)
+{
+ return EFI_UNSUPPORTED;
+}
+
+static efi_status_t
+efi_thunk_get_wakeup_time(efi_bool_t *enabled, efi_bool_t *pending,
+ efi_time_t *tm)
+{
+ return EFI_UNSUPPORTED;
+}
+
+static efi_status_t
+efi_thunk_set_wakeup_time(efi_bool_t enabled, efi_time_t *tm)
+{
+ return EFI_UNSUPPORTED;
+}
+
+static unsigned long efi_name_size(efi_char16_t *name)
+{
+ return ucs2_strsize(name, EFI_VAR_NAME_LEN) + 1;
+}
+
+static efi_status_t
+efi_thunk_get_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 *attr, unsigned long *data_size, void *data)
+{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name, phys_vendor, phys_attr;
+ u32 phys_data_size, phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ *vnd = *vendor;
+
+ phys_data_size = virt_to_phys_or_null(data_size);
+ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_attr = virt_to_phys_or_null(attr);
+ phys_data = virt_to_phys_or_null_size(data, *data_size);
+
+ if (!phys_name || (data && !phys_data))
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(get_variable, phys_name, phys_vendor,
+ phys_attr, phys_data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size, void *data)
+{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ *vnd = *vendor;
+
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+ if (!phys_name || (data && !phys_data))
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_set_variable_nonblocking(efi_char16_t *name, efi_guid_t *vendor,
+ u32 attr, unsigned long data_size,
+ void *data)
+{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ u32 phys_name, phys_vendor, phys_data;
+ efi_status_t status;
+ unsigned long flags;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
+ *vnd = *vendor;
+
+ phys_name = virt_to_phys_or_null_size(name, efi_name_size(name));
+ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+ if (!phys_name || (data && !phys_data))
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(set_variable, phys_name, phys_vendor,
+ attr, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_get_next_variable(unsigned long *name_size,
+ efi_char16_t *name,
+ efi_guid_t *vendor)
+{
+ u8 buf[24] __aligned(8);
+ efi_guid_t *vnd = PTR_ALIGN((efi_guid_t *)buf, sizeof(*vnd));
+ efi_status_t status;
+ u32 phys_name_size, phys_name, phys_vendor;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ *vnd = *vendor;
+
+ phys_name_size = virt_to_phys_or_null(name_size);
+ phys_vendor = virt_to_phys_or_null(vnd);
+ phys_name = virt_to_phys_or_null_size(name, *name_size);
+
+ if (!phys_name)
+ status = EFI_INVALID_PARAMETER;
+ else
+ status = efi_thunk(get_next_variable, phys_name_size,
+ phys_name, phys_vendor);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ *vendor = *vnd;
+ return status;
+}
+
+static efi_status_t
+efi_thunk_get_next_high_mono_count(u32 *count)
+{
+ return EFI_UNSUPPORTED;
+}
+
+static void
+efi_thunk_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data)
+{
+ u32 phys_data;
+ unsigned long flags;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ phys_data = virt_to_phys_or_null_size(data, data_size);
+
+ efi_thunk(reset_system, reset_type, status, data_size, phys_data);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+}
+
+static efi_status_t
+efi_thunk_update_capsule(efi_capsule_header_t **capsules,
+ unsigned long count, unsigned long sg_list)
+{
+ /*
+ * To properly support this function we would need to repackage
+ * 'capsules' because the firmware doesn't understand 64-bit
+ * pointers.
+ */
+ return EFI_UNSUPPORTED;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+ u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ spin_lock_irqsave(&efi_runtime_lock, flags);
+
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
+
+ status = efi_thunk(query_variable_info, attr, phys_storage,
+ phys_remaining, phys_max);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_query_variable_info_nonblocking(u32 attr, u64 *storage_space,
+ u64 *remaining_space,
+ u64 *max_variable_size)
+{
+ efi_status_t status;
+ u32 phys_storage, phys_remaining, phys_max;
+ unsigned long flags;
+
+ if (efi.runtime_version < EFI_2_00_SYSTEM_TABLE_REVISION)
+ return EFI_UNSUPPORTED;
+
+ if (!spin_trylock_irqsave(&efi_runtime_lock, flags))
+ return EFI_NOT_READY;
+
+ phys_storage = virt_to_phys_or_null(storage_space);
+ phys_remaining = virt_to_phys_or_null(remaining_space);
+ phys_max = virt_to_phys_or_null(max_variable_size);
+
+ status = efi_thunk(query_variable_info, attr, phys_storage,
+ phys_remaining, phys_max);
+
+ spin_unlock_irqrestore(&efi_runtime_lock, flags);
+
+ return status;
+}
+
+static efi_status_t
+efi_thunk_query_capsule_caps(efi_capsule_header_t **capsules,
+ unsigned long count, u64 *max_size,
+ int *reset_type)
+{
+ /*
+ * To properly support this function we would need to repackage
+ * 'capsules' because the firmware doesn't understand 64-bit
+ * pointers.
+ */
+ return EFI_UNSUPPORTED;
+}
+
+void __init efi_thunk_runtime_setup(void)
+{
+ if (!IS_ENABLED(CONFIG_EFI_MIXED))
+ return;
+
+ efi.get_time = efi_thunk_get_time;
+ efi.set_time = efi_thunk_set_time;
+ efi.get_wakeup_time = efi_thunk_get_wakeup_time;
+ efi.set_wakeup_time = efi_thunk_set_wakeup_time;
+ efi.get_variable = efi_thunk_get_variable;
+ efi.get_next_variable = efi_thunk_get_next_variable;
+ efi.set_variable = efi_thunk_set_variable;
+ efi.set_variable_nonblocking = efi_thunk_set_variable_nonblocking;
+ efi.get_next_high_mono_count = efi_thunk_get_next_high_mono_count;
+ efi.reset_system = efi_thunk_reset_system;
+ efi.query_variable_info = efi_thunk_query_variable_info;
+ efi.query_variable_info_nonblocking = efi_thunk_query_variable_info_nonblocking;
+ efi.update_capsule = efi_thunk_update_capsule;
+ efi.query_capsule_caps = efi_thunk_query_capsule_caps;
+}
+
+efi_status_t __init __no_sanitize_address
+efi_set_virtual_address_map(unsigned long memory_map_size,
+ unsigned long descriptor_size,
+ u32 descriptor_version,
+ efi_memory_desc_t *virtual_map,
+ unsigned long systab_phys)
+{
+ const efi_system_table_t *systab = (efi_system_table_t *)systab_phys;
+ efi_status_t status;
+ unsigned long flags;
+
+ if (efi_is_mixed())
+ return efi_thunk_set_virtual_address_map(memory_map_size,
+ descriptor_size,
+ descriptor_version,
+ virtual_map);
+ efi_enter_mm();
+
+ efi_fpu_begin();
+
+ /* Disable interrupts around EFI calls: */
+ local_irq_save(flags);
+ status = arch_efi_call_virt(efi.runtime, set_virtual_address_map,
+ memory_map_size, descriptor_size,
+ descriptor_version, virtual_map);
+ local_irq_restore(flags);
+
+ efi_fpu_end();
+
+ /* grab the virtually remapped EFI runtime services table pointer */
+ efi.runtime = READ_ONCE(systab->runtime);
+
+ efi_leave_mm();
+
+ return status;
+}
diff --git a/arch/x86/platform/efi/efi_stub_32.S b/arch/x86/platform/efi/efi_stub_32.S
new file mode 100644
index 000000000..f3cfdb1c9
--- /dev/null
+++ b/arch/x86/platform/efi/efi_stub_32.S
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * EFI call stub for IA32.
+ *
+ * This stub allows us to make EFI calls in physical mode with interrupts
+ * turned off.
+ */
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+#include <asm/asm-offsets.h>
+#include <asm/page_types.h>
+
+ __INIT
+SYM_FUNC_START(efi_call_svam)
+ push %ebp
+ movl %esp, %ebp
+ push %ebx
+
+ push 16(%esp)
+ push 16(%esp)
+ push %ecx
+ push %edx
+ movl %eax, %ebx // &systab_phys->runtime
+
+ /*
+ * Switch to the flat mapped alias of this routine, by jumping to the
+ * address of label '1' after subtracting PAGE_OFFSET from it.
+ */
+ movl $1f, %edx
+ subl $__PAGE_OFFSET, %edx
+ jmp *%edx
+1:
+
+ /* disable paging */
+ movl %cr0, %edx
+ andl $0x7fffffff, %edx
+ movl %edx, %cr0
+
+ /* convert the stack pointer to a flat mapped address */
+ subl $__PAGE_OFFSET, %esp
+
+ /* call the EFI routine */
+ movl (%eax), %eax
+ call *EFI_svam(%eax)
+
+ /* grab the virtually remapped EFI runtime services table pointer */
+ movl (%ebx), %ecx
+ movl 36(%esp), %edx // &efi.runtime
+ movl %ecx, (%edx)
+
+ /* re-enable paging */
+ movl %cr0, %edx
+ orl $0x80000000, %edx
+ movl %edx, %cr0
+
+ movl 16(%esp), %ebx
+ leave
+ RET
+SYM_FUNC_END(efi_call_svam)
diff --git a/arch/x86/platform/efi/efi_stub_64.S b/arch/x86/platform/efi/efi_stub_64.S
new file mode 100644
index 000000000..2206b8bc4
--- /dev/null
+++ b/arch/x86/platform/efi/efi_stub_64.S
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Function calling ABI conversion from Linux to EFI for x86_64
+ *
+ * Copyright (C) 2007 Intel Corp
+ * Bibo Mao <bibo.mao@intel.com>
+ * Huang Ying <ying.huang@intel.com>
+ */
+
+#include <linux/linkage.h>
+#include <asm/nospec-branch.h>
+
+SYM_FUNC_START(__efi_call)
+ pushq %rbp
+ movq %rsp, %rbp
+ and $~0xf, %rsp
+ mov 16(%rbp), %rax
+ subq $48, %rsp
+ mov %r9, 32(%rsp)
+ mov %rax, 40(%rsp)
+ mov %r8, %r9
+ mov %rcx, %r8
+ mov %rsi, %rcx
+ CALL_NOSPEC rdi
+ leave
+ RET
+SYM_FUNC_END(__efi_call)
diff --git a/arch/x86/platform/efi/efi_thunk_64.S b/arch/x86/platform/efi/efi_thunk_64.S
new file mode 100644
index 000000000..c4b1144f9
--- /dev/null
+++ b/arch/x86/platform/efi/efi_thunk_64.S
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2014 Intel Corporation; author Matt Fleming
+ *
+ * Support for invoking 32-bit EFI runtime services from a 64-bit
+ * kernel.
+ *
+ * The below thunking functions are only used after ExitBootServices()
+ * has been called. This simplifies things considerably as compared with
+ * the early EFI thunking because we can leave all the kernel state
+ * intact (GDT, IDT, etc) and simply invoke the 32-bit EFI runtime
+ * services from __KERNEL32_CS. This means we can continue to service
+ * interrupts across an EFI mixed mode call.
+ *
+ * We do, however, need to handle the fact that we're running in a full
+ * 64-bit virtual address space. Things like the stack and instruction
+ * addresses need to be accessible by the 32-bit firmware, so we rely on
+ * using the identity mappings in the EFI page table to access the stack
+ * and kernel text (see efi_setup_page_tables()).
+ */
+
+#include <linux/linkage.h>
+#include <linux/objtool.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+
+ .text
+ .code64
+SYM_FUNC_START(__efi64_thunk)
+STACK_FRAME_NON_STANDARD __efi64_thunk
+ push %rbp
+ push %rbx
+
+ /*
+ * Switch to 1:1 mapped 32-bit stack pointer.
+ */
+ movq %rsp, %rax
+ movq efi_mixed_mode_stack_pa(%rip), %rsp
+ push %rax
+
+ /*
+ * Copy args passed via the stack
+ */
+ subq $0x24, %rsp
+ movq 0x18(%rax), %rbp
+ movq 0x20(%rax), %rbx
+ movq 0x28(%rax), %rax
+ movl %ebp, 0x18(%rsp)
+ movl %ebx, 0x1c(%rsp)
+ movl %eax, 0x20(%rsp)
+
+ /*
+ * Calculate the physical address of the kernel text.
+ */
+ movq $__START_KERNEL_map, %rax
+ subq phys_base(%rip), %rax
+
+ leaq 1f(%rip), %rbp
+ leaq 2f(%rip), %rbx
+ subq %rax, %rbp
+ subq %rax, %rbx
+
+ movl %ebx, 0x0(%rsp) /* return address */
+ movl %esi, 0x4(%rsp)
+ movl %edx, 0x8(%rsp)
+ movl %ecx, 0xc(%rsp)
+ movl %r8d, 0x10(%rsp)
+ movl %r9d, 0x14(%rsp)
+
+ /* Switch to 32-bit descriptor */
+ pushq $__KERNEL32_CS
+ pushq %rdi /* EFI runtime service address */
+ lretq
+
+ // This return instruction is not needed for correctness, as it will
+ // never be reached. It only exists to make objtool happy, which will
+ // otherwise complain about unreachable instructions in the callers.
+ RET
+SYM_FUNC_END(__efi64_thunk)
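+
+/*
+ * Rough sketch (derived from the code above) of the 32-bit stack frame as
+ * seen by the EFI service on entry, relative to its stack pointer:
+ *
+ *	0x00	return address: 1:1 mapped address of label 2 below
+ *	0x04	EFI call arguments 1-5 from %esi, %edx, %ecx, %r8d, %r9d
+ *	0x18	EFI call arguments 6-8, copied from the 64-bit stack
+ *	0x24	saved 64-bit %rsp, reloaded by the return trampoline via
+ *		"movq 0x20(%rsp), %rsp" (the firmware's own ret has already
+ *		consumed the four-byte return address by then)
+ */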
+
+ .section ".rodata", "a", @progbits
+ .balign 16
+SYM_DATA_START(__efi64_thunk_ret_tramp)
+1: movq 0x20(%rsp), %rsp
+ pop %rbx
+ pop %rbp
+ ret
+ int3
+
+ .code32
+2: pushl $__KERNEL_CS
+ pushl %ebp
+ lret
+SYM_DATA_END(__efi64_thunk_ret_tramp)
+
+ .bss
+ .balign 8
+SYM_DATA(efi_mixed_mode_stack_pa, .quad 0)
diff --git a/arch/x86/platform/efi/quirks.c b/arch/x86/platform/efi/quirks.c
new file mode 100644
index 000000000..b0b848d69
--- /dev/null
+++ b/arch/x86/platform/efi/quirks.c
@@ -0,0 +1,773 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#define pr_fmt(fmt) "efi: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/time.h>
+#include <linux/types.h>
+#include <linux/efi.h>
+#include <linux/slab.h>
+#include <linux/memblock.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+
+#include <asm/e820/api.h>
+#include <asm/efi.h>
+#include <asm/uv/uv.h>
+#include <asm/cpu_device_id.h>
+#include <asm/realmode.h>
+#include <asm/reboot.h>
+
+#define EFI_MIN_RESERVE 5120
+
+#define EFI_DUMMY_GUID \
+ EFI_GUID(0x4424ac57, 0xbe4b, 0x47dd, 0x9e, 0x97, 0xed, 0x50, 0xf0, 0x9f, 0x92, 0xa9)
+
+#define QUARK_CSH_SIGNATURE 0x5f435348 /* _CSH */
+#define QUARK_SECURITY_HEADER_SIZE 0x400
+
+/*
+ * Header prepended to the standard EFI capsule on Quark systems that are
+ * based on the Intel firmware BSP.
+ * @csh_signature: Unique identifier to sanity check signed module
+ * presence ("_CSH").
+ * @version: Current version of CSH used. Should be one for Quark A0.
+ * @modulesize: Size of the entire module including the module header
+ * and payload.
+ * @security_version_number_index: Index of SVN to use for validation of signed
+ * module.
+ * @security_version_number: Used to prevent against roll back of modules.
+ * @rsvd_module_id: Currently unused for Clanton (Quark).
+ * @rsvd_module_vendor: Vendor Identifier. For Intel products value is
+ * 0x00008086.
+ * @rsvd_date: BCD representation of build date as yyyymmdd, where
+ * yyyy=4 digit year, mm=1-12, dd=1-31.
+ * @headersize: Total length of the header including any
+ * padding optionally added by the signing tool.
+ * @hash_algo: What Hash is used in the module signing.
+ * @cryp_algo: What Crypto is used in the module signing.
+ * @keysize: Total length of the key data including any
+ * padding optionally added by the signing tool.
+ * @signaturesize: Total length of the signature including any
+ * padding optionally added by the signing tool.
+ * @rsvd_next_header: 32-bit pointer to the next Secure Boot Module in the
+ * chain, if there is a next header.
+ * @rsvd: Reserved, padding structure to required size.
+ *
+ * See also QuartSecurityHeader_t in
+ * Quark_EDKII_v1.2.1.1/QuarkPlatformPkg/Include/QuarkBootRom.h
+ * from https://downloadcenter.intel.com/download/23197/Intel-Quark-SoC-X1000-Board-Support-Package-BSP
+ */
+struct quark_security_header {
+ u32 csh_signature;
+ u32 version;
+ u32 modulesize;
+ u32 security_version_number_index;
+ u32 security_version_number;
+ u32 rsvd_module_id;
+ u32 rsvd_module_vendor;
+ u32 rsvd_date;
+ u32 headersize;
+ u32 hash_algo;
+ u32 cryp_algo;
+ u32 keysize;
+ u32 signaturesize;
+ u32 rsvd_next_header;
+ u32 rsvd[2];
+};
+
+static const efi_char16_t efi_dummy_name[] = L"DUMMY";
+
+static bool efi_no_storage_paranoia;
+
+/*
+ * Some firmware implementations refuse to boot if there's insufficient
+ * space in the variable store. The implementation of garbage collection
+ * in some FW versions causes stale (deleted) variables to take up space
+ * longer than intended, and space is only freed once the store becomes
+ * almost completely full.
+ *
+ * Enabling this option disables the space checks in
+ * efi_query_variable_store() and forces garbage collection.
+ *
+ * Only enable this option if deleting EFI variables does not free up
+ * space in your variable store, e.g. if despite deleting variables
+ * you're unable to create new ones.
+ */
+static int __init setup_storage_paranoia(char *arg)
+{
+ efi_no_storage_paranoia = true;
+ return 0;
+}
+early_param("efi_no_storage_paranoia", setup_storage_paranoia);
+
+/*
+ * Deleting the dummy variable which kicks off garbage collection
+ */
+void efi_delete_dummy_variable(void)
+{
+ efi.set_variable_nonblocking((efi_char16_t *)efi_dummy_name,
+ &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS, 0, NULL);
+}
+
+/*
+ * In the nonblocking case we do not attempt to perform garbage
+ * collection if we do not have enough free space. Rather, we do the
+ * bare minimum check and give up immediately if the available space
+ * is below EFI_MIN_RESERVE.
+ *
+ * This function is intended to be small and simple because it is
+ * invoked from crash handler paths.
+ */
+static efi_status_t
+query_variable_store_nonblocking(u32 attributes, unsigned long size)
+{
+ efi_status_t status;
+ u64 storage_size, remaining_size, max_size;
+
+ status = efi.query_variable_info_nonblocking(attributes, &storage_size,
+ &remaining_size,
+ &max_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ if (remaining_size - size < EFI_MIN_RESERVE)
+ return EFI_OUT_OF_RESOURCES;
+
+ return EFI_SUCCESS;
+}
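+
+/*
+ * Worked example (illustrative numbers): with remaining_size = 8192 and a
+ * 4096-byte write, 8192 - 4096 = 4096 < EFI_MIN_RESERVE (5120), so the
+ * write is refused with EFI_OUT_OF_RESOURCES even though it would fit.
+ */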
+
+/*
+ * Some firmware implementations refuse to boot if there's insufficient space
+ * in the variable store. Ensure that we never use more than a safe limit.
+ *
+ * Return EFI_SUCCESS if it is safe to write 'size' bytes to the variable
+ * store.
+ */
+efi_status_t efi_query_variable_store(u32 attributes, unsigned long size,
+ bool nonblocking)
+{
+ efi_status_t status;
+ u64 storage_size, remaining_size, max_size;
+
+ if (!(attributes & EFI_VARIABLE_NON_VOLATILE))
+ return 0;
+
+ if (nonblocking)
+ return query_variable_store_nonblocking(attributes, size);
+
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /*
+ * We guard against such firmware by refusing the write if permitting it
+ * would reduce the available space to under 5KB (EFI_MIN_RESERVE). This
+ * figure was provided by Samsung, so it should be safe.
+ */
+ if ((remaining_size - size < EFI_MIN_RESERVE) &&
+ !efi_no_storage_paranoia) {
+
+ /*
+ * Triggering garbage collection may require that the firmware
+ * generate a real EFI_OUT_OF_RESOURCES error. We can force
+ * that by attempting to use more space than is available.
+ */
+ unsigned long dummy_size = remaining_size + 1024;
+ void *dummy = kzalloc(dummy_size, GFP_KERNEL);
+
+ if (!dummy)
+ return EFI_OUT_OF_RESOURCES;
+
+ status = efi.set_variable((efi_char16_t *)efi_dummy_name,
+ &EFI_DUMMY_GUID,
+ EFI_VARIABLE_NON_VOLATILE |
+ EFI_VARIABLE_BOOTSERVICE_ACCESS |
+ EFI_VARIABLE_RUNTIME_ACCESS,
+ dummy_size, dummy);
+
+ if (status == EFI_SUCCESS) {
+ /*
+ * The write should have failed; if it somehow succeeded,
+ * make sure the dummy variable is deleted again.
+ */
+ efi_delete_dummy_variable();
+ }
+
+ kfree(dummy);
+
+ /*
+ * The runtime code may now have triggered a garbage collection
+ * run, so check the variable info again
+ */
+ status = efi.query_variable_info(attributes, &storage_size,
+ &remaining_size, &max_size);
+
+ if (status != EFI_SUCCESS)
+ return status;
+
+ /*
+ * There still isn't enough room, so return an error
+ */
+ if (remaining_size - size < EFI_MIN_RESERVE)
+ return EFI_OUT_OF_RESOURCES;
+ }
+
+ return EFI_SUCCESS;
+}
+EXPORT_SYMBOL_GPL(efi_query_variable_store);
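+
+/*
+ * Sketch of the intended caller pattern (illustrative only; the real
+ * callers live in the generic efivars code):
+ *
+ *	status = efi_query_variable_store(attributes, data_size, false);
+ *	if (status != EFI_SUCCESS)
+ *		return status;		(refuse the write)
+ *	status = efi.set_variable(name, &vendor, attributes,
+ *				  data_size, data);
+ */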
+
+/*
+ * The UEFI specification makes it clear that the operating system is
+ * free to do whatever it wants with boot services code after
+ * ExitBootServices() has been called. Ignoring this recommendation, a
+ * significant number of EFI implementations continue calling into boot
+ * services code (SetVirtualAddressMap). In order to work around such
+ * buggy implementations we reserve boot services region during EFI
+ * init and make sure it stays executable. Then, after
+ * SetVirtualAddressMap(), it is discarded.
+ *
+ * However, some boot services regions contain data that is required
+ * by drivers, so we need to track which memory ranges can never be
+ * freed. This is done by tagging those regions with the
+ * EFI_MEMORY_RUNTIME attribute.
+ *
+ * Any driver that wants to mark a region as reserved must use
+ * efi_mem_reserve() which will insert a new EFI memory descriptor
+ * into efi.memmap (splitting existing regions if necessary) and tag
+ * it with EFI_MEMORY_RUNTIME.
+ */
+void __init efi_arch_mem_reserve(phys_addr_t addr, u64 size)
+{
+ struct efi_memory_map_data data = { 0 };
+ struct efi_mem_range mr;
+ efi_memory_desc_t md;
+ int num_entries;
+ void *new;
+
+ if (efi_mem_desc_lookup(addr, &md) ||
+ md.type != EFI_BOOT_SERVICES_DATA) {
+ pr_err("Failed to lookup EFI memory descriptor for %pa\n", &addr);
+ return;
+ }
+
+ if (addr + size > md.phys_addr + (md.num_pages << EFI_PAGE_SHIFT)) {
+ pr_err("Region spans EFI memory descriptors, %pa\n", &addr);
+ return;
+ }
+
+ size += addr % EFI_PAGE_SIZE;
+ size = round_up(size, EFI_PAGE_SIZE);
+ addr = round_down(addr, EFI_PAGE_SIZE);
+
+ mr.range.start = addr;
+ mr.range.end = addr + size - 1;
+ mr.attribute = md.attribute | EFI_MEMORY_RUNTIME;
+
+ num_entries = efi_memmap_split_count(&md, &mr.range);
+ num_entries += efi.memmap.nr_map;
+
+ if (efi_memmap_alloc(num_entries, &data) != 0) {
+ pr_err("Could not allocate boot services memmap\n");
+ return;
+ }
+
+ new = early_memremap_prot(data.phys_map, data.size,
+ pgprot_val(pgprot_encrypted(FIXMAP_PAGE_NORMAL)));
+ if (!new) {
+ pr_err("Failed to map new boot services memmap\n");
+ return;
+ }
+
+ efi_memmap_insert(&efi.memmap, new, &mr);
+ early_memunmap(new, data.size);
+
+ efi_memmap_install(&data);
+ e820__range_update(addr, size, E820_TYPE_RAM, E820_TYPE_RESERVED);
+ e820__update_table(e820_table);
+}
+
+/*
+ * Helper function for efi_reserve_boot_services() to figure out if we
+ * can free regions in efi_free_boot_services().
+ *
+ * Use this function to ensure we do not free regions owned by somebody
+ * else. We must only reserve (and then free) regions:
+ *
+ * - Not within any part of the kernel
+ * - Not the BIOS reserved area (E820_TYPE_RESERVED, E820_TYPE_NVS, etc)
+ */
+static __init bool can_free_region(u64 start, u64 size)
+{
+ if (start + size > __pa_symbol(_text) && start <= __pa_symbol(_end))
+ return false;
+
+ if (!e820__mapped_all(start, start+size, E820_TYPE_RAM))
+ return false;
+
+ return true;
+}
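+
+/*
+ * For example, a boot services region overlapping the kernel image
+ * ([_text, _end]) or one that e820 does not report as plain RAM (say,
+ * E820_TYPE_RESERVED) is declared not freeable here, so
+ * efi_reserve_boot_services() will tag it EFI_MEMORY_RUNTIME rather than
+ * letting efi_free_boot_services() hand it back to the page allocator.
+ */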
+
+void __init efi_reserve_boot_services(void)
+{
+ efi_memory_desc_t *md;
+
+ if (!efi_enabled(EFI_MEMMAP))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ u64 start = md->phys_addr;
+ u64 size = md->num_pages << EFI_PAGE_SHIFT;
+ bool already_reserved;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA)
+ continue;
+
+ already_reserved = memblock_is_region_reserved(start, size);
+
+ /*
+ * Because the following memblock_reserve() is paired
+ * with memblock_free_late() for this region in
+ * efi_free_boot_services(), we must be extremely
+ * careful not to reserve, and subsequently free,
+ * critical regions of memory (like the kernel image) or
+ * those regions that somebody else has already
+ * reserved.
+ *
+ * A good example of a critical region that must not be
+ * freed is page zero (first 4KB of memory), which may
+ * contain boot services code/data but is marked
+ * E820_TYPE_RESERVED by trim_bios_range().
+ */
+ if (!already_reserved) {
+ memblock_reserve(start, size);
+
+ /*
+ * If we are the first to reserve the region, no
+ * one else cares about it. We own it and can
+ * free it later.
+ */
+ if (can_free_region(start, size))
+ continue;
+ }
+
+ /*
+ * We don't own the region. We must not free it.
+ *
+ * Setting this bit for a boot services region really
+ * doesn't make sense as far as the firmware is
+ * concerned, but it does provide us with a way to tag
+ * those regions that must not be paired with
+ * memblock_free_late().
+ */
+ md->attribute |= EFI_MEMORY_RUNTIME;
+ }
+}
+
+/*
+ * Apart from having VA mappings for EFI boot services code/data regions,
+ * (duplicate) 1:1 mappings were also created as a quirk for buggy firmware. So,
+ * unmap both 1:1 and VA mappings.
+ */
+static void __init efi_unmap_pages(efi_memory_desc_t *md)
+{
+ pgd_t *pgd = efi_mm.pgd;
+ u64 pa = md->phys_addr;
+ u64 va = md->virt_addr;
+
+ /*
+ * EFI mixed mode has all RAM mapped to access arguments while making
+ * EFI runtime calls, hence don't unmap EFI boot services code/data
+ * regions.
+ */
+ if (efi_is_mixed())
+ return;
+
+ if (kernel_unmap_pages_in_pgd(pgd, pa, md->num_pages))
+ pr_err("Failed to unmap 1:1 mapping for 0x%llx\n", pa);
+
+ if (kernel_unmap_pages_in_pgd(pgd, va, md->num_pages))
+ pr_err("Failed to unmap VA mapping for 0x%llx\n", va);
+}
+
+void __init efi_free_boot_services(void)
+{
+ struct efi_memory_map_data data = { 0 };
+ efi_memory_desc_t *md;
+ int num_entries = 0;
+ void *new, *new_md;
+
+ /* Keep all regions for /sys/kernel/debug/efi */
+ if (efi_enabled(EFI_DBG))
+ return;
+
+ for_each_efi_memory_desc(md) {
+ unsigned long long start = md->phys_addr;
+ unsigned long long size = md->num_pages << EFI_PAGE_SHIFT;
+ size_t rm_size;
+
+ if (md->type != EFI_BOOT_SERVICES_CODE &&
+ md->type != EFI_BOOT_SERVICES_DATA) {
+ num_entries++;
+ continue;
+ }
+
+ /* Do not free, someone else owns it: */
+ if (md->attribute & EFI_MEMORY_RUNTIME) {
+ num_entries++;
+ continue;
+ }
+
+ /*
+ * Before calling set_virtual_address_map(), EFI boot services
+ * code/data regions were mapped as a quirk for buggy firmware.
+ * Unmap them from efi_pgd before freeing them up.
+ */
+ efi_unmap_pages(md);
+
+ /*
+ * Nasty quirk: if all sub-1MB memory is used for boot
+ * services, we can get here without having allocated the
+ * real mode trampoline. It's too late to hand boot services
+ * memory back to the memblock allocator, so instead
+ * try to manually allocate the trampoline if needed.
+ *
+ * I've seen this on a Dell XPS 13 9350 with firmware
+ * 1.4.4 with SGX enabled booting Linux via Fedora 24's
+ * grub2-efi on a hard disk. (And no, I don't know why
+ * this happened, but Linux should still try to boot rather
+ * than panicking early.)
+ */
+ rm_size = real_mode_size_needed();
+ if (rm_size && (start + rm_size) < (1<<20) && size >= rm_size) {
+ set_real_mode_mem(start);
+ start += rm_size;
+ size -= rm_size;
+ }
+
+ /*
+ * Don't free memory under 1M for two reasons:
+ * - BIOS might clobber it
+ * - Crash kernel needs it to be reserved
+ */
+ if (start + size < SZ_1M)
+ continue;
+ if (start < SZ_1M) {
+ size -= (SZ_1M - start);
+ start = SZ_1M;
+ }
+
+ memblock_free_late(start, size);
+ }
+
+ if (!num_entries)
+ return;
+
+ if (efi_memmap_alloc(num_entries, &data) != 0) {
+ pr_err("Failed to allocate new EFI memmap\n");
+ return;
+ }
+
+ new = memremap(data.phys_map, data.size, MEMREMAP_WB);
+ if (!new) {
+ pr_err("Failed to map new EFI memmap\n");
+ return;
+ }
+
+ /*
+ * Build a new EFI memmap that excludes any boot services
+ * regions that are not tagged EFI_MEMORY_RUNTIME, since those
+ * regions have now been freed.
+ */
+ new_md = new;
+ for_each_efi_memory_desc(md) {
+ if (!(md->attribute & EFI_MEMORY_RUNTIME) &&
+ (md->type == EFI_BOOT_SERVICES_CODE ||
+ md->type == EFI_BOOT_SERVICES_DATA))
+ continue;
+
+ memcpy(new_md, md, efi.memmap.desc_size);
+ new_md += efi.memmap.desc_size;
+ }
+
+ memunmap(new);
+
+ if (efi_memmap_install(&data) != 0) {
+ pr_err("Could not install new EFI memmap\n");
+ return;
+ }
+}
+
+/*
+ * A number of config table entries get remapped to virtual addresses
+ * after entering EFI virtual mode. However, the kexec kernel requires
+ * their physical addresses, so we pass them via setup_data and
+ * correct those entries to their respective physical addresses here.
+ *
+ * Currently this only handles SMBIOS, which is necessary for some
+ * firmware implementations.
+ */
+int __init efi_reuse_config(u64 tables, int nr_tables)
+{
+ int i, sz, ret = 0;
+ void *p, *tablep;
+ struct efi_setup_data *data;
+
+ if (nr_tables == 0)
+ return 0;
+
+ if (!efi_setup)
+ return 0;
+
+ if (!efi_enabled(EFI_64BIT))
+ return 0;
+
+ data = early_memremap(efi_setup, sizeof(*data));
+ if (!data) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (!data->smbios)
+ goto out_memremap;
+
+ sz = sizeof(efi_config_table_64_t);
+
+ p = tablep = early_memremap(tables, nr_tables * sz);
+ if (!p) {
+ pr_err("Could not map Configuration table!\n");
+ ret = -ENOMEM;
+ goto out_memremap;
+ }
+
+ for (i = 0; i < nr_tables; i++) {
+ efi_guid_t guid;
+
+ guid = ((efi_config_table_64_t *)p)->guid;
+
+ if (!efi_guidcmp(guid, SMBIOS_TABLE_GUID))
+ ((efi_config_table_64_t *)p)->table = data->smbios;
+ p += sz;
+ }
+ early_memunmap(tablep, nr_tables * sz);
+
+out_memremap:
+ early_memunmap(data, sizeof(*data));
+out:
+ return ret;
+}
+
+void __init efi_apply_memmap_quirks(void)
+{
+ /*
+ * Now that setup is done, unmap the EFI memory map on mismatched
+ * firmware/kernel architectures since there is no support for runtime
+ * services.
+ */
+ if (!efi_runtime_supported()) {
+ pr_info("Setup done, disabling due to 32/64-bit mismatch\n");
+ efi_memmap_unmap();
+ }
+}
+
+/*
+ * For most modern platforms the preferred method of powering off is via
+ * ACPI. However, there are some that are known to require the use of
+ * EFI runtime services and for which ACPI does not work at all.
+ *
+ * Using EFI is a last resort, to be used only if no other option
+ * exists.
+ */
+bool efi_reboot_required(void)
+{
+ if (!acpi_gbl_reduced_hardware)
+ return false;
+
+ efi_reboot_quirk_mode = EFI_RESET_WARM;
+ return true;
+}
+
+bool efi_poweroff_required(void)
+{
+ return acpi_gbl_reduced_hardware || acpi_no_s5;
+}
+
+#ifdef CONFIG_EFI_CAPSULE_QUIRK_QUARK_CSH
+
+static int qrk_capsule_setup_info(struct capsule_info *cap_info, void **pkbuff,
+ size_t hdr_bytes)
+{
+ struct quark_security_header *csh = *pkbuff;
+
+ /* Only process data block that is larger than the security header */
+ if (hdr_bytes < sizeof(struct quark_security_header))
+ return 0;
+
+ if (csh->csh_signature != QUARK_CSH_SIGNATURE ||
+ csh->headersize != QUARK_SECURITY_HEADER_SIZE)
+ return 1;
+
+ /* Only process data block if EFI header is included */
+ if (hdr_bytes < QUARK_SECURITY_HEADER_SIZE +
+ sizeof(efi_capsule_header_t))
+ return 0;
+
+ pr_debug("Quark security header detected\n");
+
+ if (csh->rsvd_next_header != 0) {
+ pr_err("multiple Quark security headers not supported\n");
+ return -EINVAL;
+ }
+
+ *pkbuff += csh->headersize;
+ cap_info->total_size = csh->headersize;
+
+ /*
+ * Update the first page pointer to skip over the CSH header.
+ */
+ cap_info->phys[0] += csh->headersize;
+
+ /*
+ * cap_info->capsule should point at a virtual mapping of the entire
+ * capsule, starting at the capsule header. Our image has the Quark
+ * security header prepended, so we cannot rely on the default vmap()
+ * mapping created by the generic capsule code.
+ * Given that the Quark firmware does not appear to care about the
+ * virtual mapping, let's just point cap_info->capsule at our copy
+ * of the capsule header.
+ */
+ cap_info->capsule = &cap_info->header;
+
+ return 1;
+}
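+
+/*
+ * For example, with the default QUARK_SECURITY_HEADER_SIZE of 0x400 the
+ * image is laid out as [0x000: CSH | 0x400: EFI capsule header | payload];
+ * the quirk advances *pkbuff and phys[0] by 0x400 so that the rest of the
+ * capsule code sees a standard capsule image.
+ */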
+
+static const struct x86_cpu_id efi_capsule_quirk_ids[] = {
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000,
+ &qrk_capsule_setup_info),
+ { }
+};
+
+int efi_capsule_setup_info(struct capsule_info *cap_info, void *kbuff,
+ size_t hdr_bytes)
+{
+ int (*quirk_handler)(struct capsule_info *, void **, size_t);
+ const struct x86_cpu_id *id;
+ int ret;
+
+ if (hdr_bytes < sizeof(efi_capsule_header_t))
+ return 0;
+
+ cap_info->total_size = 0;
+
+ id = x86_match_cpu(efi_capsule_quirk_ids);
+ if (id) {
+ /*
+ * The quirk handler is supposed to return
+ * - a value > 0 if the setup should continue, after advancing
+ * kbuff as needed
+ * - 0 if not enough hdr_bytes are available yet
+ * - a negative error code otherwise
+ */
+ quirk_handler = (typeof(quirk_handler))id->driver_data;
+ ret = quirk_handler(cap_info, &kbuff, hdr_bytes);
+ if (ret <= 0)
+ return ret;
+ }
+
+ memcpy(&cap_info->header, kbuff, sizeof(cap_info->header));
+
+ cap_info->total_size += cap_info->header.imagesize;
+
+ return __efi_capsule_setup_info(cap_info);
+}
+
+#endif
+
+/*
+ * If any access by any efi runtime service causes a page fault, then,
+ * 1. If it's efi_reset_system(), reboot through BIOS.
+ * 2. If any other efi runtime service, then
+ * a. Return error status to the efi caller process.
+ * b. Disable EFI Runtime Services forever and
+ * c. Freeze efi_rts_wq and schedule new process.
+ *
+ * @return: Returns only if the page fault is not handled. This function
+ * never returns if the page fault is handled successfully.
+ */
+void efi_crash_gracefully_on_page_fault(unsigned long phys_addr)
+{
+ if (!IS_ENABLED(CONFIG_X86_64))
+ return;
+
+ /*
+ * If we get an interrupt/NMI while processing an EFI runtime service
+ * then this is a regular OOPS, not an EFI failure.
+ */
+ if (in_interrupt())
+ return;
+
+ /*
+ * Make sure that an efi runtime service caused the page fault.
+ * READ_ONCE() because we might be OOPSing in a different thread,
+ * and we don't want to trip KTSAN while trying to OOPS.
+ */
+ if (READ_ONCE(efi_rts_work.efi_rts_id) == EFI_NONE ||
+ current_work() != &efi_rts_work.work)
+ return;
+
+ /*
+ * Address range 0x0000 - 0x0fff is always mapped in the efi_pgd, so
+ * page faulting on these addresses isn't expected.
+ */
+ if (phys_addr <= 0x0fff)
+ return;
+
+ /*
+ * Print stack trace as it might be useful to know which EFI Runtime
+ * Service is buggy.
+ */
+ WARN(1, FW_BUG "Page fault caused by firmware at PA: 0x%lx\n",
+ phys_addr);
+
+ /*
+ * Buggy efi_reset_system() is handled differently from other EFI
+ * Runtime Services as it doesn't use efi_rts_wq. Although
+ * native_machine_emergency_restart() says that machine_real_restart()
+ * could fail, it's better not to complicate this fault handler
+ * because this case occurs *very* rarely and hence could be improved
+ * on an as-needed basis.
+ */
+ if (efi_rts_work.efi_rts_id == EFI_RESET_SYSTEM) {
+ pr_info("efi_reset_system() buggy! Reboot through BIOS\n");
+ machine_real_restart(MRR_BIOS);
+ return;
+ }
+
+ /*
+ * Before calling EFI Runtime Service, the kernel has switched the
+ * calling process to efi_mm. Hence, switch back to task_mm.
+ */
+ arch_efi_call_virt_teardown();
+
+ /* Signal error status to the efi caller process */
+ efi_rts_work.status = EFI_ABORTED;
+ complete(&efi_rts_work.efi_rts_comp);
+
+ clear_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+ pr_info("Froze efi_rts_wq and disabled EFI Runtime Services\n");
+
+ /*
+ * Call schedule() in an infinite loop, so that any spurious wake ups
+ * will never run efi_rts_wq again.
+ */
+ for (;;) {
+ set_current_state(TASK_IDLE);
+ schedule();
+ }
+}
diff --git a/arch/x86/platform/geode/Makefile b/arch/x86/platform/geode/Makefile
new file mode 100644
index 000000000..a8a6b1ded
--- /dev/null
+++ b/arch/x86/platform/geode/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_ALIX) += alix.o
+obj-$(CONFIG_NET5501) += net5501.o
+obj-$(CONFIG_GEOS) += geos.o
diff --git a/arch/x86/platform/geode/alix.c b/arch/x86/platform/geode/alix.c
new file mode 100644
index 000000000..b39bf3b5e
--- /dev/null
+++ b/arch/x86/platform/geode/alix.c
@@ -0,0 +1,202 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System Specific setup for PCEngines ALIX.
+ * At the moment this means setup of GPIO control of LEDs
+ * on Alix.2/3/6 boards.
+ *
+ * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
+ * Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com>
+ * and Philip Prindeville <philipp@redfish-solutions.com>
+ *
+ * TODO: There are large similarities with leds-net5501.c
+ * by Alessandro Zummo <a.zummo@towertech.it>
+ * In the future leds-net5501.c should be migrated over to platform
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/moduleparam.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
+#include <linux/dmi.h>
+
+#include <asm/geode.h>
+
+#define BIOS_SIGNATURE_TINYBIOS 0xf0000
+#define BIOS_SIGNATURE_COREBOOT 0x500
+#define BIOS_REGION_SIZE 0x10000
+
+/*
+ * This driver is not modular, but to keep backward compatibility
+ * with existing use cases, continuing with module_param is
+ * the easiest way forward.
+ */
+static bool force = 0;
+module_param(force, bool, 0444);
+/* FIXME: Award bios is not automatically detected as Alix platform */
+MODULE_PARM_DESC(force, "Force detection as ALIX.2/ALIX.3 platform");
+
+static struct gpio_keys_button alix_gpio_buttons[] = {
+ {
+ .code = KEY_RESTART,
+ .gpio = 24,
+ .active_low = 1,
+ .desc = "Reset button",
+ .type = EV_KEY,
+ .wakeup = 0,
+ .debounce_interval = 100,
+ .can_disable = 0,
+ }
+};
+static struct gpio_keys_platform_data alix_buttons_data = {
+ .buttons = alix_gpio_buttons,
+ .nbuttons = ARRAY_SIZE(alix_gpio_buttons),
+ .poll_interval = 20,
+};
+
+static struct platform_device alix_buttons_dev = {
+ .name = "gpio-keys-polled",
+ .id = 1,
+ .dev = {
+ .platform_data = &alix_buttons_data,
+ }
+};
+
+static struct gpio_led alix_leds[] = {
+ {
+ .name = "alix:1",
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "alix:2",
+ .default_trigger = "default-off",
+ },
+ {
+ .name = "alix:3",
+ .default_trigger = "default-off",
+ },
+};
+
+static struct gpio_led_platform_data alix_leds_data = {
+ .num_leds = ARRAY_SIZE(alix_leds),
+ .leds = alix_leds,
+};
+
+static struct gpiod_lookup_table alix_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /* The Geode GPIOs should be on the CS5535 companion chip */
+ GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct platform_device alix_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &alix_leds_data,
+};
+
+static struct platform_device *alix_devs[] __initdata = {
+ &alix_buttons_dev,
+ &alix_leds_dev,
+};
+
+static void __init register_alix(void)
+{
+ /* Setup LED control through leds-gpio driver */
+ gpiod_add_lookup_table(&alix_leds_gpio_table);
+ platform_add_devices(alix_devs, ARRAY_SIZE(alix_devs));
+}
+
+static bool __init alix_present(unsigned long bios_phys,
+ const char *alix_sig,
+ size_t alix_sig_len)
+{
+ const size_t bios_len = BIOS_REGION_SIZE;
+ const char *bios_virt;
+ const char *scan_end;
+ const char *p;
+ char name[64];
+
+ if (force) {
+ printk(KERN_NOTICE "%s: forced to skip BIOS test, "
+ "assume system is ALIX.2/ALIX.3\n",
+ KBUILD_MODNAME);
+ return true;
+ }
+
+ bios_virt = phys_to_virt(bios_phys);
+ scan_end = bios_virt + bios_len - (alix_sig_len + 2);
+ for (p = bios_virt; p < scan_end; p++) {
+ const char *tail;
+ char *a;
+
+ if (memcmp(p, alix_sig, alix_sig_len) != 0)
+ continue;
+
+ memcpy(name, p, sizeof(name));
+
+ /* remove the first \0 character from string */
+ a = strchr(name, '\0');
+ if (a)
+ *a = ' ';
+
+ /* cut the string at a newline */
+ a = strchr(name, '\r');
+ if (a)
+ *a = '\0';
+
+ tail = p + alix_sig_len;
+ if ((tail[0] == '2' || tail[0] == '3' || tail[0] == '6')) {
+ printk(KERN_INFO
+ "%s: system is recognized as \"%s\"\n",
+ KBUILD_MODNAME, name);
+ return true;
+ }
+ }
+
+ return false;
+}
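+
+/*
+ * For illustration: a matching tinyBIOS string would look something like
+ * "PC Engines ALIX.2 v0.99h" (hypothetical version string), where the
+ * character right after the signature ('2', '3' or '6') identifies a
+ * supported board family.
+ */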
+
+static bool __init alix_present_dmi(void)
+{
+ const char *vendor, *product;
+
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ if (!vendor || strcmp(vendor, "PC Engines"))
+ return false;
+
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!product || (strcmp(product, "ALIX.2D") && strcmp(product, "ALIX.6")))
+ return false;
+
+ printk(KERN_INFO "%s: system is recognized as \"%s %s\"\n",
+ KBUILD_MODNAME, vendor, product);
+
+ return true;
+}
+
+static int __init alix_init(void)
+{
+ const char tinybios_sig[] = "PC Engines ALIX.";
+ const char coreboot_sig[] = "PC Engines\0ALIX.";
+
+ if (!is_geode())
+ return 0;
+
+ if (alix_present(BIOS_SIGNATURE_TINYBIOS, tinybios_sig, sizeof(tinybios_sig) - 1) ||
+ alix_present(BIOS_SIGNATURE_COREBOOT, coreboot_sig, sizeof(coreboot_sig) - 1) ||
+ alix_present_dmi())
+ register_alix();
+
+ return 0;
+}
+device_initcall(alix_init);
diff --git a/arch/x86/platform/geode/geos.c b/arch/x86/platform/geode/geos.c
new file mode 100644
index 000000000..d263528c9
--- /dev/null
+++ b/arch/x86/platform/geode/geos.c
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System Specific setup for Traverse Technologies GEOS.
+ * At the moment this means setup of GPIO control of LEDs.
+ *
+ * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
+ * Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com>
+ * and Philip Prindeville <philipp@redfish-solutions.com>
+ *
+ * TODO: There are large similarities with leds-net5501.c
+ * by Alessandro Zummo <a.zummo@towertech.it>
+ * In the future leds-net5501.c should be migrated over to platform
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
+#include <linux/dmi.h>
+
+#include <asm/geode.h>
+
+static struct gpio_keys_button geos_gpio_buttons[] = {
+ {
+ .code = KEY_RESTART,
+ .gpio = 3,
+ .active_low = 1,
+ .desc = "Reset button",
+ .type = EV_KEY,
+ .wakeup = 0,
+ .debounce_interval = 100,
+ .can_disable = 0,
+ }
+};
+static struct gpio_keys_platform_data geos_buttons_data = {
+ .buttons = geos_gpio_buttons,
+ .nbuttons = ARRAY_SIZE(geos_gpio_buttons),
+ .poll_interval = 20,
+};
+
+static struct platform_device geos_buttons_dev = {
+ .name = "gpio-keys-polled",
+ .id = 1,
+ .dev = {
+ .platform_data = &geos_buttons_data,
+ }
+};
+
+static struct gpio_led geos_leds[] = {
+ {
+ .name = "geos:1",
+ .default_trigger = "default-on",
+ },
+ {
+ .name = "geos:2",
+ .default_trigger = "default-off",
+ },
+ {
+ .name = "geos:3",
+ .default_trigger = "default-off",
+ },
+};
+
+static struct gpio_led_platform_data geos_leds_data = {
+ .num_leds = ARRAY_SIZE(geos_leds),
+ .leds = geos_leds,
+};
+
+static struct gpiod_lookup_table geos_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /* The Geode GPIOs should be on the CS5535 companion chip */
+ GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 25, NULL, 1, GPIO_ACTIVE_LOW),
+ GPIO_LOOKUP_IDX("cs5535-gpio", 27, NULL, 2, GPIO_ACTIVE_LOW),
+ { }
+ },
+};
+
+static struct platform_device geos_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &geos_leds_data,
+};
+
+static struct platform_device *geos_devs[] __initdata = {
+ &geos_buttons_dev,
+ &geos_leds_dev,
+};
+
+static void __init register_geos(void)
+{
+ /* Setup LED control through leds-gpio driver */
+ gpiod_add_lookup_table(&geos_leds_gpio_table);
+ platform_add_devices(geos_devs, ARRAY_SIZE(geos_devs));
+}
+
+static int __init geos_init(void)
+{
+ const char *vendor, *product;
+
+ if (!is_geode())
+ return 0;
+
+ vendor = dmi_get_system_info(DMI_SYS_VENDOR);
+ if (!vendor || strcmp(vendor, "Traverse Technologies"))
+ return 0;
+
+ product = dmi_get_system_info(DMI_PRODUCT_NAME);
+ if (!product || strcmp(product, "Geos"))
+ return 0;
+
+ printk(KERN_INFO "%s: system is recognized as \"%s %s\"\n",
+ KBUILD_MODNAME, vendor, product);
+
+ register_geos();
+
+ return 0;
+}
+device_initcall(geos_init);
diff --git a/arch/x86/platform/geode/net5501.c b/arch/x86/platform/geode/net5501.c
new file mode 100644
index 000000000..558384acd
--- /dev/null
+++ b/arch/x86/platform/geode/net5501.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * System Specific setup for Soekris net5501
+ * At the moment this means setup of GPIO control of LEDs and buttons
+ * on net5501 boards.
+ *
+ * Copyright (C) 2008-2009 Tower Technologies
+ * Written by Alessandro Zummo <a.zummo@towertech.it>
+ *
+ * Copyright (C) 2008 Constantin Baranov <const@mimas.ru>
+ * Copyright (C) 2011 Ed Wildgoose <kernel@wildgooses.com>
+ * and Philip Prindeville <philipp@redfish-solutions.com>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/leds.h>
+#include <linux/platform_device.h>
+#include <linux/input.h>
+#include <linux/gpio_keys.h>
+#include <linux/gpio/machine.h>
+
+#include <asm/geode.h>
+
+#define BIOS_REGION_BASE 0xffff0000
+#define BIOS_REGION_SIZE 0x00010000
+
+static struct gpio_keys_button net5501_gpio_buttons[] = {
+ {
+ .code = KEY_RESTART,
+ .gpio = 24,
+ .active_low = 1,
+ .desc = "Reset button",
+ .type = EV_KEY,
+ .wakeup = 0,
+ .debounce_interval = 100,
+ .can_disable = 0,
+ }
+};
+static struct gpio_keys_platform_data net5501_buttons_data = {
+ .buttons = net5501_gpio_buttons,
+ .nbuttons = ARRAY_SIZE(net5501_gpio_buttons),
+ .poll_interval = 20,
+};
+
+static struct platform_device net5501_buttons_dev = {
+ .name = "gpio-keys-polled",
+ .id = 1,
+ .dev = {
+ .platform_data = &net5501_buttons_data,
+ }
+};
+
+static struct gpio_led net5501_leds[] = {
+ {
+ .name = "net5501:1",
+ .default_trigger = "default-on",
+ },
+};
+
+static struct gpio_led_platform_data net5501_leds_data = {
+ .num_leds = ARRAY_SIZE(net5501_leds),
+ .leds = net5501_leds,
+};
+
+static struct gpiod_lookup_table net5501_leds_gpio_table = {
+ .dev_id = "leds-gpio",
+ .table = {
+ /* The Geode GPIOs should be on the CS5535 companion chip */
+ GPIO_LOOKUP_IDX("cs5535-gpio", 6, NULL, 0, GPIO_ACTIVE_HIGH),
+ { }
+ },
+};
+
+static struct platform_device net5501_leds_dev = {
+ .name = "leds-gpio",
+ .id = -1,
+ .dev.platform_data = &net5501_leds_data,
+};
+
+static struct platform_device *net5501_devs[] __initdata = {
+ &net5501_buttons_dev,
+ &net5501_leds_dev,
+};
+
+static void __init register_net5501(void)
+{
+ /* Setup LED control through leds-gpio driver */
+ gpiod_add_lookup_table(&net5501_leds_gpio_table);
+ platform_add_devices(net5501_devs, ARRAY_SIZE(net5501_devs));
+}
+
+struct net5501_board {
+ u16 offset;
+ u16 len;
+ char *sig;
+};
+
+static struct net5501_board __initdata boards[] = {
+ { 0xb7b, 7, "net5501" }, /* net5501 v1.33/1.33c */
+ { 0xb1f, 7, "net5501" }, /* net5501 v1.32i */
+};
+
+static bool __init net5501_present(void)
+{
+ int i;
+ unsigned char *rombase, *bios;
+ bool found = false;
+
+ rombase = ioremap(BIOS_REGION_BASE, BIOS_REGION_SIZE - 1);
+ if (!rombase) {
+ printk(KERN_ERR "%s: failed to get rombase\n", KBUILD_MODNAME);
+ return found;
+ }
+
+ bios = rombase + 0x20; /* null terminated */
+
+ if (memcmp(bios, "comBIOS", 7))
+ goto unmap;
+
+ for (i = 0; i < ARRAY_SIZE(boards); i++) {
+ unsigned char *model = rombase + boards[i].offset;
+
+ if (!memcmp(model, boards[i].sig, boards[i].len)) {
+ printk(KERN_INFO "%s: system is recognized as \"%s\"\n",
+ KBUILD_MODNAME, model);
+
+ found = true;
+ break;
+ }
+ }
+
+unmap:
+ iounmap(rombase);
+ return found;
+}
+
+static int __init net5501_init(void)
+{
+ if (!is_geode())
+ return 0;
+
+ if (!net5501_present())
+ return 0;
+
+ register_net5501();
+
+ return 0;
+}
+device_initcall(net5501_init);
diff --git a/arch/x86/platform/intel-mid/Makefile b/arch/x86/platform/intel-mid/Makefile
new file mode 100644
index 000000000..ddfc08783
--- /dev/null
+++ b/arch/x86/platform/intel-mid/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_X86_INTEL_MID) += intel-mid.o pwr.o
diff --git a/arch/x86/platform/intel-mid/intel-mid.c b/arch/x86/platform/intel-mid/intel-mid.c
new file mode 100644
index 000000000..f4592dc7a
--- /dev/null
+++ b/arch/x86/platform/intel-mid/intel-mid.c
@@ -0,0 +1,124 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel MID platform setup code
+ *
+ * (C) Copyright 2008, 2012, 2021 Intel Corporation
+ * Author: Jacob Pan (jacob.jun.pan@intel.com)
+ * Author: Sathyanarayanan Kuppuswamy <sathyanarayanan.kuppuswamy@intel.com>
+ */
+
+#define pr_fmt(fmt) "intel_mid: " fmt
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/regulator/machine.h>
+#include <linux/scatterlist.h>
+#include <linux/irq.h>
+#include <linux/export.h>
+#include <linux/notifier.h>
+
+#include <asm/setup.h>
+#include <asm/mpspec_def.h>
+#include <asm/hw_irq.h>
+#include <asm/apic.h>
+#include <asm/io_apic.h>
+#include <asm/intel-mid.h>
+#include <asm/io.h>
+#include <asm/i8259.h>
+#include <asm/intel_scu_ipc.h>
+#include <asm/reboot.h>
+
+#define IPCMSG_COLD_OFF 0x80 /* Only for Tangier */
+#define IPCMSG_COLD_RESET 0xF1
+
+static void intel_mid_power_off(void)
+{
+ /* Shut down South Complex via PWRMU */
+ intel_mid_pwr_power_off();
+
+ /* Only for Tangier, the rest will ignore this command */
+ intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_OFF, 1);
+};
+
+static void intel_mid_reboot(void)
+{
+ intel_scu_ipc_dev_simple_command(NULL, IPCMSG_COLD_RESET, 0);
+}
+
+static void __init intel_mid_time_init(void)
+{
+	/* Local APIC only, no APB timer */
+ x86_init.timers.setup_percpu_clockev = setup_boot_APIC_clock;
+ x86_cpuinit.setup_percpu_clockev = setup_secondary_APIC_clock;
+}
+
+static void intel_mid_arch_setup(void)
+{
+ switch (boot_cpu_data.x86_model) {
+ case 0x3C:
+ case 0x4A:
+ x86_platform.legacy.rtc = 1;
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Intel MID platforms are using explicitly defined regulators.
+ *
+ * Let the regulator core know that we do not have any additional
+ * regulators left. This lets it substitute unprovided regulators with
+ * dummy ones:
+ */
+ regulator_has_full_constraints();
+}
+
+/*
+ * Moorestown has neither an external NMI source nor port 0x61 to report
+ * NMI status. The only possible NMI sources are the PMU, as a result of
+ * the NMI watchdog or lock debugging. Reading I/O port 0x61 returns 0xff,
+ * which would mislead the NMI handler.
+ */
+static unsigned char intel_mid_get_nmi_reason(void)
+{
+ return 0;
+}
+
+/*
+ * Moorestown specific x86_init function overrides and early setup
+ * calls.
+ */
+void __init x86_intel_mid_early_setup(void)
+{
+ x86_init.resources.probe_roms = x86_init_noop;
+ x86_init.resources.reserve_resources = x86_init_noop;
+
+ x86_init.timers.timer_init = intel_mid_time_init;
+ x86_init.timers.setup_percpu_clockev = x86_init_noop;
+
+ x86_init.irqs.pre_vector_init = x86_init_noop;
+
+ x86_init.oem.arch_setup = intel_mid_arch_setup;
+
+ x86_platform.get_nmi_reason = intel_mid_get_nmi_reason;
+
+ x86_init.pci.arch_init = intel_mid_pci_init;
+ x86_init.pci.fixup_irqs = x86_init_noop;
+
+ legacy_pic = &null_legacy_pic;
+
+ /*
+	 * Do nothing for now, as everything needed is already done in
+	 * x86_intel_mid_early_setup() itself.
+ */
+ x86_init.acpi.reduced_hw_early_init = x86_init_noop;
+
+ pm_power_off = intel_mid_power_off;
+ machine_ops.emergency_restart = intel_mid_reboot;
+
+ /* Avoid searching for BIOS MP tables */
+ x86_init.mpparse.find_smp_config = x86_init_noop;
+ x86_init.mpparse.get_smp_config = x86_init_uint_noop;
+ set_bit(MP_BUS_ISA, mp_bus_not_pci);
+}
diff --git a/arch/x86/platform/intel-mid/pwr.c b/arch/x86/platform/intel-mid/pwr.c
new file mode 100644
index 000000000..27288d8d3
--- /dev/null
+++ b/arch/x86/platform/intel-mid/pwr.c
@@ -0,0 +1,485 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Intel MID Power Management Unit (PWRMU) device driver
+ *
+ * Copyright (C) 2016, Intel Corporation
+ *
+ * Author: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
+ *
+ * The Intel MID Power Management Unit device driver handles South Complex
+ * PCI devices such as GPDMA, SPI, I2C, PWM, and so on. By default the PCI
+ * core modifies bits in the PMCSR register in PCI configuration space.
+ * This is not enough on some SoCs, such as Intel Tangier; in that case the
+ * PCI core sets the new power state of the device in question through a PM
+ * hook registered in struct pci_platform_pm_ops (see drivers/pci/pci-mid.c).
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/delay.h>
+#include <linux/errno.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include <asm/intel-mid.h>
+
+/* Registers */
+#define PM_STS 0x00
+#define PM_CMD 0x04
+#define PM_ICS 0x08
+#define PM_WKC(x) (0x10 + (x) * 4)
+#define PM_WKS(x) (0x18 + (x) * 4)
+#define PM_SSC(x) (0x20 + (x) * 4)
+#define PM_SSS(x) (0x30 + (x) * 4)
+
+/* Bits in PM_STS */
+#define PM_STS_BUSY (1 << 8)
+
+/* Bits in PM_CMD */
+#define PM_CMD_CMD(x) ((x) << 0)
+#define PM_CMD_IOC (1 << 8)
+#define PM_CMD_CM_NOP (0 << 9)
+#define PM_CMD_CM_IMMEDIATE (1 << 9)
+#define PM_CMD_CM_DELAY (2 << 9)
+#define PM_CMD_CM_TRIGGER (3 << 9)
+
+/* System states */
+#define PM_CMD_SYS_STATE_S5 (5 << 16)
+
+/* Trigger variants */
+#define PM_CMD_CFG_TRIGGER_NC (3 << 19)
+
+/* Message to wait for TRIGGER_NC case */
+#define TRIGGER_NC_MSG_2 (2 << 22)
+
+/* List of commands */
+#define CMD_SET_CFG 0x01
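+
+/*
+ * For illustration only, derived from the definitions above: the S5
+ * power-off command composed in intel_mid_pwr_power_off() below works
+ * out to
+ *
+ *	PM_CMD_SYS_STATE_S5	(5 << 16)	= 0x00050000
+ *	PM_CMD_CMD(CMD_SET_CFG)			= 0x00000001
+ *	PM_CMD_CM_TRIGGER	(3 << 9)	= 0x00000600
+ *	PM_CMD_CFG_TRIGGER_NC	(3 << 19)	= 0x00180000
+ *	TRIGGER_NC_MSG_2	(2 << 22)	= 0x00800000
+ *
+ * i.e. a single PM_CMD write of 0x009d0601.
+ */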
+
+/* Bits in PM_ICS */
+#define PM_ICS_INT_STATUS(x) ((x) & 0xff)
+#define PM_ICS_IE (1 << 8)
+#define PM_ICS_IP (1 << 9)
+#define PM_ICS_SW_INT_STS (1 << 10)
+
+/* List of interrupts */
+#define INT_INVALID 0
+#define INT_CMD_COMPLETE 1
+#define INT_CMD_ERR 2
+#define INT_WAKE_EVENT 3
+#define INT_LSS_POWER_ERR 4
+#define INT_S0iX_MSG_ERR 5
+#define INT_NO_C6 6
+#define INT_TRIGGER_ERR 7
+#define INT_INACTIVITY 8
+
+/* South Complex devices */
+#define LSS_MAX_SHARED_DEVS 4
+#define LSS_MAX_DEVS 64
+
+#define LSS_WS_BITS 1 /* wake state width */
+#define LSS_PWS_BITS 2 /* power state width */
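+
+/*
+ * Illustrative mapping, following from the widths above and the reg/bit
+ * arithmetic in mid_pwr_set_power_state() below: with 2 power state bits
+ * per LSS, each 32-bit PM_SSC/PM_SSS register covers 16 devices. E.g.
+ * LSS 21 lives in register (21 * 2) / 32 = 1 at bit (21 * 2) % 32 = 10,
+ * so its state reads back as (PM_SSS(1) value >> 10) & 3.
+ */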
+
+/* Supported device IDs */
+#define PCI_DEVICE_ID_PENWELL 0x0828
+#define PCI_DEVICE_ID_TANGIER 0x11a1
+
+struct mid_pwr_dev {
+ struct pci_dev *pdev;
+ pci_power_t state;
+};
+
+struct mid_pwr {
+ struct device *dev;
+ void __iomem *regs;
+ int irq;
+ bool available;
+
+ struct mutex lock;
+ struct mid_pwr_dev lss[LSS_MAX_DEVS][LSS_MAX_SHARED_DEVS];
+};
+
+static struct mid_pwr *midpwr;
+
+static u32 mid_pwr_get_state(struct mid_pwr *pwr, int reg)
+{
+ return readl(pwr->regs + PM_SSS(reg));
+}
+
+static void mid_pwr_set_state(struct mid_pwr *pwr, int reg, u32 value)
+{
+ writel(value, pwr->regs + PM_SSC(reg));
+}
+
+static void mid_pwr_set_wake(struct mid_pwr *pwr, int reg, u32 value)
+{
+ writel(value, pwr->regs + PM_WKC(reg));
+}
+
+static void mid_pwr_interrupt_disable(struct mid_pwr *pwr)
+{
+ writel(~PM_ICS_IE, pwr->regs + PM_ICS);
+}
+
+static bool mid_pwr_is_busy(struct mid_pwr *pwr)
+{
+ return !!(readl(pwr->regs + PM_STS) & PM_STS_BUSY);
+}
+
+/* Wait up to 500ms for the latest PWRMU command to finish */
+static int mid_pwr_wait(struct mid_pwr *pwr)
+{
+ unsigned int count = 500000;
+ bool busy;
+
+ do {
+ busy = mid_pwr_is_busy(pwr);
+ if (!busy)
+ return 0;
+ udelay(1);
+ } while (--count);
+
+ return -EBUSY;
+}
+
+static int mid_pwr_wait_for_cmd(struct mid_pwr *pwr, u8 cmd)
+{
+ writel(PM_CMD_CMD(cmd) | PM_CMD_CM_IMMEDIATE, pwr->regs + PM_CMD);
+ return mid_pwr_wait(pwr);
+}
+
+static int __update_power_state(struct mid_pwr *pwr, int reg, int bit, int new)
+{
+ int curstate;
+ u32 power;
+ int ret;
+
+ /* Check if the device is already in desired state */
+ power = mid_pwr_get_state(pwr, reg);
+ curstate = (power >> bit) & 3;
+ if (curstate == new)
+ return 0;
+
+ /* Update the power state */
+ mid_pwr_set_state(pwr, reg, (power & ~(3 << bit)) | (new << bit));
+
+ /* Send command to SCU */
+ ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
+ if (ret)
+ return ret;
+
+ /* Check if the device is already in desired state */
+ power = mid_pwr_get_state(pwr, reg);
+ curstate = (power >> bit) & 3;
+ if (curstate != new)
+ return -EAGAIN;
+
+ return 0;
+}
+
+static pci_power_t __find_weakest_power_state(struct mid_pwr_dev *lss,
+ struct pci_dev *pdev,
+ pci_power_t state)
+{
+ pci_power_t weakest = PCI_D3hot;
+ unsigned int j;
+
+ /* Find device in cache or first free cell */
+ for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
+ if (lss[j].pdev == pdev || !lss[j].pdev)
+ break;
+ }
+
+ /* Store the desired state in cache */
+ if (j < LSS_MAX_SHARED_DEVS) {
+ lss[j].pdev = pdev;
+ lss[j].state = state;
+ } else {
+ dev_WARN(&pdev->dev, "No room for device in PWRMU LSS cache\n");
+ weakest = state;
+ }
+
+ /* Find the power state we may use */
+ for (j = 0; j < LSS_MAX_SHARED_DEVS; j++) {
+ if (lss[j].state < weakest)
+ weakest = lss[j].state;
+ }
+
+ return weakest;
+}
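+
+/*
+ * Example: if two devices share one LSS and one of them asks for PCI_D3hot
+ * while the other asks for PCI_D0, the loop above picks PCI_D0, since the
+ * shared power island can only go as deep as its most active user allows.
+ */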
+
+static int __set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
+ pci_power_t state, int id, int reg, int bit)
+{
+ const char *name;
+ int ret;
+
+ state = __find_weakest_power_state(pwr->lss[id], pdev, state);
+ name = pci_power_name(state);
+
+ ret = __update_power_state(pwr, reg, bit, (__force int)state);
+ if (ret) {
+ dev_warn(&pdev->dev, "Can't set power state %s: %d\n", name, ret);
+ return ret;
+ }
+
+ dev_vdbg(&pdev->dev, "Set power state %s\n", name);
+ return 0;
+}
+
+static int mid_pwr_set_power_state(struct mid_pwr *pwr, struct pci_dev *pdev,
+ pci_power_t state)
+{
+ int id, reg, bit;
+ int ret;
+
+ id = intel_mid_pwr_get_lss_id(pdev);
+ if (id < 0)
+ return id;
+
+ reg = (id * LSS_PWS_BITS) / 32;
+ bit = (id * LSS_PWS_BITS) % 32;
+
+ /* We support states between PCI_D0 and PCI_D3hot */
+ if (state < PCI_D0)
+ state = PCI_D0;
+ if (state > PCI_D3hot)
+ state = PCI_D3hot;
+
+ mutex_lock(&pwr->lock);
+ ret = __set_power_state(pwr, pdev, state, id, reg, bit);
+ mutex_unlock(&pwr->lock);
+ return ret;
+}
+
+int intel_mid_pci_set_power_state(struct pci_dev *pdev, pci_power_t state)
+{
+ struct mid_pwr *pwr = midpwr;
+ int ret = 0;
+
+ might_sleep();
+
+ if (pwr && pwr->available)
+ ret = mid_pwr_set_power_state(pwr, pdev, state);
+ dev_vdbg(&pdev->dev, "set_power_state() returns %d\n", ret);
+
+ return 0;
+}
+
+pci_power_t intel_mid_pci_get_power_state(struct pci_dev *pdev)
+{
+ struct mid_pwr *pwr = midpwr;
+ int id, reg, bit;
+ u32 power;
+
+ if (!pwr || !pwr->available)
+ return PCI_UNKNOWN;
+
+ id = intel_mid_pwr_get_lss_id(pdev);
+ if (id < 0)
+ return PCI_UNKNOWN;
+
+ reg = (id * LSS_PWS_BITS) / 32;
+ bit = (id * LSS_PWS_BITS) % 32;
+ power = mid_pwr_get_state(pwr, reg);
+ return (__force pci_power_t)((power >> bit) & 3);
+}
+
+void intel_mid_pwr_power_off(void)
+{
+ struct mid_pwr *pwr = midpwr;
+ u32 cmd = PM_CMD_SYS_STATE_S5 |
+ PM_CMD_CMD(CMD_SET_CFG) |
+ PM_CMD_CM_TRIGGER |
+ PM_CMD_CFG_TRIGGER_NC |
+ TRIGGER_NC_MSG_2;
+
+ /* Send command to SCU */
+ writel(cmd, pwr->regs + PM_CMD);
+ mid_pwr_wait(pwr);
+}
+
+int intel_mid_pwr_get_lss_id(struct pci_dev *pdev)
+{
+ int vndr;
+ u8 id;
+
+ /*
+ * Mapping to PWRMU index is kept in the Logical SubSystem ID byte of
+ * Vendor capability.
+ */
+ vndr = pci_find_capability(pdev, PCI_CAP_ID_VNDR);
+ if (!vndr)
+ return -EINVAL;
+
+ /* Read the Logical SubSystem ID byte */
+ pci_read_config_byte(pdev, vndr + INTEL_MID_PWR_LSS_OFFSET, &id);
+ if (!(id & INTEL_MID_PWR_LSS_TYPE))
+ return -ENODEV;
+
+ id &= ~INTEL_MID_PWR_LSS_TYPE;
+ if (id >= LSS_MAX_DEVS)
+ return -ERANGE;
+
+ return id;
+}
+
+static irqreturn_t mid_pwr_irq_handler(int irq, void *dev_id)
+{
+ struct mid_pwr *pwr = dev_id;
+ u32 ics;
+
+ ics = readl(pwr->regs + PM_ICS);
+ if (!(ics & PM_ICS_IP))
+ return IRQ_NONE;
+
+ writel(ics | PM_ICS_IP, pwr->regs + PM_ICS);
+
+ dev_warn(pwr->dev, "Unexpected IRQ: %#x\n", PM_ICS_INT_STATUS(ics));
+ return IRQ_HANDLED;
+}
+
+struct mid_pwr_device_info {
+ int (*set_initial_state)(struct mid_pwr *pwr);
+};
+
+static int mid_pwr_probe(struct pci_dev *pdev, const struct pci_device_id *id)
+{
+ struct mid_pwr_device_info *info = (void *)id->driver_data;
+ struct device *dev = &pdev->dev;
+ struct mid_pwr *pwr;
+ int ret;
+
+ ret = pcim_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ ret = pcim_iomap_regions(pdev, 1 << 0, pci_name(pdev));
+ if (ret) {
+ dev_err(&pdev->dev, "I/O memory remapping failed\n");
+ return ret;
+ }
+
+ pwr = devm_kzalloc(dev, sizeof(*pwr), GFP_KERNEL);
+ if (!pwr)
+ return -ENOMEM;
+
+ pwr->dev = dev;
+ pwr->regs = pcim_iomap_table(pdev)[0];
+ pwr->irq = pdev->irq;
+
+ mutex_init(&pwr->lock);
+
+ /* Disable interrupts */
+ mid_pwr_interrupt_disable(pwr);
+
+ if (info && info->set_initial_state) {
+ ret = info->set_initial_state(pwr);
+ if (ret)
+ dev_warn(dev, "Can't set initial state: %d\n", ret);
+ }
+
+ ret = devm_request_irq(dev, pdev->irq, mid_pwr_irq_handler,
+ IRQF_NO_SUSPEND, pci_name(pdev), pwr);
+ if (ret)
+ return ret;
+
+ pwr->available = true;
+ midpwr = pwr;
+
+ pci_set_drvdata(pdev, pwr);
+ return 0;
+}
+
+static int mid_set_initial_state(struct mid_pwr *pwr, const u32 *states)
+{
+ unsigned int i, j;
+ int ret;
+
+ /*
+ * Enable wake events.
+ *
+	 * PWRMU supports up to 32 sources for waking up the system. Ungate
+	 * them all here.
+ */
+ mid_pwr_set_wake(pwr, 0, 0xffffffff);
+ mid_pwr_set_wake(pwr, 1, 0xffffffff);
+
+ /*
+ * Power off South Complex devices.
+ *
+	 * There is a map (see the note below) of 64 devices, 2 bits each,
+	 * spread across 32-bit HW registers. The following calls set all
+	 * devices to one known initial state, i.e. PCI_D3hot. This is done
+	 * in conjunction with the PMCSR setting in
+	 * arch/x86/pci/intel_mid_pci.c.
+ *
+ * NOTE: The actual device mapping is provided by a platform at run
+ * time using vendor capability of PCI configuration space.
+ */
+ mid_pwr_set_state(pwr, 0, states[0]);
+ mid_pwr_set_state(pwr, 1, states[1]);
+ mid_pwr_set_state(pwr, 2, states[2]);
+ mid_pwr_set_state(pwr, 3, states[3]);
+
+ /* Send command to SCU */
+ ret = mid_pwr_wait_for_cmd(pwr, CMD_SET_CFG);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < LSS_MAX_DEVS; i++) {
+ for (j = 0; j < LSS_MAX_SHARED_DEVS; j++)
+ pwr->lss[i][j].state = PCI_D3hot;
+ }
+
+ return 0;
+}
+
+static int pnw_set_initial_state(struct mid_pwr *pwr)
+{
+ /* On Penwell SRAM must stay powered on */
+ static const u32 states[] = {
+ 0xf00fffff, /* PM_SSC(0) */
+ 0xffffffff, /* PM_SSC(1) */
+ 0xffffffff, /* PM_SSC(2) */
+ 0xffffffff, /* PM_SSC(3) */
+ };
+ return mid_set_initial_state(pwr, states);
+}
+
+static int tng_set_initial_state(struct mid_pwr *pwr)
+{
+ static const u32 states[] = {
+ 0xffffffff, /* PM_SSC(0) */
+ 0xffffffff, /* PM_SSC(1) */
+ 0xffffffff, /* PM_SSC(2) */
+ 0xffffffff, /* PM_SSC(3) */
+ };
+ return mid_set_initial_state(pwr, states);
+}
+
+static const struct mid_pwr_device_info pnw_info = {
+ .set_initial_state = pnw_set_initial_state,
+};
+
+static const struct mid_pwr_device_info tng_info = {
+ .set_initial_state = tng_set_initial_state,
+};
+
+/* This table should be in sync with the one in drivers/pci/pci-mid.c */
+static const struct pci_device_id mid_pwr_pci_ids[] = {
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_PENWELL), (kernel_ulong_t)&pnw_info },
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_TANGIER), (kernel_ulong_t)&tng_info },
+ {}
+};
+
+static struct pci_driver mid_pwr_pci_driver = {
+ .name = "intel_mid_pwr",
+ .probe = mid_pwr_probe,
+ .id_table = mid_pwr_pci_ids,
+};
+
+builtin_pci_driver(mid_pwr_pci_driver);
diff --git a/arch/x86/platform/intel-quark/Makefile b/arch/x86/platform/intel-quark/Makefile
new file mode 100644
index 000000000..ed77cb952
--- /dev/null
+++ b/arch/x86/platform/intel-quark/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_INTEL_IMR) += imr.o
+obj-$(CONFIG_DEBUG_IMR_SELFTEST) += imr_selftest.o
diff --git a/arch/x86/platform/intel-quark/imr.c b/arch/x86/platform/intel-quark/imr.c
new file mode 100644
index 000000000..d3d456925
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr.c
@@ -0,0 +1,597 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * imr.c -- Intel Isolated Memory Region driver
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR registers define an isolated region of memory that can
+ * be masked to prohibit certain system agents from accessing memory.
+ * When a device behind a masked port performs an access - snooped or
+ * not, an IMR may optionally prevent that transaction from changing
+ * the state of memory or from getting correct data in response to the
+ * operation.
+ *
+ * Write data will be dropped and reads will return 0xFFFFFFFF, the
+ * system will reset and system BIOS will print out an error message to
+ * inform the user that an IMR has been violated.
+ *
+ * This code is based on the Linux MTRR code and reference code from
+ * Intel's Quark BSP EFI, Linux and grub code.
+ *
+ * See quark-x1000-datasheet.pdf for register definitions.
+ * http://www.intel.com/content/dam/www/public/us/en/documents/datasheets/quark-x1000-datasheet.pdf
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/iosf_mbi.h>
+#include <asm/io.h>
+
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+struct imr_device {
+ bool init;
+ struct mutex lock;
+ int max_imr;
+ int reg_base;
+};
+
+static struct imr_device imr_dev;
+
+/*
+ * IMR read/write mask control registers.
+ * See quark-x1000-datasheet.pdf sections 12.7.4.5 and 12.7.4.6 for
+ * bit definitions.
+ *
+ * addr_lo
+ * 31 Lock bit
+ * 30:24 Reserved
+ * 23:2 1 KiB aligned lo address
+ * 1:0 Reserved
+ *
+ * addr_hi
+ * 31:24 Reserved
+ * 23:2 1 KiB aligned hi address
+ * 1:0 Reserved
+ */
+#define IMR_LOCK BIT(31)
+
+struct imr_regs {
+ u32 addr_lo;
+ u32 addr_hi;
+ u32 rmask;
+ u32 wmask;
+};
+
+#define IMR_NUM_REGS (sizeof(struct imr_regs)/sizeof(u32))
+#define IMR_SHIFT 8
+#define imr_to_phys(x) ((x) << IMR_SHIFT)
+#define phys_to_imr(x) ((x) >> IMR_SHIFT)
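+
+/*
+ * Example, following directly from IMR_SHIFT above: the physical address
+ * 0x00040000 is stored in an IMR address field as 0x00040000 >> 8 == 0x400,
+ * and imr_to_phys(0x400) recovers 0x00040000 again.
+ */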
+
+/**
+ * imr_is_enabled - true if an IMR is enabled, false otherwise.
+ *
+ * Determines if an IMR is enabled based on address range and read/write
+ * mask. An IMR set with an address range of zero and a read/write access
+ * mask set to all is considered to be disabled. An IMR in any other
+ * state - for example with a zero address range but without
+ * read/write-access-all masks - is considered to be enabled. This
+ * definition of disabled is how firmware switches off an IMR and is
+ * maintained in the kernel for consistency.
+ *
+ * @imr: pointer to IMR descriptor.
+ * @return: true if the IMR is enabled, false if disabled.
+ */
+static inline int imr_is_enabled(struct imr_regs *imr)
+{
+ return !(imr->rmask == IMR_READ_ACCESS_ALL &&
+ imr->wmask == IMR_WRITE_ACCESS_ALL &&
+ imr_to_phys(imr->addr_lo) == 0 &&
+ imr_to_phys(imr->addr_hi) == 0);
+}
+
+/**
+ * imr_read - read an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ *
+ * @idev: pointer to imr_device structure.
+ * @imr_id: IMR entry to read.
+ * @imr: IMR structure representing address and access masks.
+ * @return: 0 on success or an error code passed from iosf_mbi on failure.
+ */
+static int imr_read(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+ u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+ int ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_lo);
+ if (ret)
+ return ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->addr_hi);
+ if (ret)
+ return ret;
+
+ ret = iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->rmask);
+ if (ret)
+ return ret;
+
+ return iosf_mbi_read(QRK_MBI_UNIT_MM, MBI_REG_READ, reg++, &imr->wmask);
+}
+
+/**
+ * imr_write - write an IMR at a given index.
+ *
+ * Requires caller to hold imr mutex.
+ * Note lock bits need to be written independently of address bits.
+ *
+ * @idev: pointer to imr_device structure.
+ * @imr_id: IMR entry to write.
+ * @imr: IMR structure representing address and access masks.
+ * @return: 0 on success or an error code passed from iosf_mbi on failure.
+ */
+static int imr_write(struct imr_device *idev, u32 imr_id, struct imr_regs *imr)
+{
+ unsigned long flags;
+ u32 reg = imr_id * IMR_NUM_REGS + idev->reg_base;
+ int ret;
+
+ local_irq_save(flags);
+
+ ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_lo);
+ if (ret)
+ goto failed;
+
+ ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->addr_hi);
+ if (ret)
+ goto failed;
+
+ ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->rmask);
+ if (ret)
+ goto failed;
+
+ ret = iosf_mbi_write(QRK_MBI_UNIT_MM, MBI_REG_WRITE, reg++, imr->wmask);
+ if (ret)
+ goto failed;
+
+ local_irq_restore(flags);
+ return 0;
+failed:
+ /*
+ * If writing to the IOSF failed then we're in an unknown state,
+ * likely a very bad state. An IMR in an invalid state will almost
+ * certainly lead to a memory access violation.
+ */
+ local_irq_restore(flags);
+ WARN(ret, "IOSF-MBI write fail range 0x%08x-0x%08x unreliable\n",
+ imr_to_phys(imr->addr_lo), imr_to_phys(imr->addr_hi) + IMR_MASK);
+
+ return ret;
+}
+
+/**
+ * imr_dbgfs_state_show - print state of IMR registers.
+ *
+ * @s: pointer to seq_file for output.
+ * @unused: unused parameter.
+ * @return: 0 on success or an error code passed from iosf_mbi on failure.
+ */
+static int imr_dbgfs_state_show(struct seq_file *s, void *unused)
+{
+ phys_addr_t base;
+ phys_addr_t end;
+ int i;
+ struct imr_device *idev = s->private;
+ struct imr_regs imr;
+ size_t size;
+ int ret = -ENODEV;
+
+ mutex_lock(&idev->lock);
+
+ for (i = 0; i < idev->max_imr; i++) {
+
+ ret = imr_read(idev, i, &imr);
+ if (ret)
+ break;
+
+ /*
+		 * Remember to add IMR_ALIGN bytes to the size to account for
+		 * the inherent IMR_ALIGN bytes hidden in the masked-away
+		 * lower ten bits.
+ */
+ if (imr_is_enabled(&imr)) {
+ base = imr_to_phys(imr.addr_lo);
+ end = imr_to_phys(imr.addr_hi) + IMR_MASK;
+ size = end - base + 1;
+ } else {
+ base = 0;
+ end = 0;
+ size = 0;
+ }
+ seq_printf(s, "imr%02i: base=%pa, end=%pa, size=0x%08zx "
+ "rmask=0x%08x, wmask=0x%08x, %s, %s\n", i,
+ &base, &end, size, imr.rmask, imr.wmask,
+ imr_is_enabled(&imr) ? "enabled " : "disabled",
+ imr.addr_lo & IMR_LOCK ? "locked" : "unlocked");
+ }
+
+ mutex_unlock(&idev->lock);
+ return ret;
+}
+DEFINE_SHOW_ATTRIBUTE(imr_dbgfs_state);
+
+/**
+ * imr_debugfs_register - register debugfs hooks.
+ *
+ * @idev: pointer to imr_device structure.
+ */
+static void imr_debugfs_register(struct imr_device *idev)
+{
+ debugfs_create_file("imr_state", 0444, NULL, idev,
+ &imr_dbgfs_state_fops);
+}
+
+/**
+ * imr_check_params - check passed address range IMR alignment and non-zero size
+ *
+ * @base: base address of intended IMR.
+ * @size: size of intended IMR.
+ * @return: zero on a valid range, -EINVAL on unaligned base/size or zero size.
+ */
+static int imr_check_params(phys_addr_t base, size_t size)
+{
+ if ((base & IMR_MASK) || (size & IMR_MASK)) {
+ pr_err("base %pa size 0x%08zx must align to 1KiB\n",
+ &base, size);
+ return -EINVAL;
+ }
+ if (size == 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/**
+ * imr_raw_size - account for the IMR_ALIGN bytes that addr_hi appends.
+ *
+ * IMR addr_hi has a built in offset of plus IMR_ALIGN (0x400) bytes from the
+ * value in the register. We need to subtract IMR_ALIGN bytes from input sizes
+ * as a result.
+ *
+ * @size: input size bytes.
+ * @return: reduced size.
+ */
+static inline size_t imr_raw_size(size_t size)
+{
+ return size - IMR_ALIGN;
+}
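+
+/*
+ * Worked example, assuming IMR_ALIGN == 0x400 as noted above: for a 4 KiB
+ * region at base 0x8000, imr_raw_size(0x1000) == 0xc00, giving
+ * addr_lo == phys_to_imr(0x8000) and addr_hi == phys_to_imr(0x8c00); with
+ * the built-in IMR_ALIGN offset the hardware then covers 0x8000-0x8fff.
+ */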
+
+/**
+ * imr_address_overlap - detects an address overlap.
+ *
+ * @addr: address to check against an existing IMR.
+ * @imr: imr being checked.
+ * @return: true for overlap, false for no overlap.
+ */
+static inline int imr_address_overlap(phys_addr_t addr, struct imr_regs *imr)
+{
+ return addr >= imr_to_phys(imr->addr_lo) && addr <= imr_to_phys(imr->addr_hi);
+}
+
+/**
+ * imr_add_range - add an Isolated Memory Region.
+ *
+ * @base: physical base address of region aligned to 1KiB.
+ * @size: physical size of region in bytes; must be aligned to 1KiB.
+ * @rmask: read access mask.
+ * @wmask: write access mask.
+ * @return: zero on success or negative value indicating error.
+ */
+int imr_add_range(phys_addr_t base, size_t size,
+ unsigned int rmask, unsigned int wmask)
+{
+ phys_addr_t end;
+ unsigned int i;
+ struct imr_device *idev = &imr_dev;
+ struct imr_regs imr;
+ size_t raw_size;
+ int reg;
+ int ret;
+
+ if (WARN_ONCE(idev->init == false, "driver not initialized"))
+ return -ENODEV;
+
+ ret = imr_check_params(base, size);
+ if (ret)
+ return ret;
+
+ /* Tweak the size value. */
+ raw_size = imr_raw_size(size);
+ end = base + raw_size;
+
+ /*
+ * Check for reserved IMR value common to firmware, kernel and grub
+ * indicating a disabled IMR.
+ */
+ imr.addr_lo = phys_to_imr(base);
+ imr.addr_hi = phys_to_imr(end);
+ imr.rmask = rmask;
+ imr.wmask = wmask;
+ if (!imr_is_enabled(&imr))
+ return -ENOTSUPP;
+
+ mutex_lock(&idev->lock);
+
+ /*
+ * Find a free IMR while checking for an existing overlapping range.
+ * Note there's no restriction in silicon to prevent IMR overlaps.
+ * For the sake of simplicity and ease in defining/debugging an IMR
+ * memory map we exclude IMR overlaps.
+ */
+ reg = -1;
+ for (i = 0; i < idev->max_imr; i++) {
+ ret = imr_read(idev, i, &imr);
+ if (ret)
+ goto failed;
+
+ /* Find overlap @ base or end of requested range. */
+ ret = -EINVAL;
+ if (imr_is_enabled(&imr)) {
+ if (imr_address_overlap(base, &imr))
+ goto failed;
+ if (imr_address_overlap(end, &imr))
+ goto failed;
+ } else {
+ reg = i;
+ }
+ }
+
+ /* Error out if we have no free IMR entries. */
+ if (reg == -1) {
+ ret = -ENOMEM;
+ goto failed;
+ }
+
+ pr_debug("add %d phys %pa-%pa size %zx mask 0x%08x wmask 0x%08x\n",
+ reg, &base, &end, raw_size, rmask, wmask);
+
+ /* Enable IMR at specified range and access mask. */
+ imr.addr_lo = phys_to_imr(base);
+ imr.addr_hi = phys_to_imr(end);
+ imr.rmask = rmask;
+ imr.wmask = wmask;
+
+ ret = imr_write(idev, reg, &imr);
+ if (ret < 0) {
+ /*
+ * In the highly unlikely event iosf_mbi_write failed
+ * attempt to rollback the IMR setup skipping the trapping
+ * of further IOSF write failures.
+ */
+ imr.addr_lo = 0;
+ imr.addr_hi = 0;
+ imr.rmask = IMR_READ_ACCESS_ALL;
+ imr.wmask = IMR_WRITE_ACCESS_ALL;
+ imr_write(idev, reg, &imr);
+ }
+failed:
+ mutex_unlock(&idev->lock);
+ return ret;
+}
+EXPORT_SYMBOL_GPL(imr_add_range);
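+
+/*
+ * Minimal usage sketch (hypothetical caller, not part of this driver):
+ * protect a 1 KiB aligned region so that only the CPU may touch it, then
+ * release it again:
+ *
+ *	ret = imr_add_range(base, 4 * IMR_ALIGN, IMR_CPU, IMR_CPU);
+ *	if (!ret)
+ *		ret = imr_remove_range(base, 4 * IMR_ALIGN);
+ */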
+
+/**
+ * __imr_remove_range - delete an Isolated Memory Region.
+ *
+ * This function allows you to delete an IMR either by its index, specified
+ * by reg, or by the address range specified by base and size. If you
+ * specify an index, the base and size parameters are ignored.
+ * __imr_remove_range(0, base, size); delete IMR at index 0, base/size ignored.
+ * __imr_remove_range(-1, base, size); delete IMR from base to base+size.
+ *
+ * @reg: imr index to remove.
+ * @base: physical base address of region aligned to 1 KiB.
+ * @size: physical size of region in bytes aligned to 1 KiB.
+ * @return: -EINVAL on invalid range or out-of-range id
+ * -ENODEV if reg is valid but no IMR exists or is locked
+ * 0 on success.
+ */
+static int __imr_remove_range(int reg, phys_addr_t base, size_t size)
+{
+ phys_addr_t end;
+ bool found = false;
+ unsigned int i;
+ struct imr_device *idev = &imr_dev;
+ struct imr_regs imr;
+ size_t raw_size;
+ int ret = 0;
+
+ if (WARN_ONCE(idev->init == false, "driver not initialized"))
+ return -ENODEV;
+
+ /*
+ * Validate address range if deleting by address, else we are
+ * deleting by index where base and size will be ignored.
+ */
+ if (reg == -1) {
+ ret = imr_check_params(base, size);
+ if (ret)
+ return ret;
+ }
+
+ /* Tweak the size value. */
+ raw_size = imr_raw_size(size);
+ end = base + raw_size;
+
+ mutex_lock(&idev->lock);
+
+ if (reg >= 0) {
+ /* If a specific IMR is given try to use it. */
+ ret = imr_read(idev, reg, &imr);
+ if (ret)
+ goto failed;
+
+ if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK) {
+ ret = -ENODEV;
+ goto failed;
+ }
+ found = true;
+ } else {
+ /* Search for match based on address range. */
+ for (i = 0; i < idev->max_imr; i++) {
+ ret = imr_read(idev, i, &imr);
+ if (ret)
+ goto failed;
+
+ if (!imr_is_enabled(&imr) || imr.addr_lo & IMR_LOCK)
+ continue;
+
+ if ((imr_to_phys(imr.addr_lo) == base) &&
+ (imr_to_phys(imr.addr_hi) == end)) {
+ found = true;
+ reg = i;
+ break;
+ }
+ }
+ }
+
+ if (!found) {
+ ret = -ENODEV;
+ goto failed;
+ }
+
+ pr_debug("remove %d phys %pa-%pa size %zx\n", reg, &base, &end, raw_size);
+
+ /* Tear down the IMR. */
+ imr.addr_lo = 0;
+ imr.addr_hi = 0;
+ imr.rmask = IMR_READ_ACCESS_ALL;
+ imr.wmask = IMR_WRITE_ACCESS_ALL;
+
+ ret = imr_write(idev, reg, &imr);
+
+failed:
+ mutex_unlock(&idev->lock);
+ return ret;
+}
+
+/**
+ * imr_remove_range - delete an Isolated Memory Region by address
+ *
+ * This function allows you to delete an IMR by an address range specified
+ * by base and size respectively.
+ * imr_remove_range(base, size); delete IMR from base to base+size.
+ *
+ * @base: physical base address of region aligned to 1 KiB.
+ * @size: physical size of region in bytes aligned to 1 KiB.
+ * @return: -EINVAL on invalid range or out-of-range id
+ *          -ENODEV if no matching IMR exists or it is locked
+ * 0 on success.
+ */
+int imr_remove_range(phys_addr_t base, size_t size)
+{
+ return __imr_remove_range(-1, base, size);
+}
+EXPORT_SYMBOL_GPL(imr_remove_range);
+
+/**
+ * imr_clear - delete an Isolated Memory Region by index
+ *
+ * This function allows you to delete an IMR by its index. Useful for
+ * initial sanitization of the IMR address map.
+ * imr_clear(reg); delete the IMR at index reg.
+ *
+ * @reg: imr index to remove.
+ * @return: -EINVAL on invalid range or out-of-range id
+ * -ENODEV if reg is valid but no IMR exists or is locked
+ * 0 on success.
+ */
+static inline int imr_clear(int reg)
+{
+ return __imr_remove_range(reg, 0, 0);
+}
+
+/**
+ * imr_fixup_memmap - Tear down IMRs used during bootup.
+ *
+ * BIOS and grub both set up IMRs around the compressed kernel and initrd
+ * memory; these need to be removed before the kernel hands out one of the
+ * IMR-encased addresses to a downstream DMA agent such as the SD or
+ * Ethernet controller. IMRs on Galileo are set up to immediately reset the
+ * system on violation. As a result, if you're running a root filesystem
+ * from SD, you'll need the boot-time IMRs torn down or you'll see
+ * seemingly random resets when using your filesystem.
+ *
+ * @idev: pointer to imr_device structure.
+ */
+static void __init imr_fixup_memmap(struct imr_device *idev)
+{
+ phys_addr_t base = virt_to_phys(&_text);
+ size_t size = virt_to_phys(&__end_rodata) - base;
+ unsigned long start, end;
+ int i;
+ int ret;
+
+ /* Tear down all existing unlocked IMRs. */
+ for (i = 0; i < idev->max_imr; i++)
+ imr_clear(i);
+
+ start = (unsigned long)_text;
+ end = (unsigned long)__end_rodata - 1;
+
+ /*
+ * Setup an unlocked IMR around the physical extent of the kernel
+ * from the beginning of the .text section to the end of the
+ * .rodata section as one physically contiguous block.
+ *
+ * We don't round up @size since it is already PAGE_SIZE aligned.
+ * See vmlinux.lds.S for details.
+ */
+ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
+ if (ret < 0) {
+ pr_err("unable to setup IMR for kernel: %zu KiB (%lx - %lx)\n",
+ size / 1024, start, end);
+ } else {
+ pr_info("protecting kernel .text - .rodata: %zu KiB (%lx - %lx)\n",
+ size / 1024, start, end);
+ }
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
+ {}
+};
+
+/**
+ * imr_init - entry point for IMR driver.
+ *
+ * return: -ENODEV if there is no IMR support, 0 if good to go.
+ */
+static int __init imr_init(void)
+{
+ struct imr_device *idev = &imr_dev;
+
+ if (!x86_match_cpu(imr_ids) || !iosf_mbi_available())
+ return -ENODEV;
+
+ idev->max_imr = QUARK_X1000_IMR_MAX;
+ idev->reg_base = QUARK_X1000_IMR_REGBASE;
+ idev->init = true;
+
+ mutex_init(&idev->lock);
+ imr_debugfs_register(idev);
+ imr_fixup_memmap(idev);
+ return 0;
+}
+device_initcall(imr_init);
diff --git a/arch/x86/platform/intel-quark/imr_selftest.c b/arch/x86/platform/intel-quark/imr_selftest.c
new file mode 100644
index 000000000..761f3689f
--- /dev/null
+++ b/arch/x86/platform/intel-quark/imr_selftest.c
@@ -0,0 +1,129 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * imr_selftest.c -- Intel Isolated Memory Region self-test driver
+ *
+ * Copyright(c) 2013 Intel Corporation.
+ * Copyright(c) 2015 Bryan O'Donoghue <pure.logic@nexus-software.ie>
+ *
+ * IMR self test. The purpose of this module is to run a set of tests on the
+ * IMR API to validate its sanity. We check for overlapping, reserved
+ * addresses and setup/teardown sanity.
+ *
+ */
+
+#include <asm-generic/sections.h>
+#include <asm/cpu_device_id.h>
+#include <asm/imr.h>
+#include <asm/io.h>
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/types.h>
+
+#define SELFTEST KBUILD_MODNAME ": "
+/**
+ * imr_self_test_result - Print result string for self test.
+ *
+ * @res: result code - true if test passed false otherwise.
+ * @fmt: format string.
+ * @...: variadic argument list.
+ */
+static __printf(2, 3)
+void __init imr_self_test_result(int res, const char *fmt, ...)
+{
+ va_list vlist;
+
+ /* Print pass/fail. */
+ if (res)
+ pr_info(SELFTEST "pass ");
+ else
+ pr_info(SELFTEST "fail ");
+
+ /* Print variable string. */
+ va_start(vlist, fmt);
+ vprintk(fmt, vlist);
+ va_end(vlist);
+
+ /* Optional warning. */
+ WARN(res == 0, "test failed");
+}
+#undef SELFTEST
+
+/**
+ * imr_self_test
+ *
+ * Run a few simple checks to verify overlap handling, zero-sized
+ * allocations and 1 KiB sized areas.
+ *
+ */
+static void __init imr_self_test(void)
+{
+ phys_addr_t base = virt_to_phys(&_text);
+ size_t size = virt_to_phys(&__end_rodata) - base;
+ const char *fmt_over = "overlapped IMR @ (0x%08lx - 0x%08lx)\n";
+ int ret;
+
+	/* Test a zero-sized IMR at address zero. */
+ ret = imr_add_range(0, 0, 0, 0);
+ imr_self_test_result(ret < 0, "zero sized IMR\n");
+
+ /* Test exact overlap. */
+ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
+ imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+ /* Test overlap with base inside of existing. */
+ base += size - IMR_ALIGN;
+ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
+ imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+ /* Test overlap with end inside of existing. */
+ base -= size + IMR_ALIGN * 2;
+ ret = imr_add_range(base, size, IMR_CPU, IMR_CPU);
+ imr_self_test_result(ret < 0, fmt_over, __va(base), __va(base + size));
+
+ /* Test that a 1 KiB IMR @ zero with read/write all will bomb out. */
+ ret = imr_add_range(0, IMR_ALIGN, IMR_READ_ACCESS_ALL,
+ IMR_WRITE_ACCESS_ALL);
+ imr_self_test_result(ret < 0, "1KiB IMR @ 0x00000000 - access-all\n");
+
+ /* Test that a 1 KiB IMR @ zero with CPU only will work. */
+ ret = imr_add_range(0, IMR_ALIGN, IMR_CPU, IMR_CPU);
+ imr_self_test_result(ret >= 0, "1KiB IMR @ 0x00000000 - cpu-access\n");
+ if (ret >= 0) {
+ ret = imr_remove_range(0, IMR_ALIGN);
+ imr_self_test_result(ret == 0, "teardown - cpu-access\n");
+ }
+
+ /* Test 2 KiB works. */
+ size = IMR_ALIGN * 2;
+ ret = imr_add_range(0, size, IMR_READ_ACCESS_ALL, IMR_WRITE_ACCESS_ALL);
+ imr_self_test_result(ret >= 0, "2KiB IMR @ 0x00000000\n");
+ if (ret >= 0) {
+ ret = imr_remove_range(0, size);
+ imr_self_test_result(ret == 0, "teardown 2KiB\n");
+ }
+}
+
+static const struct x86_cpu_id imr_ids[] __initconst = {
+ X86_MATCH_VENDOR_FAM_MODEL(INTEL, 5, INTEL_FAM5_QUARK_X1000, NULL),
+ {}
+};
+
+/**
+ * imr_self_test_init - entry point for the IMR self-test.
+ *
+ * return: 0 always.
+ */
+static int __init imr_self_test_init(void)
+{
+ if (x86_match_cpu(imr_ids))
+ imr_self_test();
+ return 0;
+}
+
+/*
+ * There is no exit point: the self-test runs once at boot as a device
+ * initcall.
+ */
+device_initcall(imr_self_test_init);
diff --git a/arch/x86/platform/intel/Makefile b/arch/x86/platform/intel/Makefile
new file mode 100644
index 000000000..dbee3b00f
--- /dev/null
+++ b/arch/x86/platform/intel/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_IOSF_MBI) += iosf_mbi.o
diff --git a/arch/x86/platform/intel/iosf_mbi.c b/arch/x86/platform/intel/iosf_mbi.c
new file mode 100644
index 000000000..fdd49d70b
--- /dev/null
+++ b/arch/x86/platform/intel/iosf_mbi.c
@@ -0,0 +1,571 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * IOSF-SB MailBox Interface Driver
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * The IOSF-SB is a fabric bus available on Atom-based SoCs that uses a
+ * mailbox interface (MBI) to communicate with multiple devices. This
+ * driver implements access to this interface for those platforms that can
+ * enumerate the device using PCI.
+ */
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/pci.h>
+#include <linux/debugfs.h>
+#include <linux/capability.h>
+#include <linux/pm_qos.h>
+#include <linux/wait.h>
+
+#include <asm/iosf_mbi.h>
+
+#define PCI_DEVICE_ID_INTEL_BAYTRAIL 0x0F00
+#define PCI_DEVICE_ID_INTEL_BRASWELL 0x2280
+#define PCI_DEVICE_ID_INTEL_QUARK_X1000 0x0958
+#define PCI_DEVICE_ID_INTEL_TANGIER 0x1170
+
+static struct pci_dev *mbi_pdev;
+static DEFINE_SPINLOCK(iosf_mbi_lock);
+
+/**************** Generic iosf_mbi access helpers ****************/
+
+static inline u32 iosf_mbi_form_mcr(u8 op, u8 port, u8 offset)
+{
+ return (op << 24) | (port << 16) | (offset << 8) | MBI_ENABLE;
+}
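+
+/*
+ * For illustration, assuming MBI_REG_READ == 0x10 and MBI_ENABLE == 0xf0
+ * as defined in asm/iosf_mbi.h: a register read of offset 0x84 on port
+ * 0x04 composes as (0x10 << 24) | (0x04 << 16) | (0x84 << 8) | 0xf0 ==
+ * 0x100484f0.
+ */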
+
+static int iosf_mbi_pci_read_mdr(u32 mcrx, u32 mcr, u32 *mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_read;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_read;
+
+ result = pci_read_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_read;
+
+ return 0;
+
+fail_read:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+static int iosf_mbi_pci_write_mdr(u32 mcrx, u32 mcr, u32 mdr)
+{
+ int result;
+
+ if (!mbi_pdev)
+ return -ENODEV;
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MDR_OFFSET, mdr);
+ if (result < 0)
+ goto fail_write;
+
+ if (mcrx) {
+ result = pci_write_config_dword(mbi_pdev, MBI_MCRX_OFFSET,
+ mcrx);
+ if (result < 0)
+ goto fail_write;
+ }
+
+ result = pci_write_config_dword(mbi_pdev, MBI_MCR_OFFSET, mcr);
+ if (result < 0)
+ goto fail_write;
+
+ return 0;
+
+fail_write:
+ dev_err(&mbi_pdev->dev, "PCI config access failed with %d\n", result);
+ return result;
+}
+
+int iosf_mbi_read(u8 port, u8 opcode, u32 offset, u32 *mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_read);
+
+int iosf_mbi_write(u8 port, u8 opcode, u32 offset, u32 mdr)
+{
+ u32 mcr, mcrx;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr, mdr);
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_write);
+
+int iosf_mbi_modify(u8 port, u8 opcode, u32 offset, u32 mdr, u32 mask)
+{
+ u32 mcr, mcrx;
+ u32 value;
+ unsigned long flags;
+ int ret;
+
+ /* Access to the GFX unit is handled by GPU code */
+ if (port == BT_MBI_UNIT_GFX) {
+ WARN_ON(1);
+ return -EPERM;
+ }
+
+ mcr = iosf_mbi_form_mcr(opcode, port, offset & MBI_MASK_LO);
+ mcrx = offset & MBI_MASK_HI;
+
+ spin_lock_irqsave(&iosf_mbi_lock, flags);
+
+ /* Read current mdr value */
+ ret = iosf_mbi_pci_read_mdr(mcrx, mcr & MBI_RD_MASK, &value);
+ if (ret < 0) {
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+ return ret;
+ }
+
+ /* Apply mask */
+ value &= ~mask;
+ mdr &= mask;
+ value |= mdr;
+
+ /* Write back */
+ ret = iosf_mbi_pci_write_mdr(mcrx, mcr | MBI_WR_MASK, value);
+
+ spin_unlock_irqrestore(&iosf_mbi_lock, flags);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_modify);
+
+bool iosf_mbi_available(void)
+{
+	/* MBI isn't hot-pluggable. No remove routine is provided. */
+ return mbi_pdev;
+}
+EXPORT_SYMBOL(iosf_mbi_available);
+
+/*
+ **************** P-Unit/kernel shared I2C bus arbitration ****************
+ *
+ * Some Bay Trail and Cherry Trail devices have the P-Unit and us (the kernel)
+ * share a single I2C bus to the PMIC. Below are helpers to arbitrate the
+ * accesses between the kernel and the P-Unit.
+ *
+ * See arch/x86/include/asm/iosf_mbi.h for kernel-doc text for each function.
+ */
+
+#define SEMAPHORE_TIMEOUT 500
+#define PUNIT_SEMAPHORE_BYT 0x7
+#define PUNIT_SEMAPHORE_CHT 0x10e
+#define PUNIT_SEMAPHORE_BIT BIT(0)
+#define PUNIT_SEMAPHORE_ACQUIRE BIT(1)
+
+static DEFINE_MUTEX(iosf_mbi_pmic_access_mutex);
+static BLOCKING_NOTIFIER_HEAD(iosf_mbi_pmic_bus_access_notifier);
+static DECLARE_WAIT_QUEUE_HEAD(iosf_mbi_pmic_access_waitq);
+static u32 iosf_mbi_pmic_punit_access_count;
+static u32 iosf_mbi_pmic_i2c_access_count;
+static u32 iosf_mbi_sem_address;
+static unsigned long iosf_mbi_sem_acquired;
+static struct pm_qos_request iosf_mbi_pm_qos;
+
+void iosf_mbi_punit_acquire(void)
+{
+ /* Wait for any I2C PMIC accesses from in kernel drivers to finish. */
+ mutex_lock(&iosf_mbi_pmic_access_mutex);
+ while (iosf_mbi_pmic_i2c_access_count != 0) {
+ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+ wait_event(iosf_mbi_pmic_access_waitq,
+ iosf_mbi_pmic_i2c_access_count == 0);
+ mutex_lock(&iosf_mbi_pmic_access_mutex);
+ }
+ /*
+	 * We do not need to do anything to allow the P-Unit to safely access
+	 * the PMIC, other than blocking in-kernel accesses to the PMIC.
+ */
+ iosf_mbi_pmic_punit_access_count++;
+ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+}
+EXPORT_SYMBOL(iosf_mbi_punit_acquire);
+
+void iosf_mbi_punit_release(void)
+{
+ bool do_wakeup;
+
+ mutex_lock(&iosf_mbi_pmic_access_mutex);
+ iosf_mbi_pmic_punit_access_count--;
+ do_wakeup = iosf_mbi_pmic_punit_access_count == 0;
+ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+
+ if (do_wakeup)
+ wake_up(&iosf_mbi_pmic_access_waitq);
+}
+EXPORT_SYMBOL(iosf_mbi_punit_release);
+
+static int iosf_mbi_get_sem(u32 *sem)
+{
+ int ret;
+
+ ret = iosf_mbi_read(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ iosf_mbi_sem_address, sem);
+ if (ret) {
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore read failed\n");
+ return ret;
+ }
+
+ *sem &= PUNIT_SEMAPHORE_BIT;
+ return 0;
+}
+
+static void iosf_mbi_reset_semaphore(void)
+{
+ if (iosf_mbi_modify(BT_MBI_UNIT_PMC, MBI_REG_READ,
+ iosf_mbi_sem_address, 0, PUNIT_SEMAPHORE_BIT))
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore reset failed\n");
+
+ cpu_latency_qos_update_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
+ MBI_PMIC_BUS_ACCESS_END, NULL);
+}
+
+/*
+ * This function blocks P-Unit accesses to the PMIC I2C bus, so that kernel
+ * I2C code, such as a fuel-gauge driver, can access it safely.
+ *
+ * This function may be called by I2C controller code while an I2C driver has
+ * already blocked P-Unit accesses because it wants them blocked over multiple
+ * I2C transfers, e.g. for a read-modify-write of an I2C client register.
+ *
+ * To allow safe PMIC I2C bus accesses, this function takes the following steps:
+ *
+ * 1) Some code sends requests to the P-Unit which make it access the PMIC
+ * I2C bus. Testing has shown that the P-Unit does not check its internal
+ * PMIC bus semaphore for these requests. Callers of these requests call
+ * iosf_mbi_punit_acquire()/_release() around their P-Unit accesses, these
+ * functions increase/decrease iosf_mbi_pmic_punit_access_count, so first
+ * we wait for iosf_mbi_pmic_punit_access_count to become 0.
+ *
+ * 2) Check iosf_mbi_pmic_i2c_access_count, if access has already
+ * been blocked by another caller, we only need to increment
+ * iosf_mbi_pmic_i2c_access_count and we can skip the other steps.
+ *
+ * 3) Some code makes such P-Unit requests from atomic contexts where it
+ * cannot call iosf_mbi_punit_acquire() as that may sleep.
+ *    Next, we call a notifier chain which allows any code
+ * needing P-Unit resources from atomic context to acquire them before
+ * we take control over the PMIC I2C bus.
+ *
+ * 4) When CPU cores enter C6 or C7 the P-Unit needs to talk to the PMIC
+ * if this happens while the kernel itself is accessing the PMIC I2C bus
+ * the SoC hangs.
+ *    Next, we call cpu_latency_qos_update_request() to disallow the
+ * CPU to enter C6 or C7.
+ *
+ * 5) The P-Unit has a PMIC bus semaphore which we can request to stop
+ * autonomous P-Unit tasks from accessing the PMIC I2C bus while we hold it.
+ *    Finally, we request this semaphore and wait for our
+ * request to be acknowledged.
+ */
+int iosf_mbi_block_punit_i2c_access(void)
+{
+ unsigned long start, end;
+ int ret = 0;
+ u32 sem;
+
+ if (WARN_ON(!mbi_pdev || !iosf_mbi_sem_address))
+ return -ENXIO;
+
+ mutex_lock(&iosf_mbi_pmic_access_mutex);
+
+ while (iosf_mbi_pmic_punit_access_count != 0) {
+ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+ wait_event(iosf_mbi_pmic_access_waitq,
+ iosf_mbi_pmic_punit_access_count == 0);
+ mutex_lock(&iosf_mbi_pmic_access_mutex);
+ }
+
+ if (iosf_mbi_pmic_i2c_access_count > 0)
+ goto success;
+
+ blocking_notifier_call_chain(&iosf_mbi_pmic_bus_access_notifier,
+ MBI_PMIC_BUS_ACCESS_BEGIN, NULL);
+
+ /*
+ * Disallow the CPU to enter C6 or C7 state, entering these states
+ * requires the P-Unit to talk to the PMIC and if this happens while
+ * we're holding the semaphore, the SoC hangs.
+ */
+ cpu_latency_qos_update_request(&iosf_mbi_pm_qos, 0);
+
+ /* host driver writes to side band semaphore register */
+ ret = iosf_mbi_write(BT_MBI_UNIT_PMC, MBI_REG_WRITE,
+ iosf_mbi_sem_address, PUNIT_SEMAPHORE_ACQUIRE);
+ if (ret) {
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore request failed\n");
+ goto error;
+ }
+
+ /* host driver waits for bit 0 to be set in semaphore register */
+ start = jiffies;
+ end = start + msecs_to_jiffies(SEMAPHORE_TIMEOUT);
+ do {
+ ret = iosf_mbi_get_sem(&sem);
+ if (!ret && sem) {
+ iosf_mbi_sem_acquired = jiffies;
+ dev_dbg(&mbi_pdev->dev, "P-Unit semaphore acquired after %ums\n",
+ jiffies_to_msecs(jiffies - start));
+ goto success;
+ }
+
+ usleep_range(1000, 2000);
+ } while (time_before(jiffies, end));
+
+ ret = -ETIMEDOUT;
+ dev_err(&mbi_pdev->dev, "Error P-Unit semaphore timed out, resetting\n");
+error:
+ iosf_mbi_reset_semaphore();
+ if (!iosf_mbi_get_sem(&sem))
+ dev_err(&mbi_pdev->dev, "P-Unit semaphore: %d\n", sem);
+success:
+ if (!WARN_ON(ret))
+ iosf_mbi_pmic_i2c_access_count++;
+
+ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_block_punit_i2c_access);
+
+void iosf_mbi_unblock_punit_i2c_access(void)
+{
+ bool do_wakeup = false;
+
+ mutex_lock(&iosf_mbi_pmic_access_mutex);
+ iosf_mbi_pmic_i2c_access_count--;
+ if (iosf_mbi_pmic_i2c_access_count == 0) {
+ iosf_mbi_reset_semaphore();
+ dev_dbg(&mbi_pdev->dev, "punit semaphore held for %ums\n",
+ jiffies_to_msecs(jiffies - iosf_mbi_sem_acquired));
+ do_wakeup = true;
+ }
+ mutex_unlock(&iosf_mbi_pmic_access_mutex);
+
+ if (do_wakeup)
+ wake_up(&iosf_mbi_pmic_access_waitq);
+}
+EXPORT_SYMBOL(iosf_mbi_unblock_punit_i2c_access);
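+
+/*
+ * Typical usage sketch (hypothetical I2C-driver caller, error handling
+ * trimmed) showing how the block/unblock pair brackets a PMIC access:
+ *
+ *	ret = iosf_mbi_block_punit_i2c_access();
+ *	if (ret)
+ *		return ret;
+ *	...read-modify-write a PMIC register over I2C...
+ *	iosf_mbi_unblock_punit_i2c_access();
+ */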
+
+int iosf_mbi_register_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ /* Wait for the bus to go inactive before registering */
+ iosf_mbi_punit_acquire();
+ ret = blocking_notifier_chain_register(
+ &iosf_mbi_pmic_bus_access_notifier, nb);
+ iosf_mbi_punit_release();
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_register_pmic_bus_access_notifier);
+
+int iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(
+ struct notifier_block *nb)
+{
+ iosf_mbi_assert_punit_acquired();
+
+ return blocking_notifier_chain_unregister(
+ &iosf_mbi_pmic_bus_access_notifier, nb);
+}
+EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier_unlocked);
+
+int iosf_mbi_unregister_pmic_bus_access_notifier(struct notifier_block *nb)
+{
+ int ret;
+
+ /* Wait for the bus to go inactive before unregistering */
+ iosf_mbi_punit_acquire();
+ ret = iosf_mbi_unregister_pmic_bus_access_notifier_unlocked(nb);
+ iosf_mbi_punit_release();
+
+ return ret;
+}
+EXPORT_SYMBOL(iosf_mbi_unregister_pmic_bus_access_notifier);
+
+void iosf_mbi_assert_punit_acquired(void)
+{
+ WARN_ON(iosf_mbi_pmic_punit_access_count == 0);
+}
+EXPORT_SYMBOL(iosf_mbi_assert_punit_acquired);
+
+/**************** iosf_mbi debug code ****************/
+
+#ifdef CONFIG_IOSF_MBI_DEBUG
+static u32 dbg_mdr;
+static u32 dbg_mcr;
+static u32 dbg_mcrx;
+
+static int mcr_get(void *data, u64 *val)
+{
+ *val = *(u32 *)data;
+ return 0;
+}
+
+static int mcr_set(void *data, u64 val)
+{
+ u8 command = ((u32)val & 0xFF000000) >> 24,
+ port = ((u32)val & 0x00FF0000) >> 16,
+ offset = ((u32)val & 0x0000FF00) >> 8;
+ int err;
+
+ *(u32 *)data = val;
+
+ if (!capable(CAP_SYS_RAWIO))
+ return -EACCES;
+
+ if (command & 1u)
+ err = iosf_mbi_write(port,
+ command,
+ dbg_mcrx | offset,
+ dbg_mdr);
+ else
+ err = iosf_mbi_read(port,
+ command,
+ dbg_mcrx | offset,
+ &dbg_mdr);
+
+ return err;
+}
+DEFINE_SIMPLE_ATTRIBUTE(iosf_mcr_fops, mcr_get, mcr_set, "%llx\n");
+
+static struct dentry *iosf_dbg;
+
+static void iosf_sideband_debug_init(void)
+{
+ iosf_dbg = debugfs_create_dir("iosf_sb", NULL);
+
+ /* mdr */
+ debugfs_create_x32("mdr", 0660, iosf_dbg, &dbg_mdr);
+
+ /* mcrx */
+ debugfs_create_x32("mcrx", 0660, iosf_dbg, &dbg_mcrx);
+
+ /* mcr - initiates mailbox transaction */
+ debugfs_create_file("mcr", 0660, iosf_dbg, &dbg_mcr, &iosf_mcr_fops);
+}
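+
+/*
+ * Example usage (hypothetical values, requires CAP_SYS_RAWIO): writing a
+ * composed MCR word such as 0x100484f0 to <debugfs>/iosf_sb/mcr triggers
+ * the mailbox transaction; for a read, the result can then be fetched
+ * from <debugfs>/iosf_sb/mdr.
+ */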
+
+static void iosf_debugfs_init(void)
+{
+ iosf_sideband_debug_init();
+}
+
+static void iosf_debugfs_remove(void)
+{
+ debugfs_remove_recursive(iosf_dbg);
+}
+#else
+static inline void iosf_debugfs_init(void) { }
+static inline void iosf_debugfs_remove(void) { }
+#endif /* CONFIG_IOSF_MBI_DEBUG */
+
+static int iosf_mbi_probe(struct pci_dev *pdev,
+ const struct pci_device_id *dev_id)
+{
+ int ret;
+
+ ret = pci_enable_device(pdev);
+ if (ret < 0) {
+ dev_err(&pdev->dev, "error: could not enable device\n");
+ return ret;
+ }
+
+ mbi_pdev = pci_dev_get(pdev);
+ iosf_mbi_sem_address = dev_id->driver_data;
+
+ return 0;
+}
+
+static const struct pci_device_id iosf_mbi_pci_ids[] = {
+ { PCI_DEVICE_DATA(INTEL, BAYTRAIL, PUNIT_SEMAPHORE_BYT) },
+ { PCI_DEVICE_DATA(INTEL, BRASWELL, PUNIT_SEMAPHORE_CHT) },
+ { PCI_DEVICE_DATA(INTEL, QUARK_X1000, 0) },
+ { PCI_DEVICE_DATA(INTEL, TANGIER, 0) },
+ { 0, },
+};
+MODULE_DEVICE_TABLE(pci, iosf_mbi_pci_ids);
+
+static struct pci_driver iosf_mbi_pci_driver = {
+ .name = "iosf_mbi_pci",
+ .probe = iosf_mbi_probe,
+ .id_table = iosf_mbi_pci_ids,
+};
+
+static int __init iosf_mbi_init(void)
+{
+ iosf_debugfs_init();
+
+ cpu_latency_qos_add_request(&iosf_mbi_pm_qos, PM_QOS_DEFAULT_VALUE);
+
+ return pci_register_driver(&iosf_mbi_pci_driver);
+}
+
+static void __exit iosf_mbi_exit(void)
+{
+ iosf_debugfs_remove();
+
+ pci_unregister_driver(&iosf_mbi_pci_driver);
+ pci_dev_put(mbi_pdev);
+ mbi_pdev = NULL;
+
+ cpu_latency_qos_remove_request(&iosf_mbi_pm_qos);
+}
+
+module_init(iosf_mbi_init);
+module_exit(iosf_mbi_exit);
+
+MODULE_AUTHOR("David E. Box <david.e.box@linux.intel.com>");
+MODULE_DESCRIPTION("IOSF Mailbox Interface accessor");
+MODULE_LICENSE("GPL v2");
diff --git a/arch/x86/platform/iris/Makefile b/arch/x86/platform/iris/Makefile
new file mode 100644
index 000000000..354352748
--- /dev/null
+++ b/arch/x86/platform/iris/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_X86_32_IRIS) += iris.o
diff --git a/arch/x86/platform/iris/iris.c b/arch/x86/platform/iris/iris.c
new file mode 100644
index 000000000..b42bfdab0
--- /dev/null
+++ b/arch/x86/platform/iris/iris.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Eurobraille/Iris power off support.
+ *
+ * Eurobraille's Iris machine is a PC with no APM or ACPI support.
+ * It is shut down by a special I/O sequence, which this module provides.
+ *
+ * Copyright (C) Shérab <Sebastien.Hinderer@ens-lyon.org>
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/module.h>
+#include <linux/platform_device.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/delay.h>
+#include <linux/pm.h>
+#include <asm/io.h>
+
+#define IRIS_GIO_BASE 0x340
+#define IRIS_GIO_INPUT IRIS_GIO_BASE
+#define IRIS_GIO_OUTPUT (IRIS_GIO_BASE + 1)
+#define IRIS_GIO_PULSE 0x80 /* First byte to send */
+#define IRIS_GIO_REST 0x00 /* Second byte to send */
+#define IRIS_GIO_NODEV 0xff /* Likely not an Iris */
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Sébastien Hinderer <Sebastien.Hinderer@ens-lyon.org>");
+MODULE_DESCRIPTION("A power_off handler for Iris devices from EuroBraille");
+
+static bool force;
+
+module_param(force, bool, 0);
+MODULE_PARM_DESC(force, "Set to one to force poweroff handler installation.");
+
+static void (*old_pm_power_off)(void);
+
+static void iris_power_off(void)
+{
+ outb(IRIS_GIO_PULSE, IRIS_GIO_OUTPUT);
+ msleep(850);
+ outb(IRIS_GIO_REST, IRIS_GIO_OUTPUT);
+}
+
+/*
+ * Before installing the power_off handler, try to make sure the OS is
+ * running on an Iris. Since Iris does not support DMI, this is done
+ * by reading its input port and seeing whether the read value is
+ * meaningful.
+ */
+static int iris_probe(struct platform_device *pdev)
+{
+ unsigned char status = inb(IRIS_GIO_INPUT);
+ if (status == IRIS_GIO_NODEV) {
+ printk(KERN_ERR "This machine does not seem to be an Iris. "
+ "Power off handler not installed.\n");
+ return -ENODEV;
+ }
+ old_pm_power_off = pm_power_off;
+ pm_power_off = &iris_power_off;
+ printk(KERN_INFO "Iris power_off handler installed.\n");
+ return 0;
+}
+
+static int iris_remove(struct platform_device *pdev)
+{
+ pm_power_off = old_pm_power_off;
+ printk(KERN_INFO "Iris power_off handler uninstalled.\n");
+ return 0;
+}
+
+static struct platform_driver iris_driver = {
+ .driver = {
+ .name = "iris",
+ },
+ .probe = iris_probe,
+ .remove = iris_remove,
+};
+
+static struct resource iris_resources[] = {
+ {
+ .start = IRIS_GIO_BASE,
+ .end = IRIS_GIO_OUTPUT,
+ .flags = IORESOURCE_IO,
+ .name = "address"
+ }
+};
+
+static struct platform_device *iris_device;
+
+static int iris_init(void)
+{
+ int ret;
+ if (force != 1) {
+ printk(KERN_ERR "The force parameter has not been set to 1."
+ " The Iris poweroff handler will not be installed.\n");
+ return -ENODEV;
+ }
+ ret = platform_driver_register(&iris_driver);
+ if (ret < 0) {
+ printk(KERN_ERR "Failed to register iris platform driver: %d\n",
+ ret);
+ return ret;
+ }
+	iris_device = platform_device_register_simple("iris", -1,
+ iris_resources, ARRAY_SIZE(iris_resources));
+ if (IS_ERR(iris_device)) {
+ printk(KERN_ERR "Failed to register iris platform device\n");
+ platform_driver_unregister(&iris_driver);
+ return PTR_ERR(iris_device);
+ }
+ return 0;
+}
+
+static void iris_exit(void)
+{
+ platform_device_unregister(iris_device);
+ platform_driver_unregister(&iris_driver);
+}
+
+module_init(iris_init);
+module_exit(iris_exit);
diff --git a/arch/x86/platform/olpc/Makefile b/arch/x86/platform/olpc/Makefile
new file mode 100644
index 000000000..049f92a93
--- /dev/null
+++ b/arch/x86/platform/olpc/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0
+obj-$(CONFIG_OLPC) += olpc.o olpc_ofw.o olpc_dt.o
+obj-$(CONFIG_OLPC_XO1_PM) += olpc-xo1-pm.o xo1-wakeup.o
+obj-$(CONFIG_OLPC_XO1_RTC) += olpc-xo1-rtc.o
+obj-$(CONFIG_OLPC_XO1_SCI) += olpc-xo1-sci.o
+obj-$(CONFIG_OLPC_XO15_SCI) += olpc-xo15-sci.o
diff --git a/arch/x86/platform/olpc/olpc-xo1-pm.c b/arch/x86/platform/olpc/olpc-xo1-pm.c
new file mode 100644
index 000000000..f067ac780
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc-xo1-pm.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for power management features of the OLPC XO-1 laptop
+ *
+ * Copyright (C) 2010 Andres Salomon <dilinger@queued.net>
+ * Copyright (C) 2010 One Laptop per Child
+ * Copyright (C) 2006 Red Hat, Inc.
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cs5535.h>
+#include <linux/platform_device.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/suspend.h>
+#include <linux/olpc-ec.h>
+
+#include <asm/io.h>
+#include <asm/olpc.h>
+
+#define DRV_NAME "olpc-xo1-pm"
+
+static unsigned long acpi_base;
+static unsigned long pms_base;
+
+static u16 wakeup_mask = CS5536_PM_PWRBTN;
+
+static struct {
+ unsigned long address;
+ unsigned short segment;
+} ofw_bios_entry = { 0xF0000 + PAGE_OFFSET, __KERNEL_CS };
+
+/* Set bits in the wakeup mask */
+void olpc_xo1_pm_wakeup_set(u16 value)
+{
+ wakeup_mask |= value;
+}
+EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_set);
+
+/* Clear bits in the wakeup mask */
+void olpc_xo1_pm_wakeup_clear(u16 value)
+{
+ wakeup_mask &= ~value;
+}
+EXPORT_SYMBOL_GPL(olpc_xo1_pm_wakeup_clear);
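+
+/*
+ * Usage sketch: a wake source is armed before suspend and disarmed when no
+ * longer wanted, e.g. for the RTC (as the XO-1 RTC driver does):
+ *
+ *	olpc_xo1_pm_wakeup_set(CS5536_PM_RTC);
+ *	...
+ *	olpc_xo1_pm_wakeup_clear(CS5536_PM_RTC);
+ */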
+
+static int xo1_power_state_enter(suspend_state_t pm_state)
+{
+ unsigned long saved_sci_mask;
+
+ /* Only STR is supported */
+ if (pm_state != PM_SUSPEND_MEM)
+ return -EINVAL;
+
+ /*
+ * Save SCI mask (this gets lost since PM1_EN is used as a mask for
+ * wakeup events, which is not necessarily the same event set)
+ */
+ saved_sci_mask = inl(acpi_base + CS5536_PM1_STS);
+ saved_sci_mask &= 0xffff0000;
+
+ /* Save CPU state */
+ do_olpc_suspend_lowlevel();
+
+ /* Resume path starts here */
+
+ /* Restore SCI mask (using dword access to CS5536_PM1_EN) */
+ outl(saved_sci_mask, acpi_base + CS5536_PM1_STS);
+
+ return 0;
+}
+
+asmlinkage __visible int xo1_do_sleep(u8 sleep_state)
+{
+ void *pgd_addr = __va(read_cr3_pa());
+
+ /* Program wakeup mask (using dword access to CS5536_PM1_EN) */
+ outl(wakeup_mask << 16, acpi_base + CS5536_PM1_STS);
+
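+	/* Hand our page directory to OFW in %eax and call through its entry
+	 * descriptor; the system enters the sleep state inside this call. */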
+ __asm__("movl %0,%%eax" : : "r" (pgd_addr));
+ __asm__("call *(%%edi); cld"
+ : : "D" (&ofw_bios_entry));
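+	/* Leave a progress marker: POST code 0x30 in CMOS scratch index 0x34,
+	 * matching the writepost convention in xo1-wakeup.S. */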
+ __asm__("movb $0x34, %al\n\t"
+ "outb %al, $0x70\n\t"
+ "movb $0x30, %al\n\t"
+ "outb %al, $0x71\n\t");
+ return 0;
+}
+
+static void xo1_power_off(void)
+{
+ printk(KERN_INFO "OLPC XO-1 power off sequence...\n");
+
+ /* Enable all of these controls with 0 delay */
+ outl(0x40000000, pms_base + CS5536_PM_SCLK);
+ outl(0x40000000, pms_base + CS5536_PM_IN_SLPCTL);
+ outl(0x40000000, pms_base + CS5536_PM_WKXD);
+ outl(0x40000000, pms_base + CS5536_PM_WKD);
+
+ /* Clear status bits (possibly unnecessary) */
+ outl(0x0002ffff, pms_base + CS5536_PM_SSC);
+ outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
+
+ /* Write SLP_EN bit to start the machinery */
+ outl(0x00002000, acpi_base + CS5536_PM1_CNT);
+}
+
+static int xo1_power_state_valid(suspend_state_t pm_state)
+{
+ /* suspend-to-RAM only */
+ return pm_state == PM_SUSPEND_MEM;
+}
+
+static const struct platform_suspend_ops xo1_suspend_ops = {
+ .valid = xo1_power_state_valid,
+ .enter = xo1_power_state_enter,
+};
+
+static int xo1_pm_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+
+ /* don't run on non-XOs */
+ if (!machine_is_olpc())
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+ if (strcmp(pdev->name, "cs5535-pms") == 0)
+ pms_base = res->start;
+ else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
+ acpi_base = res->start;
+
+ /* If we have both addresses, we can override the poweroff hook */
+ if (pms_base && acpi_base) {
+ suspend_set_ops(&xo1_suspend_ops);
+ pm_power_off = xo1_power_off;
+ printk(KERN_INFO "OLPC XO-1 support registered\n");
+ }
+
+ return 0;
+}
+
+static int xo1_pm_remove(struct platform_device *pdev)
+{
+ if (strcmp(pdev->name, "cs5535-pms") == 0)
+ pms_base = 0;
+ else if (strcmp(pdev->name, "olpc-xo1-pm-acpi") == 0)
+ acpi_base = 0;
+
+ pm_power_off = NULL;
+ return 0;
+}
+
+static struct platform_driver cs5535_pms_driver = {
+ .driver = {
+ .name = "cs5535-pms",
+ },
+ .probe = xo1_pm_probe,
+ .remove = xo1_pm_remove,
+};
+
+static struct platform_driver cs5535_acpi_driver = {
+ .driver = {
+ .name = "olpc-xo1-pm-acpi",
+ },
+ .probe = xo1_pm_probe,
+ .remove = xo1_pm_remove,
+};
+
+static int __init xo1_pm_init(void)
+{
+ int r;
+
+ r = platform_driver_register(&cs5535_pms_driver);
+ if (r)
+ return r;
+
+ r = platform_driver_register(&cs5535_acpi_driver);
+ if (r)
+ platform_driver_unregister(&cs5535_pms_driver);
+
+ return r;
+}
+arch_initcall(xo1_pm_init);
diff --git a/arch/x86/platform/olpc/olpc-xo1-rtc.c b/arch/x86/platform/olpc/olpc-xo1-rtc.c
new file mode 100644
index 000000000..57f210cda
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc-xo1-rtc.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for OLPC XO-1 Real Time Clock (RTC)
+ *
+ * Copyright (C) 2011 One Laptop per Child
+ */
+
+#include <linux/mc146818rtc.h>
+#include <linux/platform_device.h>
+#include <linux/rtc.h>
+#include <linux/of.h>
+
+#include <asm/msr.h>
+#include <asm/olpc.h>
+#include <asm/x86_init.h>
+
+static void rtc_wake_on(struct device *dev)
+{
+ olpc_xo1_pm_wakeup_set(CS5536_PM_RTC);
+}
+
+static void rtc_wake_off(struct device *dev)
+{
+ olpc_xo1_pm_wakeup_clear(CS5536_PM_RTC);
+}
+
+static struct resource rtc_platform_resource[] = {
+ [0] = {
+ .start = RTC_PORT(0),
+ .end = RTC_PORT(1),
+ .flags = IORESOURCE_IO,
+ },
+ [1] = {
+ .start = RTC_IRQ,
+ .end = RTC_IRQ,
+ .flags = IORESOURCE_IRQ,
+ }
+};
+
+static struct cmos_rtc_board_info rtc_info = {
+ .rtc_day_alarm = 0,
+ .rtc_mon_alarm = 0,
+ .rtc_century = 0,
+ .wake_on = rtc_wake_on,
+ .wake_off = rtc_wake_off,
+};
+
+static struct platform_device xo1_rtc_device = {
+ .name = "rtc_cmos",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(rtc_platform_resource),
+ .dev.platform_data = &rtc_info,
+ .resource = rtc_platform_resource,
+};
+
+static int __init xo1_rtc_init(void)
+{
+ int r;
+ struct device_node *node;
+
+ node = of_find_compatible_node(NULL, NULL, "olpc,xo1-rtc");
+ if (!node)
+ return 0;
+ of_node_put(node);
+
+ pr_info("olpc-xo1-rtc: Initializing OLPC XO-1 RTC\n");
+ rdmsrl(MSR_RTC_DOMA_OFFSET, rtc_info.rtc_day_alarm);
+ rdmsrl(MSR_RTC_MONA_OFFSET, rtc_info.rtc_mon_alarm);
+ rdmsrl(MSR_RTC_CEN_OFFSET, rtc_info.rtc_century);
+
+ r = platform_device_register(&xo1_rtc_device);
+ if (r)
+ return r;
+
+ x86_platform.legacy.rtc = 0;
+
+ device_init_wakeup(&xo1_rtc_device.dev, 1);
+ return 0;
+}
+arch_initcall(xo1_rtc_init);
diff --git a/arch/x86/platform/olpc/olpc-xo1-sci.c b/arch/x86/platform/olpc/olpc-xo1-sci.c
new file mode 100644
index 000000000..89f25af4b
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc-xo1-sci.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for OLPC XO-1 System Control Interrupts (SCI)
+ *
+ * Copyright (C) 2010 One Laptop per Child
+ * Copyright (C) 2006 Red Hat, Inc.
+ * Copyright (C) 2006 Advanced Micro Devices, Inc.
+ */
+
+#include <linux/cs5535.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/pm.h>
+#include <linux/pm_wakeup.h>
+#include <linux/power_supply.h>
+#include <linux/suspend.h>
+#include <linux/workqueue.h>
+#include <linux/olpc-ec.h>
+
+#include <asm/io.h>
+#include <asm/msr.h>
+#include <asm/olpc.h>
+
+#define DRV_NAME "olpc-xo1-sci"
+#define PFX DRV_NAME ": "
+
+static unsigned long acpi_base;
+static struct input_dev *power_button_idev;
+static struct input_dev *ebook_switch_idev;
+static struct input_dev *lid_switch_idev;
+
+static int sci_irq;
+
+static bool lid_open;
+static bool lid_inverted;
+static int lid_wake_mode;
+
+enum lid_wake_modes {
+ LID_WAKE_ALWAYS,
+ LID_WAKE_OPEN,
+ LID_WAKE_CLOSE,
+};
+
+static const char * const lid_wake_mode_names[] = {
+ [LID_WAKE_ALWAYS] = "always",
+ [LID_WAKE_OPEN] = "open",
+ [LID_WAKE_CLOSE] = "close",
+};
+
+static void battery_status_changed(void)
+{
+ struct power_supply *psy = power_supply_get_by_name("olpc_battery");
+
+ if (psy) {
+ power_supply_changed(psy);
+ power_supply_put(psy);
+ }
+}
+
+static void ac_status_changed(void)
+{
+ struct power_supply *psy = power_supply_get_by_name("olpc_ac");
+
+ if (psy) {
+ power_supply_changed(psy);
+ power_supply_put(psy);
+ }
+}
+
+/* Report current ebook switch state through input layer */
+static void send_ebook_state(void)
+{
+ unsigned char state;
+
+ if (olpc_ec_cmd(EC_READ_EB_MODE, NULL, 0, &state, 1)) {
+ pr_err(PFX "failed to get ebook state\n");
+ return;
+ }
+
+ if (test_bit(SW_TABLET_MODE, ebook_switch_idev->sw) == !!state)
+ return; /* Nothing new to report. */
+
+ input_report_switch(ebook_switch_idev, SW_TABLET_MODE, state);
+ input_sync(ebook_switch_idev);
+ pm_wakeup_event(&ebook_switch_idev->dev, 0);
+}
+
+static void flip_lid_inverter(void)
+{
+ /* gpio is high; invert so we'll get l->h event interrupt */
+ if (lid_inverted)
+ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
+ else
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
+ lid_inverted = !lid_inverted;
+}
+
+static void detect_lid_state(void)
+{
+ /*
+ * the edge detector hookup on the gpio inputs on the geode is
+ * odd, to say the least. See http://dev.laptop.org/ticket/5703
+ * for details, but in a nutshell: we don't use the edge
+	 * detectors. instead, we make use of an anomaly: with both
+ * edge detectors turned off, we still get an edge event on a
+ * positive edge transition. to take advantage of this, we use the
+ * front-end inverter to ensure that that's the edge we're always
+ * going to see next.
+ */
+
+ int state;
+
+ state = cs5535_gpio_isset(OLPC_GPIO_LID, GPIO_READ_BACK);
+ lid_open = !state ^ !lid_inverted; /* x ^^ y */
+ if (!state)
+ return;
+
+ flip_lid_inverter();
+}
+
+/* Report current lid switch state through input layer */
+static void send_lid_state(void)
+{
+ if (!!test_bit(SW_LID, lid_switch_idev->sw) == !lid_open)
+ return; /* Nothing new to report. */
+
+ input_report_switch(lid_switch_idev, SW_LID, !lid_open);
+ input_sync(lid_switch_idev);
+ pm_wakeup_event(&lid_switch_idev->dev, 0);
+}
+
+static ssize_t lid_wake_mode_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ const char *mode = lid_wake_mode_names[lid_wake_mode];
+ return sprintf(buf, "%s\n", mode);
+}
+static ssize_t lid_wake_mode_set(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ int i;
+ for (i = 0; i < ARRAY_SIZE(lid_wake_mode_names); i++) {
+ const char *mode = lid_wake_mode_names[i];
+ if (strlen(mode) != count || strncasecmp(mode, buf, count))
+ continue;
+
+ lid_wake_mode = i;
+ return count;
+ }
+ return -EINVAL;
+}
+static DEVICE_ATTR(lid_wake_mode, S_IWUSR | S_IRUGO, lid_wake_mode_show,
+ lid_wake_mode_set);
+
+static struct attribute *lid_attrs[] = {
+ &dev_attr_lid_wake_mode.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(lid);
+
+/*
+ * Process all items in the EC's SCI queue.
+ *
+ * This is handled in a workqueue because olpc_ec_cmd can be slow (and
+ * can even timeout).
+ *
+ * If propagate_events is false, the queue is drained without events being
+ * generated for the interrupts.
+ */
+static void process_sci_queue(bool propagate_events)
+{
+ int r;
+ u16 data;
+
+ do {
+ r = olpc_ec_sci_query(&data);
+ if (r || !data)
+ break;
+
+ pr_debug(PFX "SCI 0x%x received\n", data);
+
+ switch (data) {
+ case EC_SCI_SRC_BATERR:
+ case EC_SCI_SRC_BATSOC:
+ case EC_SCI_SRC_BATTERY:
+ case EC_SCI_SRC_BATCRIT:
+ battery_status_changed();
+ break;
+ case EC_SCI_SRC_ACPWR:
+ ac_status_changed();
+ break;
+ }
+
+ if (data == EC_SCI_SRC_EBOOK && propagate_events)
+ send_ebook_state();
+ } while (data);
+
+ if (r)
+		pr_err(PFX "Failed to clear SCI queue\n");
+}
+
+static void process_sci_queue_work(struct work_struct *work)
+{
+ process_sci_queue(true);
+}
+
+static DECLARE_WORK(sci_work, process_sci_queue_work);
+
+static irqreturn_t xo1_sci_intr(int irq, void *dev_id)
+{
+ struct platform_device *pdev = dev_id;
+ u32 sts;
+ u32 gpe;
+
+ sts = inl(acpi_base + CS5536_PM1_STS);
+ outl(sts | 0xffff, acpi_base + CS5536_PM1_STS);
+
+ gpe = inl(acpi_base + CS5536_PM_GPE0_STS);
+ outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
+
+ dev_dbg(&pdev->dev, "sts %x gpe %x\n", sts, gpe);
+
+ if (sts & CS5536_PWRBTN_FLAG) {
+ if (!(sts & CS5536_WAK_FLAG)) {
+ /* Only report power button input when it was pressed
+ * during regular operation (as opposed to when it
+ * was used to wake the system). */
+ input_report_key(power_button_idev, KEY_POWER, 1);
+ input_sync(power_button_idev);
+ input_report_key(power_button_idev, KEY_POWER, 0);
+ input_sync(power_button_idev);
+ }
+ /* Report the wakeup event in all cases. */
+ pm_wakeup_event(&power_button_idev->dev, 0);
+ }
+
+ if ((sts & (CS5536_RTC_FLAG | CS5536_WAK_FLAG)) ==
+ (CS5536_RTC_FLAG | CS5536_WAK_FLAG)) {
+ /* When the system is woken by the RTC alarm, report the
+ * event on the rtc device. */
+ struct device *rtc = bus_find_device_by_name(
+ &platform_bus_type, NULL, "rtc_cmos");
+ if (rtc) {
+ pm_wakeup_event(rtc, 0);
+ put_device(rtc);
+ }
+ }
+
+ if (gpe & CS5536_GPIOM7_PME_FLAG) { /* EC GPIO */
+ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
+ schedule_work(&sci_work);
+ }
+
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
+ detect_lid_state();
+ send_lid_state();
+
+ return IRQ_HANDLED;
+}
+
+static int xo1_sci_suspend(struct platform_device *pdev, pm_message_t state)
+{
+ if (device_may_wakeup(&power_button_idev->dev))
+ olpc_xo1_pm_wakeup_set(CS5536_PM_PWRBTN);
+ else
+ olpc_xo1_pm_wakeup_clear(CS5536_PM_PWRBTN);
+
+ if (device_may_wakeup(&ebook_switch_idev->dev))
+ olpc_ec_wakeup_set(EC_SCI_SRC_EBOOK);
+ else
+ olpc_ec_wakeup_clear(EC_SCI_SRC_EBOOK);
+
+ if (!device_may_wakeup(&lid_switch_idev->dev)) {
+ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
+ } else if ((lid_open && lid_wake_mode == LID_WAKE_OPEN) ||
+ (!lid_open && lid_wake_mode == LID_WAKE_CLOSE)) {
+ flip_lid_inverter();
+
+ /* we may have just caused an event */
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
+
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
+ }
+
+ return 0;
+}
+
+static int xo1_sci_resume(struct platform_device *pdev)
+{
+ /*
+ * We don't know what may have happened while we were asleep.
+ * Reestablish our lid setup so we're sure to catch all transitions.
+ */
+ detect_lid_state();
+ send_lid_state();
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
+
+ /* Enable all EC events */
+ olpc_ec_mask_write(EC_SCI_SRC_ALL);
+
+ /* Power/battery status might have changed too */
+ battery_status_changed();
+ ac_status_changed();
+ return 0;
+}
+
+static int setup_sci_interrupt(struct platform_device *pdev)
+{
+ u32 lo, hi;
+ u32 sts;
+ int r;
+
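+	/*
+	 * Bits 23:20 of this CS5536 MSR hold the IRQ that the SCI is routed
+	 * to; zero means the SCI is masked.
+	 */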
+ rdmsr(0x51400020, lo, hi);
+ sci_irq = (lo >> 20) & 15;
+
+ if (sci_irq) {
+ dev_info(&pdev->dev, "SCI is mapped to IRQ %d\n", sci_irq);
+ } else {
+ /* Zero means masked */
+ dev_info(&pdev->dev, "SCI unmapped. Mapping to IRQ 3\n");
+ sci_irq = 3;
+ lo |= 0x00300000;
+ wrmsrl(0x51400020, lo);
+ }
+
+ /* Select level triggered in PIC */
+ if (sci_irq < 8) {
+ lo = inb(CS5536_PIC_INT_SEL1);
+ lo |= 1 << sci_irq;
+ outb(lo, CS5536_PIC_INT_SEL1);
+ } else {
+ lo = inb(CS5536_PIC_INT_SEL2);
+ lo |= 1 << (sci_irq - 8);
+ outb(lo, CS5536_PIC_INT_SEL2);
+ }
+
+ /* Enable interesting SCI events, and clear pending interrupts */
+ sts = inl(acpi_base + CS5536_PM1_STS);
+ outl(((CS5536_PM_PWRBTN | CS5536_PM_RTC) << 16) | 0xffff,
+ acpi_base + CS5536_PM1_STS);
+
+ r = request_irq(sci_irq, xo1_sci_intr, 0, DRV_NAME, pdev);
+ if (r)
+ dev_err(&pdev->dev, "can't request interrupt\n");
+
+ return r;
+}
+
+static int setup_ec_sci(void)
+{
+ int r;
+
+ r = gpio_request(OLPC_GPIO_ECSCI, "OLPC-ECSCI");
+ if (r)
+ return r;
+
+ gpio_direction_input(OLPC_GPIO_ECSCI);
+
+ /* Clear pending EC SCI events */
+ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_NEGATIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_POSITIVE_EDGE_STS);
+
+ /*
+ * Enable EC SCI events, and map them to both a PME and the SCI
+ * interrupt.
+ *
+ * Ordinarily, in addition to functioning as GPIOs, Geode GPIOs can
+ * be mapped to regular interrupts *or* Geode-specific Power
+ * Management Events (PMEs) - events that bring the system out of
+ * suspend. In this case, we want both of those things - the system
+ * wakeup, *and* the ability to get an interrupt when an event occurs.
+ *
+ * To achieve this, we map the GPIO to a PME, and then we use one
+ * of the many generic knobs on the CS5535 PIC to additionally map the
+ * PME to the regular SCI interrupt line.
+ */
+ cs5535_gpio_set(OLPC_GPIO_ECSCI, GPIO_EVENTS_ENABLE);
+
+ /* Set the SCI to cause a PME event on group 7 */
+ cs5535_gpio_setup_event(OLPC_GPIO_ECSCI, 7, 1);
+
+ /* And have group 7 also fire the SCI interrupt */
+ cs5535_pic_unreqz_select_high(7, sci_irq);
+
+ return 0;
+}
+
+static void free_ec_sci(void)
+{
+ gpio_free(OLPC_GPIO_ECSCI);
+}
+
+static int setup_lid_events(void)
+{
+ int r;
+
+ r = gpio_request(OLPC_GPIO_LID, "OLPC-LID");
+ if (r)
+ return r;
+
+ gpio_direction_input(OLPC_GPIO_LID);
+
+ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_INPUT_INVERT);
+ lid_inverted = 0;
+
+ /* Clear edge detection and event enable for now */
+ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
+ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_EN);
+ cs5535_gpio_clear(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_EN);
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_NEGATIVE_EDGE_STS);
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_POSITIVE_EDGE_STS);
+
+	/* Set the LID to cause a PME event on group 6 */
+ cs5535_gpio_setup_event(OLPC_GPIO_LID, 6, 1);
+
+ /* Set PME group 6 to fire the SCI interrupt */
+ cs5535_gpio_set_irq(6, sci_irq);
+
+ /* Enable the event */
+ cs5535_gpio_set(OLPC_GPIO_LID, GPIO_EVENTS_ENABLE);
+
+ return 0;
+}
+
+static void free_lid_events(void)
+{
+ gpio_free(OLPC_GPIO_LID);
+}
+
+static int setup_power_button(struct platform_device *pdev)
+{
+ int r;
+
+ power_button_idev = input_allocate_device();
+ if (!power_button_idev)
+ return -ENOMEM;
+
+ power_button_idev->name = "Power Button";
+ power_button_idev->phys = DRV_NAME "/input0";
+ set_bit(EV_KEY, power_button_idev->evbit);
+ set_bit(KEY_POWER, power_button_idev->keybit);
+
+ power_button_idev->dev.parent = &pdev->dev;
+ device_init_wakeup(&power_button_idev->dev, 1);
+
+ r = input_register_device(power_button_idev);
+ if (r) {
+ dev_err(&pdev->dev, "failed to register power button: %d\n", r);
+ input_free_device(power_button_idev);
+ }
+
+ return r;
+}
+
+static void free_power_button(void)
+{
+ input_unregister_device(power_button_idev);
+}
+
+static int setup_ebook_switch(struct platform_device *pdev)
+{
+ int r;
+
+ ebook_switch_idev = input_allocate_device();
+ if (!ebook_switch_idev)
+ return -ENOMEM;
+
+ ebook_switch_idev->name = "EBook Switch";
+ ebook_switch_idev->phys = DRV_NAME "/input1";
+ set_bit(EV_SW, ebook_switch_idev->evbit);
+ set_bit(SW_TABLET_MODE, ebook_switch_idev->swbit);
+
+ ebook_switch_idev->dev.parent = &pdev->dev;
+ device_set_wakeup_capable(&ebook_switch_idev->dev, true);
+
+ r = input_register_device(ebook_switch_idev);
+ if (r) {
+ dev_err(&pdev->dev, "failed to register ebook switch: %d\n", r);
+ input_free_device(ebook_switch_idev);
+ }
+
+ return r;
+}
+
+static void free_ebook_switch(void)
+{
+ input_unregister_device(ebook_switch_idev);
+}
+
+static int setup_lid_switch(struct platform_device *pdev)
+{
+ int r;
+
+ lid_switch_idev = input_allocate_device();
+ if (!lid_switch_idev)
+ return -ENOMEM;
+
+ lid_switch_idev->name = "Lid Switch";
+ lid_switch_idev->phys = DRV_NAME "/input2";
+ set_bit(EV_SW, lid_switch_idev->evbit);
+ set_bit(SW_LID, lid_switch_idev->swbit);
+
+ lid_switch_idev->dev.parent = &pdev->dev;
+ device_set_wakeup_capable(&lid_switch_idev->dev, true);
+
+ r = input_register_device(lid_switch_idev);
+ if (r) {
+ dev_err(&pdev->dev, "failed to register lid switch: %d\n", r);
+ goto err_register;
+ }
+
+ return 0;
+
+err_register:
+ input_free_device(lid_switch_idev);
+ return r;
+}
+
+static void free_lid_switch(void)
+{
+ input_unregister_device(lid_switch_idev);
+}
+
+static int xo1_sci_probe(struct platform_device *pdev)
+{
+ struct resource *res;
+ int r;
+
+ /* don't run on non-XOs */
+ if (!machine_is_olpc())
+ return -ENODEV;
+
+ res = platform_get_resource(pdev, IORESOURCE_IO, 0);
+ if (!res) {
+ dev_err(&pdev->dev, "can't fetch device resource info\n");
+ return -EIO;
+ }
+ acpi_base = res->start;
+
+ r = setup_power_button(pdev);
+ if (r)
+ return r;
+
+ r = setup_ebook_switch(pdev);
+ if (r)
+ goto err_ebook;
+
+ r = setup_lid_switch(pdev);
+ if (r)
+ goto err_lid;
+
+ r = setup_lid_events();
+ if (r)
+ goto err_lidevt;
+
+ r = setup_ec_sci();
+ if (r)
+ goto err_ecsci;
+
+ /* Enable PME generation for EC-generated events */
+ outl(CS5536_GPIOM6_PME_EN | CS5536_GPIOM7_PME_EN,
+ acpi_base + CS5536_PM_GPE0_EN);
+
+ /* Clear pending events */
+ outl(0xffffffff, acpi_base + CS5536_PM_GPE0_STS);
+ process_sci_queue(false);
+
+ /* Initial sync */
+ send_ebook_state();
+ detect_lid_state();
+ send_lid_state();
+
+ r = setup_sci_interrupt(pdev);
+ if (r)
+ goto err_sci;
+
+ /* Enable all EC events */
+ olpc_ec_mask_write(EC_SCI_SRC_ALL);
+
+ return r;
+
+err_sci:
+ free_ec_sci();
+err_ecsci:
+ free_lid_events();
+err_lidevt:
+ free_lid_switch();
+err_lid:
+ free_ebook_switch();
+err_ebook:
+ free_power_button();
+ return r;
+}
+
+static int xo1_sci_remove(struct platform_device *pdev)
+{
+ free_irq(sci_irq, pdev);
+ cancel_work_sync(&sci_work);
+ free_ec_sci();
+ free_lid_events();
+ free_lid_switch();
+ free_ebook_switch();
+ free_power_button();
+ acpi_base = 0;
+ return 0;
+}
+
+static struct platform_driver xo1_sci_driver = {
+ .driver = {
+ .name = "olpc-xo1-sci-acpi",
+ .dev_groups = lid_groups,
+ },
+ .probe = xo1_sci_probe,
+ .remove = xo1_sci_remove,
+ .suspend = xo1_sci_suspend,
+ .resume = xo1_sci_resume,
+};
+
+static int __init xo1_sci_init(void)
+{
+ return platform_driver_register(&xo1_sci_driver);
+}
+arch_initcall(xo1_sci_init);
diff --git a/arch/x86/platform/olpc/olpc-xo15-sci.c b/arch/x86/platform/olpc/olpc-xo15-sci.c
new file mode 100644
index 000000000..994a229cb
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc-xo15-sci.c
@@ -0,0 +1,231 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for OLPC XO-1.5 System Control Interrupts (SCI)
+ *
+ * Copyright (C) 2009-2010 One Laptop per Child
+ */
+
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/workqueue.h>
+#include <linux/power_supply.h>
+#include <linux/olpc-ec.h>
+
+#include <linux/acpi.h>
+#include <asm/olpc.h>
+
+#define DRV_NAME "olpc-xo15-sci"
+#define PFX DRV_NAME ": "
+#define XO15_SCI_CLASS DRV_NAME
+#define XO15_SCI_DEVICE_NAME "OLPC XO-1.5 SCI"
+
+static unsigned long xo15_sci_gpe;
+static bool lid_wake_on_close;
+
+/*
+ * The normal ACPI LID wakeup behavior is wake-on-open, but not
+ * wake-on-close. This is implemented as standard by the XO-1.5 DSDT.
+ *
+ * We provide here a sysfs attribute that will additionally enable
+ * wake-on-close behavior. This is useful (e.g.) when we opportunistically
+ * suspend with the display running; if the lid is then closed, we want to
+ * wake up to turn the display off.
+ *
+ * This is controlled through a custom method in the XO-1.5 DSDT.
+ */
+static int set_lid_wake_behavior(bool wake_on_close)
+{
+ acpi_status status;
+
+ status = acpi_execute_simple_method(NULL, "\\_SB.PCI0.LID.LIDW", wake_on_close);
+ if (ACPI_FAILURE(status)) {
+ pr_warn(PFX "failed to set lid behavior\n");
+ return 1;
+ }
+
+ lid_wake_on_close = wake_on_close;
+
+ return 0;
+}
+
+static ssize_t
+lid_wake_on_close_show(struct kobject *s, struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%u\n", lid_wake_on_close);
+}
+
+static ssize_t lid_wake_on_close_store(struct kobject *s,
+ struct kobj_attribute *attr,
+ const char *buf, size_t n)
+{
+ unsigned int val;
+
+ if (sscanf(buf, "%u", &val) != 1)
+ return -EINVAL;
+
+ set_lid_wake_behavior(!!val);
+
+ return n;
+}
+
+static struct kobj_attribute lid_wake_on_close_attr =
+ __ATTR(lid_wake_on_close, 0644,
+ lid_wake_on_close_show,
+ lid_wake_on_close_store);
+
+static void battery_status_changed(void)
+{
+ struct power_supply *psy = power_supply_get_by_name("olpc_battery");
+
+ if (psy) {
+ power_supply_changed(psy);
+ power_supply_put(psy);
+ }
+}
+
+static void ac_status_changed(void)
+{
+ struct power_supply *psy = power_supply_get_by_name("olpc_ac");
+
+ if (psy) {
+ power_supply_changed(psy);
+ power_supply_put(psy);
+ }
+}
+
+static void process_sci_queue(void)
+{
+ u16 data;
+ int r;
+
+ do {
+ r = olpc_ec_sci_query(&data);
+ if (r || !data)
+ break;
+
+ pr_debug(PFX "SCI 0x%x received\n", data);
+
+ switch (data) {
+ case EC_SCI_SRC_BATERR:
+ case EC_SCI_SRC_BATSOC:
+ case EC_SCI_SRC_BATTERY:
+ case EC_SCI_SRC_BATCRIT:
+ battery_status_changed();
+ break;
+ case EC_SCI_SRC_ACPWR:
+ ac_status_changed();
+ break;
+ }
+ } while (data);
+
+ if (r)
+		pr_err(PFX "Failed to clear SCI queue\n");
+}
+
+static void process_sci_queue_work(struct work_struct *work)
+{
+ process_sci_queue();
+}
+
+static DECLARE_WORK(sci_work, process_sci_queue_work);
+
+static u32 xo15_sci_gpe_handler(acpi_handle gpe_device, u32 gpe, void *context)
+{
+ schedule_work(&sci_work);
+ return ACPI_INTERRUPT_HANDLED | ACPI_REENABLE_GPE;
+}
+
+static int xo15_sci_add(struct acpi_device *device)
+{
+ unsigned long long tmp;
+ acpi_status status;
+ int r;
+
+ if (!device)
+ return -EINVAL;
+
+ strcpy(acpi_device_name(device), XO15_SCI_DEVICE_NAME);
+ strcpy(acpi_device_class(device), XO15_SCI_CLASS);
+
+ /* Get GPE bit assignment (EC events). */
+ status = acpi_evaluate_integer(device->handle, "_GPE", NULL, &tmp);
+ if (ACPI_FAILURE(status))
+ return -EINVAL;
+
+ xo15_sci_gpe = tmp;
+ status = acpi_install_gpe_handler(NULL, xo15_sci_gpe,
+ ACPI_GPE_EDGE_TRIGGERED,
+ xo15_sci_gpe_handler, device);
+ if (ACPI_FAILURE(status))
+ return -ENODEV;
+
+ dev_info(&device->dev, "Initialized, GPE = 0x%lx\n", xo15_sci_gpe);
+
+ r = sysfs_create_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
+ if (r)
+ goto err_sysfs;
+
+ /* Flush queue, and enable all SCI events */
+ process_sci_queue();
+ olpc_ec_mask_write(EC_SCI_SRC_ALL);
+
+ acpi_enable_gpe(NULL, xo15_sci_gpe);
+
+ /* Enable wake-on-EC */
+ if (device->wakeup.flags.valid)
+ device_init_wakeup(&device->dev, true);
+
+ return 0;
+
+err_sysfs:
+ acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
+ cancel_work_sync(&sci_work);
+ return r;
+}
+
+static int xo15_sci_remove(struct acpi_device *device)
+{
+ acpi_disable_gpe(NULL, xo15_sci_gpe);
+ acpi_remove_gpe_handler(NULL, xo15_sci_gpe, xo15_sci_gpe_handler);
+ cancel_work_sync(&sci_work);
+ sysfs_remove_file(&device->dev.kobj, &lid_wake_on_close_attr.attr);
+ return 0;
+}
+
+#ifdef CONFIG_PM_SLEEP
+static int xo15_sci_resume(struct device *dev)
+{
+ /* Enable all EC events */
+ olpc_ec_mask_write(EC_SCI_SRC_ALL);
+
+ /* Power/battery status might have changed */
+ battery_status_changed();
+ ac_status_changed();
+
+ return 0;
+}
+#endif
+
+static SIMPLE_DEV_PM_OPS(xo15_sci_pm, NULL, xo15_sci_resume);
+
+static const struct acpi_device_id xo15_sci_device_ids[] = {
+ {"XO15EC", 0},
+ {"", 0},
+};
+
+static struct acpi_driver xo15_sci_drv = {
+ .name = DRV_NAME,
+ .class = XO15_SCI_CLASS,
+ .ids = xo15_sci_device_ids,
+ .ops = {
+ .add = xo15_sci_add,
+ .remove = xo15_sci_remove,
+ },
+ .drv.pm = &xo15_sci_pm,
+};
+
+static int __init xo15_sci_init(void)
+{
+ return acpi_bus_register_driver(&xo15_sci_drv);
+}
+device_initcall(xo15_sci_init);
diff --git a/arch/x86/platform/olpc/olpc.c b/arch/x86/platform/olpc/olpc.c
new file mode 100644
index 000000000..1d4a00e76
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Support for the OLPC DCON and OLPC EC access
+ *
+ * Copyright © 2006 Advanced Micro Devices, Inc.
+ * Copyright © 2007-2008 Andres Salomon <dilinger@debian.org>
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/string.h>
+#include <linux/platform_device.h>
+#include <linux/of.h>
+#include <linux/syscore_ops.h>
+#include <linux/mutex.h>
+#include <linux/olpc-ec.h>
+
+#include <asm/geode.h>
+#include <asm/setup.h>
+#include <asm/olpc.h>
+#include <asm/olpc_ofw.h>
+
+struct olpc_platform_t olpc_platform_info;
+EXPORT_SYMBOL_GPL(olpc_platform_info);
+
+/* what the timeout *should* be (in ms) */
+#define EC_BASE_TIMEOUT 20
+
+/* the timeout that bugs in the EC might force us to actually use */
+static int ec_timeout = EC_BASE_TIMEOUT;
+
+static int __init olpc_ec_timeout_set(char *str)
+{
+ if (get_option(&str, &ec_timeout) != 1) {
+ ec_timeout = EC_BASE_TIMEOUT;
+ printk(KERN_ERR "olpc-ec: invalid argument to "
+ "'olpc_ec_timeout=', ignoring!\n");
+ }
+ printk(KERN_DEBUG "olpc-ec: using %d ms delay for EC commands.\n",
+ ec_timeout);
+ return 1;
+}
+__setup("olpc_ec_timeout=", olpc_ec_timeout_set);
+
+/*
+ * These {i,o}bf_status functions return whether the buffers are full or not.
+ */
+
+static inline unsigned int ibf_status(unsigned int port)
+{
+ return !!(inb(port) & 0x02);
+}
+
+static inline unsigned int obf_status(unsigned int port)
+{
+ return inb(port) & 0x01;
+}
+
+#define wait_on_ibf(p, d) __wait_on_ibf(__LINE__, (p), (d))
+static int __wait_on_ibf(unsigned int line, unsigned int port, int desired)
+{
+ unsigned int timeo;
+ int state = ibf_status(port);
+
+ for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+ mdelay(1);
+ state = ibf_status(port);
+ }
+
+ if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+ timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+ printk(KERN_WARNING "olpc-ec: %d: waited %u ms for IBF!\n",
+ line, ec_timeout - timeo);
+ }
+
+ return !(state == desired);
+}
+
+#define wait_on_obf(p, d) __wait_on_obf(__LINE__, (p), (d))
+static int __wait_on_obf(unsigned int line, unsigned int port, int desired)
+{
+ unsigned int timeo;
+ int state = obf_status(port);
+
+ for (timeo = ec_timeout; state != desired && timeo; timeo--) {
+ mdelay(1);
+ state = obf_status(port);
+ }
+
+ if ((state == desired) && (ec_timeout > EC_BASE_TIMEOUT) &&
+ timeo < (ec_timeout - EC_BASE_TIMEOUT)) {
+ printk(KERN_WARNING "olpc-ec: %d: waited %u ms for OBF!\n",
+ line, ec_timeout - timeo);
+ }
+
+ return !(state == desired);
+}
+
+/*
+ * This allows the kernel to run Embedded Controller commands. The EC is
+ * documented at <http://wiki.laptop.org/go/Embedded_controller>, and the
+ * available EC commands are here:
+ * <http://wiki.laptop.org/go/Ec_specification>. Unfortunately, while
+ * OpenFirmware's source is available, the EC's is not.
+ */
+static int olpc_xo1_ec_cmd(u8 cmd, u8 *inbuf, size_t inlen, u8 *outbuf,
+ size_t outlen, void *arg)
+{
+ int ret = -EIO;
+ int i;
+ int restarts = 0;
+
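+	/*
+	 * The EC sits behind a KBC-style interface: port 0x6c is the
+	 * command/status register (IBF = bit 1, OBF = bit 0) and port 0x68
+	 * is the data register.
+	 */
+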
+ /* Clear OBF */
+ for (i = 0; i < 10 && (obf_status(0x6c) == 1); i++)
+ inb(0x68);
+ if (i == 10) {
+ printk(KERN_ERR "olpc-ec: timeout while attempting to "
+ "clear OBF flag!\n");
+ goto err;
+ }
+
+ if (wait_on_ibf(0x6c, 0)) {
+ printk(KERN_ERR "olpc-ec: timeout waiting for EC to "
+ "quiesce!\n");
+ goto err;
+ }
+
+restart:
+ /*
+ * Note that if we time out during any IBF checks, that's a failure;
+ * we have to return. There's no way for the kernel to clear that.
+ *
+ * If we time out during an OBF check, we can restart the command;
+ * reissuing it will clear the OBF flag, and we should be alright.
+ * The OBF flag will sometimes misbehave due to what we believe
+	 * is a hardware quirk.
+ */
+ pr_devel("olpc-ec: running cmd 0x%x\n", cmd);
+ outb(cmd, 0x6c);
+
+ if (wait_on_ibf(0x6c, 0)) {
+ printk(KERN_ERR "olpc-ec: timeout waiting for EC to read "
+ "command!\n");
+ goto err;
+ }
+
+ if (inbuf && inlen) {
+ /* write data to EC */
+ for (i = 0; i < inlen; i++) {
+ pr_devel("olpc-ec: sending cmd arg 0x%x\n", inbuf[i]);
+ outb(inbuf[i], 0x68);
+ if (wait_on_ibf(0x6c, 0)) {
+ printk(KERN_ERR "olpc-ec: timeout waiting for"
+ " EC accept data!\n");
+ goto err;
+ }
+ }
+ }
+ if (outbuf && outlen) {
+ /* read data from EC */
+ for (i = 0; i < outlen; i++) {
+ if (wait_on_obf(0x6c, 1)) {
+ printk(KERN_ERR "olpc-ec: timeout waiting for"
+ " EC to provide data!\n");
+ if (restarts++ < 10)
+ goto restart;
+ goto err;
+ }
+ outbuf[i] = inb(0x68);
+ pr_devel("olpc-ec: received 0x%x\n", outbuf[i]);
+ }
+ }
+
+ ret = 0;
+err:
+ return ret;
+}
+
+static bool __init check_ofw_architecture(struct device_node *root)
+{
+ const char *olpc_arch;
+ int propsize;
+
+ olpc_arch = of_get_property(root, "architecture", &propsize);
+	return olpc_arch && propsize == 5 &&
+	       strncmp("OLPC", olpc_arch, 5) == 0;
+}
+
+static u32 __init get_board_revision(struct device_node *root)
+{
+ int propsize;
+ const __be32 *rev;
+
+ rev = of_get_property(root, "board-revision-int", &propsize);
+ if (propsize != 4)
+ return 0;
+
+ return be32_to_cpu(*rev);
+}
+
+static bool __init platform_detect(void)
+{
+ struct device_node *root = of_find_node_by_path("/");
+ bool success;
+
+ if (!root)
+ return false;
+
+ success = check_ofw_architecture(root);
+ if (success) {
+ olpc_platform_info.boardrev = get_board_revision(root);
+ olpc_platform_info.flags |= OLPC_F_PRESENT;
+
+ pr_info("OLPC board revision %s%X\n",
+ ((olpc_platform_info.boardrev & 0xf) < 8) ? "pre" : "",
+ olpc_platform_info.boardrev >> 4);
+ }
+
+ of_node_put(root);
+ return success;
+}
+
+static int __init add_xo1_platform_devices(void)
+{
+ struct platform_device *pdev;
+
+ pdev = platform_device_register_simple("xo1-rfkill", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ pdev = platform_device_register_simple("olpc-xo1", -1, NULL, 0);
+
+ return PTR_ERR_OR_ZERO(pdev);
+}
+
+static int olpc_xo1_ec_suspend(struct platform_device *pdev)
+{
+ /*
+ * Squelch SCIs while suspended. This is a fix for
+ * <http://dev.laptop.org/ticket/1835>.
+ */
+ return olpc_ec_cmd(EC_SET_SCI_INHIBIT, NULL, 0, NULL, 0);
+}
+
+static int olpc_xo1_ec_resume(struct platform_device *pdev)
+{
+ /* Tell the EC to stop inhibiting SCIs */
+ olpc_ec_cmd(EC_SET_SCI_INHIBIT_RELEASE, NULL, 0, NULL, 0);
+
+ /*
+ * Tell the wireless module to restart USB communication.
+ * Must be done twice.
+ */
+ olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
+ olpc_ec_cmd(EC_WAKE_UP_WLAN, NULL, 0, NULL, 0);
+
+ return 0;
+}
+
+static struct olpc_ec_driver ec_xo1_driver = {
+ .suspend = olpc_xo1_ec_suspend,
+ .resume = olpc_xo1_ec_resume,
+ .ec_cmd = olpc_xo1_ec_cmd,
+#ifdef CONFIG_OLPC_XO1_SCI
+ /*
+ * XO-1 EC wakeups are available when olpc-xo1-sci driver is
+ * compiled in
+ */
+ .wakeup_available = true,
+#endif
+};
+
+static struct olpc_ec_driver ec_xo1_5_driver = {
+ .ec_cmd = olpc_xo1_ec_cmd,
+#ifdef CONFIG_OLPC_XO15_SCI
+ /*
+ * XO-1.5 EC wakeups are available when olpc-xo15-sci driver is
+ * compiled in
+ */
+ .wakeup_available = true,
+#endif
+};
+
+static int __init olpc_init(void)
+{
+ int r = 0;
+
+ if (!olpc_ofw_present() || !platform_detect())
+ return 0;
+
+ /* register the XO-1 and 1.5-specific EC handler */
+ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) /* XO-1 */
+ olpc_ec_driver_register(&ec_xo1_driver, NULL);
+ else
+ olpc_ec_driver_register(&ec_xo1_5_driver, NULL);
+ platform_device_register_simple("olpc-ec", -1, NULL, 0);
+
+ /* assume B1 and above models always have a DCON */
+ if (olpc_board_at_least(olpc_board(0xb1)))
+ olpc_platform_info.flags |= OLPC_F_DCON;
+
+#ifdef CONFIG_PCI_OLPC
+	/* If the VSA exists, let it emulate PCI; if not, emulate in kernel.
+	 * XO-1 only. */
+ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0) &&
+ !cs5535_has_vsa2())
+ x86_init.pci.arch_init = pci_olpc_init;
+#endif
+
+ if (olpc_platform_info.boardrev < olpc_board_pre(0xd0)) { /* XO-1 */
+ r = add_xo1_platform_devices();
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+postcore_initcall(olpc_init);
diff --git a/arch/x86/platform/olpc/olpc_dt.c b/arch/x86/platform/olpc/olpc_dt.c
new file mode 100644
index 000000000..75e3319e8
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc_dt.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * OLPC-specific OFW device tree support code.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ *
+ * Adapted for sparc by David S. Miller davem@davemloft.net
+ * Adapted for x86/OLPC by Andres Salomon <dilinger@queued.net>
+ */
+
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_pdt.h>
+#include <asm/olpc.h>
+#include <asm/olpc_ofw.h>
+
+static phandle __init olpc_dt_getsibling(phandle node)
+{
+ const void *args[] = { (void *)node };
+ void *res[] = { &node };
+
+ if ((s32)node == -1)
+ return 0;
+
+ if (olpc_ofw("peer", args, res) || (s32)node == -1)
+ return 0;
+
+ return node;
+}
+
+static phandle __init olpc_dt_getchild(phandle node)
+{
+ const void *args[] = { (void *)node };
+ void *res[] = { &node };
+
+ if ((s32)node == -1)
+ return 0;
+
+ if (olpc_ofw("child", args, res) || (s32)node == -1) {
+ pr_err("PROM: %s: fetching child failed!\n", __func__);
+ return 0;
+ }
+
+ return node;
+}
+
+static int __init olpc_dt_getproplen(phandle node, const char *prop)
+{
+ const void *args[] = { (void *)node, prop };
+ int len;
+ void *res[] = { &len };
+
+ if ((s32)node == -1)
+ return -1;
+
+ if (olpc_ofw("getproplen", args, res)) {
+ pr_err("PROM: %s: getproplen failed!\n", __func__);
+ return -1;
+ }
+
+ return len;
+}
+
+static int __init olpc_dt_getproperty(phandle node, const char *prop,
+ char *buf, int bufsize)
+{
+ int plen;
+
+ plen = olpc_dt_getproplen(node, prop);
+ if (plen > bufsize || plen < 1) {
+ return -1;
+ } else {
+ const void *args[] = { (void *)node, prop, buf, (void *)plen };
+ void *res[] = { &plen };
+
+ if (olpc_ofw("getprop", args, res)) {
+ pr_err("PROM: %s: getprop failed!\n", __func__);
+ return -1;
+ }
+ }
+
+ return plen;
+}
+
+static int __init olpc_dt_nextprop(phandle node, char *prev, char *buf)
+{
+ const void *args[] = { (void *)node, prev, buf };
+ int success;
+ void *res[] = { &success };
+
+ buf[0] = '\0';
+
+ if ((s32)node == -1)
+ return -1;
+
+ if (olpc_ofw("nextprop", args, res) || success != 1)
+ return -1;
+
+ return 0;
+}
+
+static int __init olpc_dt_pkg2path(phandle node, char *buf,
+ const int buflen, int *len)
+{
+ const void *args[] = { (void *)node, buf, (void *)buflen };
+ void *res[] = { len };
+
+ if ((s32)node == -1)
+ return -1;
+
+ if (olpc_ofw("package-to-path", args, res) || *len < 1)
+ return -1;
+
+ return 0;
+}
+
+static unsigned int prom_early_allocated __initdata;
+
+void * __init prom_early_alloc(unsigned long size)
+{
+ static u8 *mem;
+ static size_t free_mem;
+ void *res;
+
+ if (free_mem < size) {
+ const size_t chunk_size = max(PAGE_SIZE, size);
+
+ /*
+ * To minimize the number of allocations, grab at least
+ * PAGE_SIZE of memory (that's an arbitrary choice that's
+ * fast enough on the platforms we care about while minimizing
+ * wasted bootmem) and hand off chunks of it to callers.
+ */
+ res = memblock_alloc(chunk_size, SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ chunk_size);
+ prom_early_allocated += chunk_size;
+ memset(res, 0, chunk_size);
+ free_mem = chunk_size;
+ mem = res;
+ }
+
+ /* allocate from the local cache */
+ free_mem -= size;
+ res = mem;
+ mem += size;
+ return res;
+}
+
+static struct of_pdt_ops prom_olpc_ops __initdata = {
+ .nextprop = olpc_dt_nextprop,
+ .getproplen = olpc_dt_getproplen,
+ .getproperty = olpc_dt_getproperty,
+ .getchild = olpc_dt_getchild,
+ .getsibling = olpc_dt_getsibling,
+ .pkg2path = olpc_dt_pkg2path,
+};
+
+static phandle __init olpc_dt_finddevice(const char *path)
+{
+ phandle node;
+ const void *args[] = { path };
+ void *res[] = { &node };
+
+ if (olpc_ofw("finddevice", args, res)) {
+ pr_err("olpc_dt: finddevice failed!\n");
+ return 0;
+ }
+
+ if ((s32) node == -1)
+ return 0;
+
+ return node;
+}
+
+static int __init olpc_dt_interpret(const char *words)
+{
+ int result;
+ const void *args[] = { words };
+ void *res[] = { &result };
+
+ if (olpc_ofw("interpret", args, res)) {
+ pr_err("olpc_dt: interpret failed!\n");
+ return -1;
+ }
+
+ return result;
+}
+
+/*
+ * Extract board revision directly from OFW device tree.
+ * We can't use olpc_platform_info because that hasn't been set up yet.
+ */
+static u32 __init olpc_dt_get_board_revision(void)
+{
+ phandle node;
+ __be32 rev;
+ int r;
+
+ node = olpc_dt_finddevice("/");
+ if (!node)
+ return 0;
+
+ r = olpc_dt_getproperty(node, "board-revision-int",
+ (char *) &rev, sizeof(rev));
+ if (r < 0)
+ return 0;
+
+ return be32_to_cpu(rev);
+}
+
+static int __init olpc_dt_compatible_match(phandle node, const char *compat)
+{
+ char buf[64], *p;
+ int plen, len;
+
+ plen = olpc_dt_getproperty(node, "compatible", buf, sizeof(buf));
+ if (plen <= 0)
+ return 0;
+
+ len = strlen(compat);
+ for (p = buf; p < buf + plen; p += strlen(p) + 1) {
+ if (strcmp(p, compat) == 0)
+ return 1;
+ }
+
+ return 0;
+}
+
+void __init olpc_dt_fixup(void)
+{
+ phandle node;
+ u32 board_rev;
+
+ node = olpc_dt_finddevice("/battery@0");
+ if (!node)
+ return;
+
+ board_rev = olpc_dt_get_board_revision();
+ if (!board_rev)
+ return;
+
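+	/*
+	 * The strings below are Forth phrases handed to OFW's "interpret"
+	 * service; they patch the live device tree before it is snapshotted.
+	 */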
+ if (board_rev >= olpc_board_pre(0xd0)) {
+ /* XO-1.5 */
+
+ if (olpc_dt_compatible_match(node, "olpc,xo1.5-battery"))
+ return;
+
+ /* Add olpc,xo1.5-battery compatible marker to battery node */
+ olpc_dt_interpret("\" /battery@0\" find-device");
+ olpc_dt_interpret(" \" olpc,xo1.5-battery\" +compatible");
+ olpc_dt_interpret("device-end");
+
+ if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
+ /*
+ * If we have a olpc,xo1-battery compatible, then we're
+ * running a new enough firmware that already has
+ * the dcon node.
+ */
+ return;
+ }
+
+ /* Add dcon device */
+ olpc_dt_interpret("\" /pci/display@1\" find-device");
+ olpc_dt_interpret(" new-device");
+ olpc_dt_interpret(" \" dcon\" device-name");
+ olpc_dt_interpret(" \" olpc,xo1-dcon\" +compatible");
+ olpc_dt_interpret(" finish-device");
+ olpc_dt_interpret("device-end");
+ } else {
+ /* XO-1 */
+
+ if (olpc_dt_compatible_match(node, "olpc,xo1-battery")) {
+ /*
+ * If we have a olpc,xo1-battery compatible, then we're
+ * running a new enough firmware that already has
+ * the dcon and RTC nodes.
+ */
+ return;
+ }
+
+ /* Add dcon device, mark RTC as olpc,xo1-rtc */
+ olpc_dt_interpret("\" /pci/display@1,1\" find-device");
+ olpc_dt_interpret(" new-device");
+ olpc_dt_interpret(" \" dcon\" device-name");
+ olpc_dt_interpret(" \" olpc,xo1-dcon\" +compatible");
+ olpc_dt_interpret(" finish-device");
+ olpc_dt_interpret("device-end");
+
+ olpc_dt_interpret("\" /rtc\" find-device");
+ olpc_dt_interpret(" \" olpc,xo1-rtc\" +compatible");
+ olpc_dt_interpret("device-end");
+ }
+
+ /* Add olpc,xo1-battery compatible marker to battery node */
+ olpc_dt_interpret("\" /battery@0\" find-device");
+ olpc_dt_interpret(" \" olpc,xo1-battery\" +compatible");
+ olpc_dt_interpret("device-end");
+}
+
+void __init olpc_dt_build_devicetree(void)
+{
+ phandle root;
+
+ if (!olpc_ofw_is_installed())
+ return;
+
+ olpc_dt_fixup();
+
+ root = olpc_dt_getsibling(0);
+ if (!root) {
+ pr_err("PROM: unable to get root node from OFW!\n");
+ return;
+ }
+ of_pdt_build_devicetree(root, &prom_olpc_ops);
+
+ pr_info("PROM DT: Built device tree with %u bytes of memory.\n",
+ prom_early_allocated);
+}
diff --git a/arch/x86/platform/olpc/olpc_ofw.c b/arch/x86/platform/olpc/olpc_ofw.c
new file mode 100644
index 000000000..6bab0f0aa
--- /dev/null
+++ b/arch/x86/platform/olpc/olpc_ofw.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/spinlock_types.h>
+#include <linux/init.h>
+#include <linux/pgtable.h>
+#include <asm/page.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+#include <asm/cpufeature.h>
+#include <asm/special_insns.h>
+#include <asm/olpc_ofw.h>
+
+/* address of OFW callback interface; will be NULL if OFW isn't found */
+static int (*olpc_ofw_cif)(int *);
+
+/* page dir entry containing OFW's pgdir table; filled in by head_32.S */
+u32 olpc_ofw_pgd __initdata;
+
+static DEFINE_SPINLOCK(ofw_lock);
+
+#define MAXARGS 10
+
+void __init setup_olpc_ofw_pgd(void)
+{
+ pgd_t *base, *ofw_pde;
+
+ if (!olpc_ofw_cif)
+ return;
+
+ /* fetch OFW's PDE */
+ base = early_ioremap(olpc_ofw_pgd, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
+ if (!base) {
+ printk(KERN_ERR "failed to remap OFW's pgd - disabling OFW!\n");
+ olpc_ofw_cif = NULL;
+ return;
+ }
+ ofw_pde = &base[OLPC_OFW_PDE_NR];
+
+ /* install OFW's PDE permanently into the kernel's pgtable */
+ set_pgd(&swapper_pg_dir[OLPC_OFW_PDE_NR], *ofw_pde);
+ /* implicit optimization barrier here due to uninline function return */
+
+ early_iounmap(base, sizeof(olpc_ofw_pgd) * PTRS_PER_PGD);
+}
+
+int __olpc_ofw(const char *name, int nr_args, const void **args, int nr_res,
+ void **res)
+{
+ int ofw_args[MAXARGS + 3];
+ unsigned long flags;
+ int ret, i, *p;
+
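+	/*
+	 * The OFW client interface takes a single flat cell array:
+	 * [service name, #args, #results, args..., results...].
+	 */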
+ BUG_ON(nr_args + nr_res > MAXARGS);
+
+ if (!olpc_ofw_cif)
+ return -EIO;
+
+ ofw_args[0] = (int)name;
+ ofw_args[1] = nr_args;
+ ofw_args[2] = nr_res;
+
+ p = &ofw_args[3];
+ for (i = 0; i < nr_args; i++, p++)
+ *p = (int)args[i];
+
+ /* call into ofw */
+ spin_lock_irqsave(&ofw_lock, flags);
+ ret = olpc_ofw_cif(ofw_args);
+ spin_unlock_irqrestore(&ofw_lock, flags);
+
+ if (!ret) {
+ for (i = 0; i < nr_res; i++, p++)
+ *((int *)res[i]) = *p;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(__olpc_ofw);
+
+bool olpc_ofw_present(void)
+{
+ return olpc_ofw_cif != NULL;
+}
+EXPORT_SYMBOL_GPL(olpc_ofw_present);
+
+/* OFW cif _should_ be above this address */
+#define OFW_MIN 0xff000000
+
+/* OFW starts on a 1MB boundary */
+#define OFW_BOUND (1<<20)
+
+void __init olpc_ofw_detect(void)
+{
+ struct olpc_ofw_header *hdr = &boot_params.olpc_ofw_header;
+ unsigned long start;
+
+ /* ensure OFW booted us by checking for "OFW " string */
+ if (hdr->ofw_magic != OLPC_OFW_SIG)
+ return;
+
+ olpc_ofw_cif = (int (*)(int *))hdr->cif_handler;
+
+ if ((unsigned long)olpc_ofw_cif < OFW_MIN) {
+ printk(KERN_ERR "OFW detected, but cif has invalid address 0x%lx - disabling.\n",
+ (unsigned long)olpc_ofw_cif);
+ olpc_ofw_cif = NULL;
+ return;
+ }
+
+ /* determine where OFW starts in memory */
+ start = round_down((unsigned long)olpc_ofw_cif, OFW_BOUND);
+ printk(KERN_INFO "OFW detected in memory, cif @ 0x%lx (reserving top %ldMB)\n",
+ (unsigned long)olpc_ofw_cif, (-start) >> 20);
+ reserve_top_address(-start);
+}
+
+bool __init olpc_ofw_is_installed(void)
+{
+ return olpc_ofw_cif != NULL;
+}
diff --git a/arch/x86/platform/olpc/xo1-wakeup.S b/arch/x86/platform/olpc/xo1-wakeup.S
new file mode 100644
index 000000000..3a5abffe5
--- /dev/null
+++ b/arch/x86/platform/olpc/xo1-wakeup.S
@@ -0,0 +1,126 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+.text
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page.h>
+#include <asm/pgtable_32.h>
+
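+	# Store a progress code in CMOS scratch register 0x34 through the
+	# RTC index/data ports (0x70/0x71), as a debugging aid for tracing
+	# the suspend/resume path.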
+ .macro writepost,value
+ movb $0x34, %al
+ outb %al, $0x70
+ movb $\value, %al
+ outb %al, $0x71
+ .endm
+
+wakeup_start:
+ # OFW lands us here, running in protected mode, with a
+ # kernel-compatible GDT already setup.
+
+ # Clear any dangerous flags
+ pushl $0
+ popfl
+
+ writepost 0x31
+
+ # Set up %cr3
+ movl $initial_page_table - __PAGE_OFFSET, %eax
+ movl %eax, %cr3
+
+ movl saved_cr4, %eax
+ movl %eax, %cr4
+
+ movl saved_cr0, %eax
+ movl %eax, %cr0
+
+ # Control registers were modified, pipeline resync is needed
+ jmp 1f
+1:
+
+ movw $__KERNEL_DS, %ax
+ movw %ax, %ss
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+
+ lgdt saved_gdt
+ lidt saved_idt
+ lldt saved_ldt
+ ljmp $(__KERNEL_CS),$1f
+1:
+ movl %cr3, %eax
+ movl %eax, %cr3
+ wbinvd
+
+ # Go back to the return point
+ jmp ret_point
+
+save_registers:
+ sgdt saved_gdt
+ sidt saved_idt
+ sldt saved_ldt
+
+ pushl %edx
+ movl %cr4, %edx
+ movl %edx, saved_cr4
+
+ movl %cr0, %edx
+ movl %edx, saved_cr0
+
+ popl %edx
+
+ movl %ebx, saved_context_ebx
+ movl %ebp, saved_context_ebp
+ movl %esi, saved_context_esi
+ movl %edi, saved_context_edi
+
+ pushfl
+ popl saved_context_eflags
+
+ RET
+
+restore_registers:
+ movl saved_context_ebp, %ebp
+ movl saved_context_ebx, %ebx
+ movl saved_context_esi, %esi
+ movl saved_context_edi, %edi
+
+ pushl saved_context_eflags
+ popfl
+
+ RET
+
+SYM_CODE_START(do_olpc_suspend_lowlevel)
+ call save_processor_state
+ call save_registers
+
+ # This is the stack context we want to remember
+ movl %esp, saved_context_esp
+
+ pushl $3
+ call xo1_do_sleep
+
+ jmp wakeup_start
+ .p2align 4,,7
+ret_point:
+ movl saved_context_esp, %esp
+
+ writepost 0x32
+
+ call restore_registers
+ call restore_processor_state
+ RET
+SYM_CODE_END(do_olpc_suspend_lowlevel)
+
+.data
+saved_gdt: .long 0,0
+saved_idt: .long 0,0
+saved_ldt: .long 0
+saved_cr4: .long 0
+saved_cr0: .long 0
+saved_context_esp: .long 0
+saved_context_edi: .long 0
+saved_context_esi: .long 0
+saved_context_ebx: .long 0
+saved_context_ebp: .long 0
+saved_context_eflags: .long 0
diff --git a/arch/x86/platform/pvh/Makefile b/arch/x86/platform/pvh/Makefile
new file mode 100644
index 000000000..5dec5067c
--- /dev/null
+++ b/arch/x86/platform/pvh/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+OBJECT_FILES_NON_STANDARD_head.o := y
+
+obj-$(CONFIG_PVH) += enlighten.o
+obj-$(CONFIG_PVH) += head.o
diff --git a/arch/x86/platform/pvh/enlighten.c b/arch/x86/platform/pvh/enlighten.c
new file mode 100644
index 000000000..ed0442e35
--- /dev/null
+++ b/arch/x86/platform/pvh/enlighten.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
+
+#include <xen/hvc-console.h>
+
+#include <asm/io_apic.h>
+#include <asm/hypervisor.h>
+#include <asm/e820/api.h>
+#include <asm/x86_init.h>
+
+#include <asm/xen/interface.h>
+
+#include <xen/xen.h>
+#include <xen/interface/hvm/start_info.h>
+
+/*
+ * PVH variables.
+ *
+ * pvh_bootparams and pvh_start_info need to live in a data segment, since
+ * they are used after startup_{32|64} (which clear .bss) have been invoked.
+ */
+struct boot_params __initdata pvh_bootparams;
+struct hvm_start_info __initdata pvh_start_info;
+
+const unsigned int __initconst pvh_start_info_sz = sizeof(pvh_start_info);
+
+static u64 __init pvh_get_root_pointer(void)
+{
+ return pvh_start_info.rsdp_paddr;
+}
+
+/*
+ * Xen guests are able to obtain the memory map from the hypervisor via the
+ * HYPERVISOR_memory_op hypercall.
+ * If we are trying to boot a Xen PVH guest, it is expected that the kernel
+ * will have been configured to provide an override for this routine to do
+ * just that.
+ */
+void __init __weak mem_map_via_hcall(struct boot_params *ptr __maybe_unused)
+{
+ xen_raw_printk("Error: Could not find memory map\n");
+ BUG();
+}
+
+static void __init init_pvh_bootparams(bool xen_guest)
+{
+ if ((pvh_start_info.version > 0) && (pvh_start_info.memmap_entries)) {
+ struct hvm_memmap_table_entry *ep;
+ int i;
+
+ ep = __va(pvh_start_info.memmap_paddr);
+ pvh_bootparams.e820_entries = pvh_start_info.memmap_entries;
+
+ for (i = 0; i < pvh_bootparams.e820_entries ; i++, ep++) {
+ pvh_bootparams.e820_table[i].addr = ep->addr;
+ pvh_bootparams.e820_table[i].size = ep->size;
+ pvh_bootparams.e820_table[i].type = ep->type;
+ }
+ } else if (xen_guest) {
+ mem_map_via_hcall(&pvh_bootparams);
+ } else {
+ /* Non-xen guests are not supported by version 0 */
+ BUG();
+ }
+
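+	/*
+	 * Reserve the legacy ISA hole (0xa0000 - 0xfffff) so that nothing
+	 * places usable RAM over it.
+	 */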
+ if (pvh_bootparams.e820_entries < E820_MAX_ENTRIES_ZEROPAGE - 1) {
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].addr =
+ ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].size =
+ ISA_END_ADDRESS - ISA_START_ADDRESS;
+ pvh_bootparams.e820_table[pvh_bootparams.e820_entries].type =
+ E820_TYPE_RESERVED;
+ pvh_bootparams.e820_entries++;
+	} else {
+		xen_raw_printk("Warning: Cannot fit ISA range into e820\n");
+	}
+
+ pvh_bootparams.hdr.cmd_line_ptr =
+ pvh_start_info.cmdline_paddr;
+
+ /* The first module is always ramdisk. */
+ if (pvh_start_info.nr_modules) {
+ struct hvm_modlist_entry *modaddr =
+ __va(pvh_start_info.modlist_paddr);
+ pvh_bootparams.hdr.ramdisk_image = modaddr->paddr;
+ pvh_bootparams.hdr.ramdisk_size = modaddr->size;
+ }
+
+ /*
+ * See Documentation/x86/boot.rst.
+ *
+ * Version 2.12 supports Xen entry point but we will use default x86/PC
+ * environment (i.e. hardware_subarch 0).
+ */
+ pvh_bootparams.hdr.version = (2 << 8) | 12;
+ pvh_bootparams.hdr.type_of_loader = ((xen_guest ? 0x9 : 0xb) << 4) | 0;
+
+ x86_init.acpi.get_root_pointer = pvh_get_root_pointer;
+}
+
+/*
+ * If we are trying to boot a Xen PVH guest, it is expected that the kernel
+ * will have been configured to provide the required override for this routine.
+ */
+void __init __weak xen_pvh_init(struct boot_params *boot_params)
+{
+ xen_raw_printk("Error: Missing xen PVH initialization\n");
+ BUG();
+}
+
+static void __init hypervisor_specific_init(bool xen_guest)
+{
+ if (xen_guest)
+ xen_pvh_init(&pvh_bootparams);
+}
+
+/*
+ * This routine (and those that it might call) should not use
+ * anything that lives in .bss since that segment will be cleared later.
+ */
+void __init xen_prepare_pvh(void)
+{
+	u32 msr = xen_cpuid_base();
+ bool xen_guest = !!msr;
+
+ if (pvh_start_info.magic != XEN_HVM_START_MAGIC_VALUE) {
+ xen_raw_printk("Error: Unexpected magic value (0x%08x)\n",
+ pvh_start_info.magic);
+ BUG();
+ }
+
+ memset(&pvh_bootparams, 0, sizeof(pvh_bootparams));
+
+ hypervisor_specific_init(xen_guest);
+
+ init_pvh_bootparams(xen_guest);
+}
diff --git a/arch/x86/platform/pvh/head.S b/arch/x86/platform/pvh/head.S
new file mode 100644
index 000000000..7fe564eaf
--- /dev/null
+++ b/arch/x86/platform/pvh/head.S
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (C) 2016, Oracle and/or its affiliates. All rights reserved.
+ */
+
+ .code32
+ .text
+#define _pa(x) ((x) - __START_KERNEL_map)
+
+#include <linux/elfnote.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/asm.h>
+#include <asm/boot.h>
+#include <asm/processor-flags.h>
+#include <asm/msr.h>
+#include <asm/nospec-branch.h>
+#include <xen/interface/elfnote.h>
+
+ __HEAD
+
+/*
+ * Entry point for PVH guests.
+ *
+ * Xen ABI specifies the following register state when we come here:
+ *
+ * - `ebx`: contains the physical memory address where the loader has placed
+ * the boot start info structure.
+ * - `cr0`: bit 0 (PE) must be set. All the other writeable bits are cleared.
+ * - `cr4`: all bits are cleared.
+ * - `cs `: must be a 32-bit read/execute code segment with a base of `0`
+ * and a limit of `0xFFFFFFFF`. The selector value is unspecified.
+ * - `ds`, `es`: must be a 32-bit read/write data segment with a base of
+ * `0` and a limit of `0xFFFFFFFF`. The selector values are all
+ * unspecified.
+ * - `tr`: must be a 32-bit TSS (active) with a base of `0` and a limit
+ * of `0x67`.
+ * - `eflags`: bit 17 (VM) must be cleared. Bit 9 (IF) must be cleared.
+ * Bit 8 (TF) must be cleared. Other bits are all unspecified.
+ *
+ * All other processor registers and flag bits are unspecified. The OS is in
+ * charge of setting up its own stack, GDT and IDT.
+ */
+
+#define PVH_GDT_ENTRY_CS 1
+#define PVH_GDT_ENTRY_DS 2
+#define PVH_CS_SEL (PVH_GDT_ENTRY_CS * 8)
+#define PVH_DS_SEL (PVH_GDT_ENTRY_DS * 8)
+
+SYM_CODE_START_LOCAL(pvh_start_xen)
+ UNWIND_HINT_EMPTY
+ cld
+
+ lgdt (_pa(gdt))
+
+ mov $PVH_DS_SEL,%eax
+ mov %eax,%ds
+ mov %eax,%es
+ mov %eax,%ss
+
+ /* Stash hvm_start_info. */
+ mov $_pa(pvh_start_info), %edi
+ mov %ebx, %esi
+ mov _pa(pvh_start_info_sz), %ecx
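+	/* The size is in bytes; convert to dwords for rep movsl. */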
+ shr $2,%ecx
+ rep
+ movsl
+
+ mov $_pa(early_stack_end), %esp
+
+ /* Enable PAE mode. */
+ mov %cr4, %eax
+ orl $X86_CR4_PAE, %eax
+ mov %eax, %cr4
+
+#ifdef CONFIG_X86_64
+ /* Enable Long mode. */
+ mov $MSR_EFER, %ecx
+ rdmsr
+ btsl $_EFER_LME, %eax
+ wrmsr
+
+ /* Enable pre-constructed page tables. */
+ mov $_pa(init_top_pgt), %eax
+ mov %eax, %cr3
+ mov $(X86_CR0_PG | X86_CR0_PE), %eax
+ mov %eax, %cr0
+
+ /* Jump to 64-bit mode. */
+ ljmp $PVH_CS_SEL, $_pa(1f)
+
+ /* 64-bit entry point. */
+ .code64
+1:
+ /* Set base address in stack canary descriptor. */
+ mov $MSR_GS_BASE,%ecx
+ mov $_pa(canary), %eax
+ xor %edx, %edx
+ wrmsr
+
+ call xen_prepare_pvh
+
+ /* startup_64 expects boot_params in %rsi. */
+ mov $_pa(pvh_bootparams), %rsi
+ mov $_pa(startup_64), %rax
+ ANNOTATE_RETPOLINE_SAFE
+ jmp *%rax
+
+#else /* CONFIG_X86_64 */
+
+ call mk_early_pgtbl_32
+
+ mov $_pa(initial_page_table), %eax
+ mov %eax, %cr3
+
+ mov %cr0, %eax
+ or $(X86_CR0_PG | X86_CR0_PE), %eax
+ mov %eax, %cr0
+
+ ljmp $PVH_CS_SEL, $1f
+1:
+ call xen_prepare_pvh
+ mov $_pa(pvh_bootparams), %esi
+
+ /* startup_32 doesn't expect paging and PAE to be on. */
+ ljmp $PVH_CS_SEL, $_pa(2f)
+2:
+ mov %cr0, %eax
+ and $~X86_CR0_PG, %eax
+ mov %eax, %cr0
+ mov %cr4, %eax
+ and $~X86_CR4_PAE, %eax
+ mov %eax, %cr4
+
+ ljmp $PVH_CS_SEL, $_pa(startup_32)
+#endif
+SYM_CODE_END(pvh_start_xen)
+
+ .section ".init.data","aw"
+ .balign 8
+SYM_DATA_START_LOCAL(gdt)
+ .word gdt_end - gdt_start
+ .long _pa(gdt_start)
+ .word 0
+SYM_DATA_END(gdt)
+SYM_DATA_START_LOCAL(gdt_start)
+ .quad 0x0000000000000000 /* NULL descriptor */
+#ifdef CONFIG_X86_64
+ .quad GDT_ENTRY(0xa09a, 0, 0xfffff) /* PVH_CS_SEL */
+#else
+ .quad GDT_ENTRY(0xc09a, 0, 0xfffff) /* PVH_CS_SEL */
+#endif
+ .quad GDT_ENTRY(0xc092, 0, 0xfffff) /* PVH_DS_SEL */
+SYM_DATA_END_LABEL(gdt_start, SYM_L_LOCAL, gdt_end)
+
+ .balign 16
+SYM_DATA_LOCAL(canary, .fill 48, 1, 0)
+
+SYM_DATA_START_LOCAL(early_stack)
+ .fill BOOT_STACK_SIZE, 1, 0
+SYM_DATA_END_LABEL(early_stack, SYM_L_LOCAL, early_stack_end)
+
+ ELFNOTE(Xen, XEN_ELFNOTE_PHYS32_ENTRY,
+ _ASM_PTR (pvh_start_xen - __START_KERNEL_map))
diff --git a/arch/x86/platform/scx200/Makefile b/arch/x86/platform/scx200/Makefile
new file mode 100644
index 000000000..981b3e430
--- /dev/null
+++ b/arch/x86/platform/scx200/Makefile
@@ -0,0 +1,3 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_SCx200) += scx200.o
+scx200-y += scx200_32.o
diff --git a/arch/x86/platform/scx200/scx200_32.c b/arch/x86/platform/scx200/scx200_32.c
new file mode 100644
index 000000000..80662b720
--- /dev/null
+++ b/arch/x86/platform/scx200/scx200_32.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2001,2002 Christer Weinigel <wingel@nano-system.com>
+ *
+ * National Semiconductor SCx200 support.
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/pci.h>
+
+#include <linux/scx200.h>
+#include <linux/scx200_gpio.h>
+
+/* Verify that the configuration block really is there */
+#define scx200_cb_probe(base) (inw((base) + SCx200_CBA) == (base))
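+/*
+ * (The Configuration Block stores its own I/O base in the CBA register,
+ * so reading SCx200_CBA at "base" returns "base" itself when a block is
+ * really mapped there; hence the equality test above.)
+ */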
+
+MODULE_AUTHOR("Christer Weinigel <wingel@nano-system.com>");
+MODULE_DESCRIPTION("NatSemi SCx200 Driver");
+MODULE_LICENSE("GPL");
+
+unsigned scx200_gpio_base = 0;
+unsigned long scx200_gpio_shadow[2];
+
+unsigned scx200_cb_base = 0;
+
+static struct pci_device_id scx200_tbl[] = {
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_BRIDGE) },
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_BRIDGE) },
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SCx200_XBUS) },
+ { PCI_VDEVICE(NS, PCI_DEVICE_ID_NS_SC1100_XBUS) },
+ { },
+};
+MODULE_DEVICE_TABLE(pci, scx200_tbl);
+
+static int scx200_probe(struct pci_dev *, const struct pci_device_id *);
+
+static struct pci_driver scx200_pci_driver = {
+ .name = "scx200",
+ .id_table = scx200_tbl,
+ .probe = scx200_probe,
+};
+
+static DEFINE_MUTEX(scx200_gpio_config_lock);
+
+static void scx200_init_shadow(void)
+{
+ int bank;
+
+ /* read the current values driven on the GPIO signals */
+ for (bank = 0; bank < 2; ++bank)
+ scx200_gpio_shadow[bank] = inl(scx200_gpio_base + 0x10 * bank);
+}
+
+static int scx200_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+ unsigned base;
+
+ if (pdev->device == PCI_DEVICE_ID_NS_SCx200_BRIDGE ||
+ pdev->device == PCI_DEVICE_ID_NS_SC1100_BRIDGE) {
+ base = pci_resource_start(pdev, 0);
+ pr_info("GPIO base 0x%x\n", base);
+
+ if (!request_region(base, SCx200_GPIO_SIZE,
+ "NatSemi SCx200 GPIO")) {
+ pr_err("can't allocate I/O for GPIOs\n");
+ return -EBUSY;
+ }
+
+ scx200_gpio_base = base;
+ scx200_init_shadow();
+
+ } else {
+ /* find the base of the Configuration Block */
+ if (scx200_cb_probe(SCx200_CB_BASE_FIXED)) {
+ scx200_cb_base = SCx200_CB_BASE_FIXED;
+ } else {
+ pci_read_config_dword(pdev, SCx200_CBA_SCRATCH, &base);
+ if (scx200_cb_probe(base)) {
+ scx200_cb_base = base;
+ } else {
+ pr_warn("Configuration Block not found\n");
+ return -ENODEV;
+ }
+ }
+ pr_info("Configuration Block base 0x%x\n", scx200_cb_base);
+ }
+
+ return 0;
+}
+
+u32 scx200_gpio_configure(unsigned index, u32 mask, u32 bits)
+{
+ u32 config, new_config;
+
+ mutex_lock(&scx200_gpio_config_lock);
+
+ outl(index, scx200_gpio_base + 0x20);
+ config = inl(scx200_gpio_base + 0x24);
+
+ new_config = (config & mask) | bits;
+ outl(new_config, scx200_gpio_base + 0x24);
+
+ mutex_unlock(&scx200_gpio_config_lock);
+
+ return config;
+}
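+
+/*
+ * Illustrative sketch (not part of this driver): a caller can use the
+ * mask/bits pair for a read-modify-write of a pin's configuration and
+ * later restore the saved value. The pin index used here is an arbitrary
+ * example, not a value defined by this file:
+ *
+ *	u32 old = scx200_gpio_configure(7, ~1, 1);	// set only bit 0
+ *	...
+ *	scx200_gpio_configure(7, 0, old);		// restore old config
+ */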
+
+static int __init scx200_init(void)
+{
+ pr_info("NatSemi SCx200 Driver\n");
+ return pci_register_driver(&scx200_pci_driver);
+}
+
+static void __exit scx200_cleanup(void)
+{
+ pci_unregister_driver(&scx200_pci_driver);
+ release_region(scx200_gpio_base, SCx200_GPIO_SIZE);
+}
+
+module_init(scx200_init);
+module_exit(scx200_cleanup);
+
+EXPORT_SYMBOL(scx200_gpio_base);
+EXPORT_SYMBOL(scx200_gpio_shadow);
+EXPORT_SYMBOL(scx200_gpio_configure);
+EXPORT_SYMBOL(scx200_cb_base);
diff --git a/arch/x86/platform/ts5500/Makefile b/arch/x86/platform/ts5500/Makefile
new file mode 100644
index 000000000..910fe9e3f
--- /dev/null
+++ b/arch/x86/platform/ts5500/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_TS5500) += ts5500.o
diff --git a/arch/x86/platform/ts5500/ts5500.c b/arch/x86/platform/ts5500/ts5500.c
new file mode 100644
index 000000000..0b67da056
--- /dev/null
+++ b/arch/x86/platform/ts5500/ts5500.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Technologic Systems TS-5500 Single Board Computer support
+ *
+ * Copyright (C) 2013-2014 Savoir-faire Linux Inc.
+ * Vivien Didelot <vivien.didelot@savoirfairelinux.com>
+ *
+ * This driver registers the Technologic Systems TS-5500 Single Board Computer
+ * (SBC) and its devices, and exposes information to userspace such as jumpers'
+ * state or available options. For further information about sysfs entries, see
+ * Documentation/ABI/testing/sysfs-platform-ts5500.
+ *
+ * This code may be extended to support similar x86-based platforms.
+ * Currently, the TS-5500 and TS-5400 are supported.
+ */
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/leds.h>
+#include <linux/init.h>
+#include <linux/platform_data/max197.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+/* Product code register */
+#define TS5500_PRODUCT_CODE_ADDR 0x74
+#define TS5500_PRODUCT_CODE 0x60 /* TS-5500 product code */
+#define TS5400_PRODUCT_CODE 0x40 /* TS-5400 product code */
+
+/* SRAM/RS-485/ADC options, and RS-485 RTS/Automatic RS-485 flags register */
+#define TS5500_SRAM_RS485_ADC_ADDR 0x75
+#define TS5500_SRAM BIT(0) /* SRAM option */
+#define TS5500_RS485 BIT(1) /* RS-485 option */
+#define TS5500_ADC BIT(2) /* A/D converter option */
+#define TS5500_RS485_RTS BIT(6) /* RTS for RS-485 */
+#define TS5500_RS485_AUTO BIT(7) /* Automatic RS-485 */
+
+/* External Reset/Industrial Temperature Range options register */
+#define TS5500_ERESET_ITR_ADDR 0x76
+#define TS5500_ERESET BIT(0) /* External Reset option */
+#define TS5500_ITR BIT(1) /* Indust. Temp. Range option */
+
+/* LED/Jumpers register */
+#define TS5500_LED_JP_ADDR 0x77
+#define TS5500_LED BIT(0) /* LED flag */
+#define TS5500_JP1 BIT(1) /* Automatic CMOS */
+#define TS5500_JP2 BIT(2) /* Enable Serial Console */
+#define TS5500_JP3 BIT(3) /* Write Enable Drive A */
+#define TS5500_JP4 BIT(4) /* Fast Console (115K baud) */
+#define TS5500_JP5 BIT(5) /* User Jumper */
+#define TS5500_JP6 BIT(6) /* Console on COM1 (req. JP2) */
+#define TS5500_JP7 BIT(7) /* Undocumented (Unused) */
+
+/* A/D Converter registers */
+#define TS5500_ADC_CONV_BUSY_ADDR 0x195 /* Conversion state register */
+#define TS5500_ADC_CONV_BUSY BIT(0)
+#define TS5500_ADC_CONV_INIT_LSB_ADDR 0x196 /* Start conv. / LSB register */
+#define TS5500_ADC_CONV_MSB_ADDR 0x197 /* MSB register */
+#define TS5500_ADC_CONV_DELAY 12 /* usec */
+
+/**
+ * struct ts5500_sbc - TS-5500 board description
+ * @name: Board model name.
+ * @id: Board product ID.
+ * @sram: Flag for SRAM option.
+ * @rs485: Flag for RS-485 option.
+ * @adc: Flag for Analog/Digital converter option.
+ * @ereset: Flag for External Reset option.
+ * @itr: Flag for Industrial Temperature Range option.
+ * @jumpers: Bitfield for jumpers' state.
+ */
+struct ts5500_sbc {
+ const char *name;
+ int id;
+ bool sram;
+ bool rs485;
+ bool adc;
+ bool ereset;
+ bool itr;
+ u8 jumpers;
+};
+
+/* Board signatures in BIOS shadow RAM */
+static const struct {
+ const char * const string;
+ const ssize_t offset;
+} ts5500_signatures[] __initconst = {
+ { "TS-5x00 AMD Elan", 0xb14 },
+};
+
+static int __init ts5500_check_signature(void)
+{
+ void __iomem *bios;
+ int i, ret = -ENODEV;
+
+ bios = ioremap(0xf0000, 0x10000);
+ if (!bios)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(ts5500_signatures); i++) {
+ if (check_signature(bios + ts5500_signatures[i].offset,
+ ts5500_signatures[i].string,
+ strlen(ts5500_signatures[i].string))) {
+ ret = 0;
+ break;
+ }
+ }
+
+ iounmap(bios);
+ return ret;
+}
+
+static int __init ts5500_detect_config(struct ts5500_sbc *sbc)
+{
+ u8 tmp;
+ int ret = 0;
+
+ if (!request_region(TS5500_PRODUCT_CODE_ADDR, 4, "ts5500"))
+ return -EBUSY;
+
+ sbc->id = inb(TS5500_PRODUCT_CODE_ADDR);
+ if (sbc->id == TS5500_PRODUCT_CODE) {
+ sbc->name = "TS-5500";
+ } else if (sbc->id == TS5400_PRODUCT_CODE) {
+ sbc->name = "TS-5400";
+ } else {
+ pr_err("ts5500: unknown product code 0x%x\n", sbc->id);
+ ret = -ENODEV;
+ goto cleanup;
+ }
+
+ tmp = inb(TS5500_SRAM_RS485_ADC_ADDR);
+ sbc->sram = tmp & TS5500_SRAM;
+ sbc->rs485 = tmp & TS5500_RS485;
+ sbc->adc = tmp & TS5500_ADC;
+
+ tmp = inb(TS5500_ERESET_ITR_ADDR);
+ sbc->ereset = tmp & TS5500_ERESET;
+ sbc->itr = tmp & TS5500_ITR;
+
+ tmp = inb(TS5500_LED_JP_ADDR);
+ sbc->jumpers = tmp & ~TS5500_LED;
+
+cleanup:
+ release_region(TS5500_PRODUCT_CODE_ADDR, 4);
+ return ret;
+}
+
+static ssize_t name_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "%s\n", sbc->name);
+}
+static DEVICE_ATTR_RO(name);
+
+static ssize_t id_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.2x\n", sbc->id);
+}
+static DEVICE_ATTR_RO(id);
+
+static ssize_t jumpers_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev);
+
+ return sprintf(buf, "0x%.2x\n", sbc->jumpers >> 1);
+}
+static DEVICE_ATTR_RO(jumpers);
+
+#define TS5500_ATTR_BOOL(_field) \
+ static ssize_t _field##_show(struct device *dev, \
+ struct device_attribute *attr, char *buf) \
+ { \
+ struct ts5500_sbc *sbc = dev_get_drvdata(dev); \
+ \
+ return sprintf(buf, "%d\n", sbc->_field); \
+ } \
+ static DEVICE_ATTR_RO(_field)
+
+TS5500_ATTR_BOOL(sram);
+TS5500_ATTR_BOOL(rs485);
+TS5500_ATTR_BOOL(adc);
+TS5500_ATTR_BOOL(ereset);
+TS5500_ATTR_BOOL(itr);
+
+static struct attribute *ts5500_attributes[] = {
+ &dev_attr_id.attr,
+ &dev_attr_name.attr,
+ &dev_attr_jumpers.attr,
+ &dev_attr_sram.attr,
+ &dev_attr_rs485.attr,
+ &dev_attr_adc.attr,
+ &dev_attr_ereset.attr,
+ &dev_attr_itr.attr,
+ NULL
+};
+
+static const struct attribute_group ts5500_attr_group = {
+ .attrs = ts5500_attributes,
+};
+
+static struct resource ts5500_dio1_resource[] = {
+ DEFINE_RES_IRQ_NAMED(7, "DIO1 interrupt"),
+};
+
+static struct platform_device ts5500_dio1_pdev = {
+ .name = "ts5500-dio1",
+ .id = -1,
+ .resource = ts5500_dio1_resource,
+ .num_resources = 1,
+};
+
+static struct resource ts5500_dio2_resource[] = {
+ DEFINE_RES_IRQ_NAMED(6, "DIO2 interrupt"),
+};
+
+static struct platform_device ts5500_dio2_pdev = {
+ .name = "ts5500-dio2",
+ .id = -1,
+ .resource = ts5500_dio2_resource,
+ .num_resources = 1,
+};
+
+static void ts5500_led_set(struct led_classdev *led_cdev,
+ enum led_brightness brightness)
+{
+ outb(!!brightness, TS5500_LED_JP_ADDR);
+}
+
+static enum led_brightness ts5500_led_get(struct led_classdev *led_cdev)
+{
+ return (inb(TS5500_LED_JP_ADDR) & TS5500_LED) ? LED_FULL : LED_OFF;
+}
+
+static struct led_classdev ts5500_led_cdev = {
+ .name = "ts5500:green:",
+ .brightness_set = ts5500_led_set,
+ .brightness_get = ts5500_led_get,
+};
+
+static int ts5500_adc_convert(u8 ctrl)
+{
+ u8 lsb, msb;
+
+ /* Start conversion (ensure the 3 MSB are set to 0) */
+ outb(ctrl & 0x1f, TS5500_ADC_CONV_INIT_LSB_ADDR);
+
+ /*
+ * The platform has CPLD logic driving the A/D converter.
+ * The conversion must complete within 11 microseconds,
+ * otherwise we have to re-initiate a conversion.
+ */
+ udelay(TS5500_ADC_CONV_DELAY);
+ if (inb(TS5500_ADC_CONV_BUSY_ADDR) & TS5500_ADC_CONV_BUSY)
+ return -EBUSY;
+
+ /* Read the raw data */
+ lsb = inb(TS5500_ADC_CONV_INIT_LSB_ADDR);
+ msb = inb(TS5500_ADC_CONV_MSB_ADDR);
+
+ return (msb << 8) | lsb;
+}
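+
+/*
+ * A minimal sketch (not part of this file) of how the -EBUSY case above
+ * could be handled by re-initiating the conversion, as the comment in
+ * ts5500_adc_convert() suggests; the retry bound is an arbitrary choice:
+ *
+ *	int i, val = -EBUSY;
+ *
+ *	for (i = 0; i < 3 && val == -EBUSY; i++)
+ *		val = ts5500_adc_convert(ctrl);
+ */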
+
+static struct max197_platform_data ts5500_adc_pdata = {
+ .convert = ts5500_adc_convert,
+};
+
+static struct platform_device ts5500_adc_pdev = {
+ .name = "max197",
+ .id = -1,
+ .dev = {
+ .platform_data = &ts5500_adc_pdata,
+ },
+};
+
+static int __init ts5500_init(void)
+{
+ struct platform_device *pdev;
+ struct ts5500_sbc *sbc;
+ int err;
+
+ /*
+	 * Neither DMI nor PCI bridge subvendor info is available;
+	 * the BIOS only provides a 16-bit identification call.
+ * It is safer to find a signature in the BIOS shadow RAM.
+ */
+ err = ts5500_check_signature();
+ if (err)
+ return err;
+
+ pdev = platform_device_register_simple("ts5500", -1, NULL, 0);
+ if (IS_ERR(pdev))
+ return PTR_ERR(pdev);
+
+ sbc = devm_kzalloc(&pdev->dev, sizeof(struct ts5500_sbc), GFP_KERNEL);
+ if (!sbc) {
+ err = -ENOMEM;
+ goto error;
+ }
+
+ err = ts5500_detect_config(sbc);
+ if (err)
+ goto error;
+
+ platform_set_drvdata(pdev, sbc);
+
+ err = sysfs_create_group(&pdev->dev.kobj, &ts5500_attr_group);
+ if (err)
+ goto error;
+
+ if (sbc->id == TS5500_PRODUCT_CODE) {
+ ts5500_dio1_pdev.dev.parent = &pdev->dev;
+ if (platform_device_register(&ts5500_dio1_pdev))
+ dev_warn(&pdev->dev, "DIO1 block registration failed\n");
+ ts5500_dio2_pdev.dev.parent = &pdev->dev;
+ if (platform_device_register(&ts5500_dio2_pdev))
+ dev_warn(&pdev->dev, "DIO2 block registration failed\n");
+ }
+
+ if (led_classdev_register(&pdev->dev, &ts5500_led_cdev))
+ dev_warn(&pdev->dev, "LED registration failed\n");
+
+ if (sbc->adc) {
+ ts5500_adc_pdev.dev.parent = &pdev->dev;
+ if (platform_device_register(&ts5500_adc_pdev))
+ dev_warn(&pdev->dev, "ADC registration failed\n");
+ }
+
+ return 0;
+error:
+ platform_device_unregister(pdev);
+ return err;
+}
+device_initcall(ts5500_init);
diff --git a/arch/x86/platform/uv/Makefile b/arch/x86/platform/uv/Makefile
new file mode 100644
index 000000000..1441dda8e
--- /dev/null
+++ b/arch/x86/platform/uv/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+obj-$(CONFIG_X86_UV) += bios_uv.o uv_irq.o uv_time.o uv_nmi.o
diff --git a/arch/x86/platform/uv/bios_uv.c b/arch/x86/platform/uv/bios_uv.c
new file mode 100644
index 000000000..bf31af3d3
--- /dev/null
+++ b/arch/x86/platform/uv/bios_uv.c
@@ -0,0 +1,269 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * BIOS run time interface routines.
+ *
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Russ Anderson <rja@sgi.com>
+ */
+
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/slab.h>
+#include <asm/efi.h>
+#include <linux/io.h>
+#include <asm/pgalloc.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv_hub.h>
+
+unsigned long uv_systab_phys __ro_after_init = EFI_INVALID_TABLE_ADDR;
+
+struct uv_systab *uv_systab;
+
+static s64 __uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ u64 a4, u64 a5)
+{
+ struct uv_systab *tab = uv_systab;
+ s64 ret;
+
+ if (!tab || !tab->function)
+ /*
+ * BIOS does not support UV systab
+ */
+ return BIOS_STATUS_UNIMPLEMENTED;
+
+ ret = efi_call_virt_pointer(tab, function, (u64)which, a1, a2, a3, a4, a5);
+
+ return ret;
+}
+
+static s64 uv_bios_call(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3, u64 a4,
+ u64 a5)
+{
+ s64 ret;
+
+ if (down_interruptible(&__efi_uv_runtime_lock))
+ return BIOS_STATUS_ABORT;
+
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ up(&__efi_uv_runtime_lock);
+
+ return ret;
+}
+
+static s64 uv_bios_call_irqsave(enum uv_bios_cmd which, u64 a1, u64 a2, u64 a3,
+ u64 a4, u64 a5)
+{
+ unsigned long bios_flags;
+ s64 ret;
+
+ if (down_interruptible(&__efi_uv_runtime_lock))
+ return BIOS_STATUS_ABORT;
+
+ local_irq_save(bios_flags);
+ ret = __uv_bios_call(which, a1, a2, a3, a4, a5);
+ local_irq_restore(bios_flags);
+
+ up(&__efi_uv_runtime_lock);
+
+ return ret;
+}
+
+long sn_partition_id;
+EXPORT_SYMBOL_GPL(sn_partition_id);
+long sn_coherency_id;
+EXPORT_SYMBOL_GPL(sn_coherency_id);
+long sn_region_size;
+EXPORT_SYMBOL_GPL(sn_region_size);
+long system_serial_number;
+int uv_type;
+
+s64 uv_bios_get_sn_info(int fc, int *uvtype, long *partid, long *coher,
+ long *region, long *ssn)
+{
+ s64 ret;
+ u64 v0, v1;
+ union partition_info_u part;
+
+ ret = uv_bios_call_irqsave(UV_BIOS_GET_SN_INFO, fc,
+ (u64)(&v0), (u64)(&v1), 0, 0);
+ if (ret != BIOS_STATUS_SUCCESS)
+ return ret;
+
+ part.val = v0;
+ if (uvtype)
+ *uvtype = part.hub_version;
+ if (partid)
+ *partid = part.partition_id;
+ if (coher)
+ *coher = part.coherence_id;
+ if (region)
+ *region = part.region_size;
+ if (ssn)
+ *ssn = v1;
+ return ret;
+}
+
+int
+uv_bios_mq_watchlist_alloc(unsigned long addr, unsigned int mq_size,
+ unsigned long *intr_mmr_offset)
+{
+ u64 watchlist;
+ s64 ret;
+
+ /*
+	 * BIOS returns the watchlist number or a negative error number.
+ */
+ ret = (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_ALLOC, addr,
+ mq_size, (u64)intr_mmr_offset,
+ (u64)&watchlist, 0);
+ if (ret < BIOS_STATUS_SUCCESS)
+ return ret;
+
+ return watchlist;
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_alloc);
+
+int
+uv_bios_mq_watchlist_free(int blade, int watchlist_num)
+{
+ return (int)uv_bios_call_irqsave(UV_BIOS_WATCHLIST_FREE,
+ blade, watchlist_num, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_mq_watchlist_free);
+
+s64
+uv_bios_change_memprotect(u64 paddr, u64 len, enum uv_memprotect perms)
+{
+ return uv_bios_call_irqsave(UV_BIOS_MEMPROTECT, paddr, len,
+ perms, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_change_memprotect);
+
+s64
+uv_bios_reserved_page_pa(u64 buf, u64 *cookie, u64 *addr, u64 *len)
+{
+ return uv_bios_call_irqsave(UV_BIOS_GET_PARTITION_ADDR, (u64)cookie,
+ (u64)addr, buf, (u64)len, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_reserved_page_pa);
+
+s64 uv_bios_freq_base(u64 clock_type, u64 *ticks_per_second)
+{
+ return uv_bios_call(UV_BIOS_FREQ_BASE, clock_type,
+ (u64)ticks_per_second, 0, 0, 0);
+}
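+
+/*
+ * Typical use, as a sketch (mirroring how the UV RTC code queries the
+ * real-time clock frequency; error handling abbreviated):
+ *
+ *	u64 ticks_per_sec;
+ *
+ *	if (uv_bios_freq_base(BIOS_FREQ_BASE_REALTIME_CLOCK,
+ *			      &ticks_per_sec) != BIOS_STATUS_SUCCESS)
+ *		return -ENODEV;
+ */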
+
+/*
+ * uv_bios_set_legacy_vga_target - Set Legacy VGA I/O Target
+ * @decode: true to enable target, false to disable target
+ * @domain: PCI domain number
+ * @bus: PCI bus number
+ *
+ * Returns:
+ * 0: Success
+ * -EINVAL: Invalid domain or bus number
+ * -ENOSYS: Capability not available
+ * -EBUSY: Legacy VGA I/O cannot be retargeted at this time
+ */
+int uv_bios_set_legacy_vga_target(bool decode, int domain, int bus)
+{
+ return uv_bios_call(UV_BIOS_SET_LEGACY_VGA_TARGET,
+ (u64)decode, (u64)domain, (u64)bus, 0, 0);
+}
+
+extern s64 uv_bios_get_master_nasid(u64 size, u64 *master_nasid)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, 0, UV_BIOS_EXTRA_MASTER_NASID, 0,
+ size, (u64)master_nasid);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_master_nasid);
+
+extern s64 uv_bios_get_heapsize(u64 nasid, u64 size, u64 *heap_size)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_GET_HEAPSIZE,
+ 0, size, (u64)heap_size);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_heapsize);
+
+extern s64 uv_bios_install_heap(u64 nasid, u64 heap_size, u64 *bios_heap)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_INSTALL_HEAP,
+ 0, heap_size, (u64)bios_heap);
+}
+EXPORT_SYMBOL_GPL(uv_bios_install_heap);
+
+extern s64 uv_bios_obj_count(u64 nasid, u64 size, u64 *objcnt)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_OBJECT_COUNT,
+ 0, size, (u64)objcnt);
+}
+EXPORT_SYMBOL_GPL(uv_bios_obj_count);
+
+extern s64 uv_bios_enum_objs(u64 nasid, u64 size, u64 *objbuf)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_ENUM_OBJECTS,
+ 0, size, (u64)objbuf);
+}
+EXPORT_SYMBOL_GPL(uv_bios_enum_objs);
+
+extern s64 uv_bios_enum_ports(u64 nasid, u64 obj_id, u64 size, u64 *portbuf)
+{
+ return uv_bios_call(UV_BIOS_EXTRA, nasid, UV_BIOS_EXTRA_ENUM_PORTS,
+ obj_id, size, (u64)portbuf);
+}
+EXPORT_SYMBOL_GPL(uv_bios_enum_ports);
+
+extern s64 uv_bios_get_geoinfo(u64 nasid, u64 size, u64 *buf)
+{
+ return uv_bios_call(UV_BIOS_GET_GEOINFO, nasid, (u64)buf, size, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_geoinfo);
+
+extern s64 uv_bios_get_pci_topology(u64 size, u64 *buf)
+{
+ return uv_bios_call(UV_BIOS_GET_PCI_TOPOLOGY, (u64)buf, size, 0, 0, 0);
+}
+EXPORT_SYMBOL_GPL(uv_bios_get_pci_topology);
+
+unsigned long get_uv_systab_phys(bool msg)
+{
+ if ((uv_systab_phys == EFI_INVALID_TABLE_ADDR) ||
+ !uv_systab_phys || efi_runtime_disabled()) {
+ if (msg)
+ pr_crit("UV: UVsystab: missing\n");
+ return 0;
+ }
+ return uv_systab_phys;
+}
+
+int uv_bios_init(void)
+{
+ unsigned long uv_systab_phys_addr;
+
+ uv_systab = NULL;
+ uv_systab_phys_addr = get_uv_systab_phys(1);
+ if (!uv_systab_phys_addr)
+ return -EEXIST;
+
+ uv_systab = ioremap(uv_systab_phys_addr, sizeof(struct uv_systab));
+ if (!uv_systab || strncmp(uv_systab->signature, UV_SYSTAB_SIG, 4)) {
+ pr_err("UV: UVsystab: bad signature!\n");
+ iounmap(uv_systab);
+ return -EINVAL;
+ }
+
+ /* Starting with UV4 the UV systab size is variable */
+ if (uv_systab->revision >= UV_SYSTAB_VERSION_UV4) {
+ int size = uv_systab->size;
+
+ iounmap(uv_systab);
+ uv_systab = ioremap(uv_systab_phys_addr, size);
+ if (!uv_systab) {
+ pr_err("UV: UVsystab: ioremap(%d) failed!\n", size);
+ return -EFAULT;
+ }
+ }
+ pr_info("UV: UVsystab: Revision:%x\n", uv_systab->revision);
+ return 0;
+}
diff --git a/arch/x86/platform/uv/uv_irq.c b/arch/x86/platform/uv/uv_irq.c
new file mode 100644
index 000000000..1a536a187
--- /dev/null
+++ b/arch/x86/platform/uv/uv_irq.c
@@ -0,0 +1,217 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * SGI UV IRQ functions
+ *
+ * Copyright (C) 2008 Silicon Graphics, Inc. All rights reserved.
+ */
+
+#include <linux/export.h>
+#include <linux/rbtree.h>
+#include <linux/slab.h>
+#include <linux/irq.h>
+
+#include <asm/irqdomain.h>
+#include <asm/apic.h>
+#include <asm/uv/uv_irq.h>
+#include <asm/uv/uv_hub.h>
+
+/* MMR offset and pnode of hub sourcing interrupts for a given irq */
+struct uv_irq_2_mmr_pnode {
+ unsigned long offset;
+ int pnode;
+};
+
+static void uv_program_mmr(struct irq_cfg *cfg, struct uv_irq_2_mmr_pnode *info)
+{
+ unsigned long mmr_value;
+ struct uv_IO_APIC_route_entry *entry;
+
+ BUILD_BUG_ON(sizeof(struct uv_IO_APIC_route_entry) !=
+ sizeof(unsigned long));
+
+ mmr_value = 0;
+ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+ entry->vector = cfg->vector;
+ entry->delivery_mode = apic->delivery_mode;
+ entry->dest_mode = apic->dest_mode_logical;
+ entry->polarity = 0;
+ entry->trigger = 0;
+ entry->mask = 0;
+ entry->dest = cfg->dest_apicid;
+
+ uv_write_global_mmr64(info->pnode, info->offset, mmr_value);
+}
+
+static void uv_noop(struct irq_data *data) { }
+
+static int
+uv_set_irq_affinity(struct irq_data *data, const struct cpumask *mask,
+ bool force)
+{
+ struct irq_data *parent = data->parent_data;
+ struct irq_cfg *cfg = irqd_cfg(data);
+ int ret;
+
+ ret = parent->chip->irq_set_affinity(parent, mask, force);
+ if (ret >= 0) {
+ uv_program_mmr(cfg, data->chip_data);
+ send_cleanup_vector(cfg);
+ }
+
+ return ret;
+}
+
+static struct irq_chip uv_irq_chip = {
+ .name = "UV-CORE",
+ .irq_mask = uv_noop,
+ .irq_unmask = uv_noop,
+ .irq_eoi = apic_ack_irq,
+ .irq_set_affinity = uv_set_irq_affinity,
+};
+
+static int uv_domain_alloc(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs, void *arg)
+{
+ struct uv_irq_2_mmr_pnode *chip_data;
+ struct irq_alloc_info *info = arg;
+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+ int ret;
+
+ if (nr_irqs > 1 || !info || info->type != X86_IRQ_ALLOC_TYPE_UV)
+ return -EINVAL;
+
+ chip_data = kmalloc_node(sizeof(*chip_data), GFP_KERNEL,
+ irq_data_get_node(irq_data));
+ if (!chip_data)
+ return -ENOMEM;
+
+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+ if (ret >= 0) {
+ if (info->uv.limit == UV_AFFINITY_CPU)
+ irq_set_status_flags(virq, IRQ_NO_BALANCING);
+ else
+ irq_set_status_flags(virq, IRQ_MOVE_PCNTXT);
+
+ chip_data->pnode = uv_blade_to_pnode(info->uv.blade);
+ chip_data->offset = info->uv.offset;
+ irq_domain_set_info(domain, virq, virq, &uv_irq_chip, chip_data,
+ handle_percpu_irq, NULL, info->uv.name);
+ } else {
+ kfree(chip_data);
+ }
+
+ return ret;
+}
+
+static void uv_domain_free(struct irq_domain *domain, unsigned int virq,
+ unsigned int nr_irqs)
+{
+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq);
+
+ BUG_ON(nr_irqs != 1);
+ kfree(irq_data->chip_data);
+ irq_clear_status_flags(virq, IRQ_MOVE_PCNTXT);
+ irq_clear_status_flags(virq, IRQ_NO_BALANCING);
+ irq_domain_free_irqs_top(domain, virq, nr_irqs);
+}
+
+/*
+ * Re-target the irq to the specified CPU and enable the specified MMR located
+ * on the specified blade to allow the sending of MSIs to the specified CPU.
+ */
+static int uv_domain_activate(struct irq_domain *domain,
+ struct irq_data *irq_data, bool reserve)
+{
+ uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
+ return 0;
+}
+
+/*
+ * Disable the specified MMR located on the specified blade so that MSIs are
+ * no longer allowed to be sent.
+ */
+static void uv_domain_deactivate(struct irq_domain *domain,
+ struct irq_data *irq_data)
+{
+ unsigned long mmr_value;
+ struct uv_IO_APIC_route_entry *entry;
+
+ mmr_value = 0;
+ entry = (struct uv_IO_APIC_route_entry *)&mmr_value;
+ entry->mask = 1;
+ uv_program_mmr(irqd_cfg(irq_data), irq_data->chip_data);
+}
+
+static const struct irq_domain_ops uv_domain_ops = {
+ .alloc = uv_domain_alloc,
+ .free = uv_domain_free,
+ .activate = uv_domain_activate,
+ .deactivate = uv_domain_deactivate,
+};
+
+static struct irq_domain *uv_get_irq_domain(void)
+{
+ static struct irq_domain *uv_domain;
+ static DEFINE_MUTEX(uv_lock);
+ struct fwnode_handle *fn;
+
+ mutex_lock(&uv_lock);
+ if (uv_domain)
+ goto out;
+
+ fn = irq_domain_alloc_named_fwnode("UV-CORE");
+ if (!fn)
+ goto out;
+
+ uv_domain = irq_domain_create_tree(fn, &uv_domain_ops, NULL);
+ if (uv_domain)
+ uv_domain->parent = x86_vector_domain;
+ else
+ irq_domain_free_fwnode(fn);
+out:
+ mutex_unlock(&uv_lock);
+
+ return uv_domain;
+}
+
+/*
+ * Set up a mapping of an available irq and vector, and enable the specified
+ * MMR that defines the MSI that is to be sent to the specified CPU when an
+ * interrupt is raised.
+ */
+int uv_setup_irq(char *irq_name, int cpu, int mmr_blade,
+ unsigned long mmr_offset, int limit)
+{
+ struct irq_alloc_info info;
+ struct irq_domain *domain = uv_get_irq_domain();
+
+ if (!domain)
+ return -ENOMEM;
+
+ init_irq_alloc_info(&info, cpumask_of(cpu));
+ info.type = X86_IRQ_ALLOC_TYPE_UV;
+ info.uv.limit = limit;
+ info.uv.blade = mmr_blade;
+ info.uv.offset = mmr_offset;
+ info.uv.name = irq_name;
+
+ return irq_domain_alloc_irqs(domain, 1,
+ uv_blade_to_memory_nid(mmr_blade), &info);
+}
+EXPORT_SYMBOL_GPL(uv_setup_irq);
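+
+/*
+ * Sketch of a caller pairing the two entry points (illustrative only;
+ * "my-uv-irq" and the cpu/blade/offset values are placeholders, not
+ * values defined by this file):
+ *
+ *	int irq = uv_setup_irq("my-uv-irq", cpu, mmr_blade, mmr_offset,
+ *			       UV_AFFINITY_CPU);
+ *	if (irq < 0)
+ *		return irq;
+ *	...
+ *	uv_teardown_irq(irq);
+ */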
+
+/*
+ * Tear down a mapping of an irq and vector, and disable the specified MMR that
+ * defined the MSI that was to be sent to the specified CPU when an interrupt
+ * was raised.
+ *
+ * Pass the irq value that was returned by uv_setup_irq().
+ */
+void uv_teardown_irq(unsigned int irq)
+{
+ irq_domain_free_irqs(irq, 1);
+}
+EXPORT_SYMBOL_GPL(uv_teardown_irq);
diff --git a/arch/x86/platform/uv/uv_nmi.c b/arch/x86/platform/uv/uv_nmi.c
new file mode 100644
index 000000000..a60af0230
--- /dev/null
+++ b/arch/x86/platform/uv/uv_nmi.c
@@ -0,0 +1,1096 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SGI NMI support routines
+ *
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (C) 2007-2017 Silicon Graphics, Inc. All rights reserved.
+ * Copyright (c) Mike Travis
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/kdb.h>
+#include <linux/kexec.h>
+#include <linux/kgdb.h>
+#include <linux/moduleparam.h>
+#include <linux/nmi.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/slab.h>
+#include <linux/clocksource.h>
+
+#include <asm/apic.h>
+#include <asm/current.h>
+#include <asm/kdebug.h>
+#include <asm/local64.h>
+#include <asm/nmi.h>
+#include <asm/reboot.h>
+#include <asm/traps.h>
+#include <asm/uv/uv.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/uv_mmrs.h>
+
+/*
+ * UV handler for NMI
+ *
+ * Handle system-wide NMI events generated by the global 'power nmi' command.
+ *
+ * Basic operation is to field the NMI interrupt on each CPU and wait
+ * until all CPUs have arrived in the NMI handler. If some CPUs do not
+ * make it into the handler, try to force them in with the IPI(NMI) signal.
+ *
+ * We also have to minimize UV Hub MMR accesses as much as possible, as
+ * such accesses disrupt the UV Hub's primary mission of directing
+ * NumaLink traffic and can cause system problems.
+ *
+ * To do this we register our primary NMI notifier on the NMI_UNKNOWN
+ * chain. This reduces the number of false NMI calls when the perf
+ * tools are running, which generate an enormous number of NMIs per
+ * second (~4M/s for 1024 CPU threads). Our secondary NMI handler is
+ * very short, as it only checks whether it has been "pinged" with the
+ * IPI(NMI) signal as mentioned above, and does not read the UV Hub's MMR.
+ *
+ */
+
+static struct uv_hub_nmi_s **uv_hub_nmi_list;
+
+DEFINE_PER_CPU(struct uv_cpu_nmi_s, uv_cpu_nmi);
+
+/* Newer SMM NMI handler, not present in all systems */
+static unsigned long uvh_nmi_mmrx; /* UVH_EVENT_OCCURRED0/1 */
+static unsigned long uvh_nmi_mmrx_clear; /* UVH_EVENT_OCCURRED0/1_ALIAS */
+static int uvh_nmi_mmrx_shift; /* UVH_EVENT_OCCURRED0/1_EXTIO_INT0_SHFT */
+static char *uvh_nmi_mmrx_type; /* "EXTIO_INT0" */
+
+/* Non-zero indicates newer SMM NMI handler present */
+static unsigned long uvh_nmi_mmrx_supported; /* UVH_EXTIO_INT0_BROADCAST */
+
+/* Indicates to BIOS that we want to use the newer SMM NMI handler */
+static unsigned long uvh_nmi_mmrx_req; /* UVH_BIOS_KERNEL_MMR_ALIAS_2 */
+static int uvh_nmi_mmrx_req_shift; /* 62 */
+
+/* UV hubless values */
+#define NMI_CONTROL_PORT 0x70
+#define NMI_DUMMY_PORT 0x71
+#define PAD_OWN_GPP_D_0 0x2c
+#define GPI_NMI_STS_GPP_D_0 0x164
+#define GPI_NMI_ENA_GPP_D_0 0x174
+#define STS_GPP_D_0_MASK 0x1
+#define PAD_CFG_DW0_GPP_D_0 0x4c0
+#define GPIROUTNMI (1ul << 17)
+#define PCH_PCR_GPIO_1_BASE 0xfdae0000ul
+#define PCH_PCR_GPIO_ADDRESS(offset) (int *)((u64)(pch_base) | (u64)(offset))
+
+static u64 *pch_base;
+static unsigned long nmi_mmr;
+static unsigned long nmi_mmr_clear;
+static unsigned long nmi_mmr_pending;
+
+static atomic_t uv_in_nmi;
+static atomic_t uv_nmi_cpu = ATOMIC_INIT(-1);
+static atomic_t uv_nmi_cpus_in_nmi = ATOMIC_INIT(-1);
+static atomic_t uv_nmi_slave_continue;
+static cpumask_var_t uv_nmi_cpu_mask;
+
+static atomic_t uv_nmi_kexec_failed;
+
+/* Values for uv_nmi_slave_continue */
+#define SLAVE_CLEAR 0
+#define SLAVE_CONTINUE 1
+#define SLAVE_EXIT 2
+
+/*
+ * By default, all stack dumps go to the console and the log buffer.
+ * Lower the level to send them to the log buffer only.
+ */
+static int uv_nmi_loglevel = CONSOLE_LOGLEVEL_DEFAULT;
+module_param_named(dump_loglevel, uv_nmi_loglevel, int, 0644);
+
+/*
+ * The following values show statistics on how perf events are affecting
+ * this system.
+ */
+static int param_get_local64(char *buffer, const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
+}
+
+static int param_set_local64(const char *val, const struct kernel_param *kp)
+{
+ /* Clear on any write */
+ local64_set((local64_t *)kp->arg, 0);
+ return 0;
+}
+
+static const struct kernel_param_ops param_ops_local64 = {
+ .get = param_get_local64,
+ .set = param_set_local64,
+};
+#define param_check_local64(name, p) __param_check(name, p, local64_t)
+
+static local64_t uv_nmi_count;
+module_param_named(nmi_count, uv_nmi_count, local64, 0644);
+
+static local64_t uv_nmi_misses;
+module_param_named(nmi_misses, uv_nmi_misses, local64, 0644);
+
+static local64_t uv_nmi_ping_count;
+module_param_named(ping_count, uv_nmi_ping_count, local64, 0644);
+
+static local64_t uv_nmi_ping_misses;
+module_param_named(ping_misses, uv_nmi_ping_misses, local64, 0644);
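+
+/*
+ * Each counter above is exposed as a module parameter and so (assuming
+ * the usual sysfs layout) can be inspected or cleared from userspace,
+ * e.g. via /sys/module/uv_nmi/parameters/nmi_count; any write resets
+ * the counter to zero, per param_set_local64() above.
+ */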
+
+/*
+ * The following values allow tuning for large systems under heavy load.
+ */
+static int uv_nmi_initial_delay = 100;
+module_param_named(initial_delay, uv_nmi_initial_delay, int, 0644);
+
+static int uv_nmi_slave_delay = 100;
+module_param_named(slave_delay, uv_nmi_slave_delay, int, 0644);
+
+static int uv_nmi_loop_delay = 100;
+module_param_named(loop_delay, uv_nmi_loop_delay, int, 0644);
+
+static int uv_nmi_trigger_delay = 10000;
+module_param_named(trigger_delay, uv_nmi_trigger_delay, int, 0644);
+
+static int uv_nmi_wait_count = 100;
+module_param_named(wait_count, uv_nmi_wait_count, int, 0644);
+
+static int uv_nmi_retry_count = 500;
+module_param_named(retry_count, uv_nmi_retry_count, int, 0644);
+
+static bool uv_pch_intr_enable = true;
+static bool uv_pch_intr_now_enabled;
+module_param_named(pch_intr_enable, uv_pch_intr_enable, bool, 0644);
+
+static bool uv_pch_init_enable = true;
+module_param_named(pch_init_enable, uv_pch_init_enable, bool, 0644);
+
+static int uv_nmi_debug;
+module_param_named(debug, uv_nmi_debug, int, 0644);
+
+#define nmi_debug(fmt, ...) \
+ do { \
+ if (uv_nmi_debug) \
+ pr_info(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/* Valid NMI Actions */
+#define ACTION_LEN 16
+static struct nmi_action {
+ char *action;
+ char *desc;
+} valid_acts[] = {
+ { "kdump", "do kernel crash dump" },
+ { "dump", "dump process stack for each cpu" },
+ { "ips", "dump Inst Ptr info for each cpu" },
+ { "kdb", "enter KDB (needs kgdboc= assignment)" },
+ { "kgdb", "enter KGDB (needs gdb target remote)" },
+ { "health", "check if CPUs respond to NMI" },
+};
+typedef char action_t[ACTION_LEN];
+static action_t uv_nmi_action = { "dump" };
+
+static int param_get_action(char *buffer, const struct kernel_param *kp)
+{
+ return sprintf(buffer, "%s\n", uv_nmi_action);
+}
+
+static int param_set_action(const char *val, const struct kernel_param *kp)
+{
+ int i;
+ int n = ARRAY_SIZE(valid_acts);
+ char arg[ACTION_LEN], *p;
+
+ /* (remove possible '\n') */
+ strncpy(arg, val, ACTION_LEN - 1);
+ arg[ACTION_LEN - 1] = '\0';
+ p = strchr(arg, '\n');
+ if (p)
+ *p = '\0';
+
+ for (i = 0; i < n; i++)
+ if (!strcmp(arg, valid_acts[i].action))
+ break;
+
+ if (i < n) {
+ strcpy(uv_nmi_action, arg);
+ pr_info("UV: New NMI action:%s\n", uv_nmi_action);
+ return 0;
+ }
+
+ pr_err("UV: Invalid NMI action:%s, valid actions are:\n", arg);
+ for (i = 0; i < n; i++)
+ pr_err("UV: %-8s - %s\n",
+ valid_acts[i].action, valid_acts[i].desc);
+ return -EINVAL;
+}
+
+static const struct kernel_param_ops param_ops_action = {
+ .get = param_get_action,
+ .set = param_set_action,
+};
+#define param_check_action(name, p) __param_check(name, p, action_t)
+
+module_param_named(action, uv_nmi_action, action, 0644);
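+
+/*
+ * The NMI action is therefore tunable at run time (assuming the usual
+ * module-parameter sysfs layout), e.g.:
+ *
+ *	echo kdump > /sys/module/uv_nmi/parameters/action
+ */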
+
+static inline bool uv_nmi_action_is(const char *action)
+{
+ return (strncmp(uv_nmi_action, action, strlen(action)) == 0);
+}
+
+/* Setup which NMI support is present in system */
+static void uv_nmi_setup_mmrs(void)
+{
+ bool new_nmi_method_only = false;
+
+ /* First determine arch specific MMRs to handshake with BIOS */
+ if (UVH_EVENT_OCCURRED0_EXTIO_INT0_MASK) { /* UV2,3,4 setup */
+ uvh_nmi_mmrx = UVH_EVENT_OCCURRED0;
+ uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED0_ALIAS;
+ uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED0_EXTIO_INT0_SHFT;
+ uvh_nmi_mmrx_type = "OCRD0-EXTIO_INT0";
+
+ uvh_nmi_mmrx_supported = UVH_EXTIO_INT0_BROADCAST;
+ uvh_nmi_mmrx_req = UVH_BIOS_KERNEL_MMR_ALIAS_2;
+ uvh_nmi_mmrx_req_shift = 62;
+
+ } else if (UVH_EVENT_OCCURRED1_EXTIO_INT0_MASK) { /* UV5+ setup */
+ uvh_nmi_mmrx = UVH_EVENT_OCCURRED1;
+ uvh_nmi_mmrx_clear = UVH_EVENT_OCCURRED1_ALIAS;
+ uvh_nmi_mmrx_shift = UVH_EVENT_OCCURRED1_EXTIO_INT0_SHFT;
+ uvh_nmi_mmrx_type = "OCRD1-EXTIO_INT0";
+
+ new_nmi_method_only = true; /* Newer nmi always valid on UV5+ */
+ uvh_nmi_mmrx_req = 0; /* no request bit to clear */
+
+ } else {
+ pr_err("UV:%s:NMI support not available on this system\n", __func__);
+ return;
+ }
+
+ /* Then find out if new NMI is supported */
+ if (new_nmi_method_only || uv_read_local_mmr(uvh_nmi_mmrx_supported)) {
+ if (uvh_nmi_mmrx_req)
+ uv_write_local_mmr(uvh_nmi_mmrx_req,
+ 1UL << uvh_nmi_mmrx_req_shift);
+ nmi_mmr = uvh_nmi_mmrx;
+ nmi_mmr_clear = uvh_nmi_mmrx_clear;
+ nmi_mmr_pending = 1UL << uvh_nmi_mmrx_shift;
+ pr_info("UV: SMI NMI support: %s\n", uvh_nmi_mmrx_type);
+ } else {
+ nmi_mmr = UVH_NMI_MMR;
+ nmi_mmr_clear = UVH_NMI_MMR_CLEAR;
+ nmi_mmr_pending = 1UL << UVH_NMI_MMR_SHIFT;
+ pr_info("UV: SMI NMI support: %s\n", UVH_NMI_MMR_TYPE);
+ }
+}
+
+/* Read NMI MMR and check if NMI flag was set by BMC. */
+static inline int uv_nmi_test_mmr(struct uv_hub_nmi_s *hub_nmi)
+{
+ hub_nmi->nmi_value = uv_read_local_mmr(nmi_mmr);
+ atomic_inc(&hub_nmi->read_mmr_count);
+ return !!(hub_nmi->nmi_value & nmi_mmr_pending);
+}
+
+static inline void uv_local_mmr_clear_nmi(void)
+{
+ uv_write_local_mmr(nmi_mmr_clear, nmi_mmr_pending);
+}
+
+/*
+ * UV hubless NMI handler functions
+ */
+static inline void uv_reassert_nmi(void)
+{
+ /* (from arch/x86/include/asm/mach_traps.h) */
+ outb(0x8f, NMI_CONTROL_PORT);
+ inb(NMI_DUMMY_PORT); /* dummy read */
+ outb(0x0f, NMI_CONTROL_PORT);
+ inb(NMI_DUMMY_PORT); /* dummy read */
+}
+
+static void uv_init_hubless_pch_io(int offset, int mask, int data)
+{
+ int *addr = PCH_PCR_GPIO_ADDRESS(offset);
+ int readd = readl(addr);
+
+ if (mask) { /* OR in new data */
+ int writed = (readd & ~mask) | data;
+
+ nmi_debug("UV:PCH: %p = %x & %x | %x (%x)\n",
+ addr, readd, ~mask, data, writed);
+ writel(writed, addr);
+ } else if (readd & data) { /* clear status bit */
+ nmi_debug("UV:PCH: %p = %x\n", addr, data);
+ writel(data, addr);
+ }
+
+ (void)readl(addr); /* flush write data */
+}
+
+static void uv_nmi_setup_hubless_intr(void)
+{
+ uv_pch_intr_now_enabled = uv_pch_intr_enable;
+
+ uv_init_hubless_pch_io(
+ PAD_CFG_DW0_GPP_D_0, GPIROUTNMI,
+ uv_pch_intr_now_enabled ? GPIROUTNMI : 0);
+
+ nmi_debug("UV:NMI: GPP_D_0 interrupt %s\n",
+ uv_pch_intr_now_enabled ? "enabled" : "disabled");
+}
+
+static struct init_nmi {
+ unsigned int offset;
+ unsigned int mask;
+ unsigned int data;
+} init_nmi[] = {
+ { /* HOSTSW_OWN_GPP_D_0 */
+ .offset = 0x84,
+ .mask = 0x1,
+ .data = 0x0, /* ACPI Mode */
+ },
+
+/* Clear status: */
+ { /* GPI_INT_STS_GPP_D_0 */
+ .offset = 0x104,
+ .mask = 0x0,
+ .data = 0x1, /* Clear Status */
+ },
+ { /* GPI_GPE_STS_GPP_D_0 */
+ .offset = 0x124,
+ .mask = 0x0,
+ .data = 0x1, /* Clear Status */
+ },
+ { /* GPI_SMI_STS_GPP_D_0 */
+ .offset = 0x144,
+ .mask = 0x0,
+ .data = 0x1, /* Clear Status */
+ },
+ { /* GPI_NMI_STS_GPP_D_0 */
+ .offset = 0x164,
+ .mask = 0x0,
+ .data = 0x1, /* Clear Status */
+ },
+
+/* Disable interrupts: */
+ { /* GPI_INT_EN_GPP_D_0 */
+ .offset = 0x114,
+ .mask = 0x1,
+ .data = 0x0, /* Disable interrupt generation */
+ },
+ { /* GPI_GPE_EN_GPP_D_0 */
+ .offset = 0x134,
+ .mask = 0x1,
+ .data = 0x0, /* Disable interrupt generation */
+ },
+ { /* GPI_SMI_EN_GPP_D_0 */
+ .offset = 0x154,
+ .mask = 0x1,
+ .data = 0x0, /* Disable interrupt generation */
+ },
+ { /* GPI_NMI_EN_GPP_D_0 */
+ .offset = 0x174,
+ .mask = 0x1,
+ .data = 0x0, /* Disable interrupt generation */
+ },
+
+/* Setup GPP_D_0 Pad Config: */
+ { /* PAD_CFG_DW0_GPP_D_0 */
+ .offset = 0x4c0,
+ .mask = 0xffffffff,
+ .data = 0x82020100,
+/*
+ * 31:30 Pad Reset Config (PADRSTCFG): = 2h # PLTRST# (default)
+ *
+ * 29 RX Pad State Select (RXPADSTSEL): = 0 # Raw RX pad state directly
+ * from RX buffer (default)
+ *
+ * 28 RX Raw Override to '1' (RXRAW1): = 0 # No Override
+ *
+ * 26:25 RX Level/Edge Configuration (RXEVCFG):
+ * = 0h # Level
+ * = 1h # Edge
+ *
+ * 23 RX Invert (RXINV): = 0 # No Inversion (signal active high)
+ *
+ * 20 GPIO Input Route IOxAPIC (GPIROUTIOXAPIC):
+ * = 0 # Routing does not cause peripheral IRQ...
+ * # (we want an NMI not an IRQ)
+ *
+ * 19 GPIO Input Route SCI (GPIROUTSCI): = 0 # Routing does not cause SCI.
+ * 18 GPIO Input Route SMI (GPIROUTSMI): = 0 # Routing does not cause SMI.
+ * 17 GPIO Input Route NMI (GPIROUTNMI): = 1 # Routing can cause NMI.
+ *
+ * 11:10 Pad Mode (PMODE1/0): = 0h = GPIO control the Pad.
+ * 9 GPIO RX Disable (GPIORXDIS):
+ * = 0 # Enable the input buffer (active low enable)
+ *
+ * 8 GPIO TX Disable (GPIOTXDIS):
+ * = 1 # Disable the output buffer; i.e. Hi-Z
+ *
+ * 1 GPIO RX State (GPIORXSTATE): This is the current internal RX pad state.
+ * 0 GPIO TX State (GPIOTXSTATE):
+ * = 0 # (Leave at default)
+ */
+ },
+
+/* Pad Config DW1 */
+ { /* PAD_CFG_DW1_GPP_D_0 */
+ .offset = 0x4c4,
+ .mask = 0x3c00,
+ .data = 0, /* Termination = none (default) */
+ },
+};
+
+static void uv_init_hubless_pch_d0(void)
+{
+ int i, read;
+
+ read = *PCH_PCR_GPIO_ADDRESS(PAD_OWN_GPP_D_0);
+ if (read != 0) {
+ pr_info("UV: Hubless NMI already configured\n");
+ return;
+ }
+
+ nmi_debug("UV: Initializing UV Hubless NMI on PCH\n");
+ for (i = 0; i < ARRAY_SIZE(init_nmi); i++) {
+ uv_init_hubless_pch_io(init_nmi[i].offset,
+ init_nmi[i].mask,
+ init_nmi[i].data);
+ }
+}
+
+static int uv_nmi_test_hubless(struct uv_hub_nmi_s *hub_nmi)
+{
+ int *pstat = PCH_PCR_GPIO_ADDRESS(GPI_NMI_STS_GPP_D_0);
+ int status = *pstat;
+
+ hub_nmi->nmi_value = status;
+ atomic_inc(&hub_nmi->read_mmr_count);
+
+ if (!(status & STS_GPP_D_0_MASK)) /* Not a UV external NMI */
+ return 0;
+
+ *pstat = STS_GPP_D_0_MASK; /* Is a UV NMI: clear GPP_D_0 status */
+ (void)*pstat; /* Flush write */
+
+ return 1;
+}
+
+static int uv_test_nmi(struct uv_hub_nmi_s *hub_nmi)
+{
+ if (hub_nmi->hub_present)
+ return uv_nmi_test_mmr(hub_nmi);
+
+ if (hub_nmi->pch_owner) /* Only PCH owner can check status */
+ return uv_nmi_test_hubless(hub_nmi);
+
+ return -1;
+}
+
+/*
+ * If this is the first CPU in on this hub, set the hub_nmi "in_nmi" and
+ * "owner" values and return true. If it is also the first CPU in on the
+ * system, set the global "in_nmi" flag.
+ */
+static int uv_set_in_nmi(int cpu, struct uv_hub_nmi_s *hub_nmi)
+{
+ int first = atomic_add_unless(&hub_nmi->in_nmi, 1, 1);
+
+ if (first) {
+ atomic_set(&hub_nmi->cpu_owner, cpu);
+ if (atomic_add_unless(&uv_in_nmi, 1, 1))
+ atomic_set(&uv_nmi_cpu, cpu);
+
+ atomic_inc(&hub_nmi->nmi_count);
+ }
+ return first;
+}
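+
+/*
+ * Note: the atomic_add_unless(.., 1, 1) calls above act as an election:
+ * only the CPU that moves the count from 0 to 1 sees a non-zero return
+ * and becomes the owner; later arrivals leave the count at 1 and get 0.
+ */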
+
+/* Check if this is a system NMI event */
+static int uv_check_nmi(struct uv_hub_nmi_s *hub_nmi)
+{
+ int cpu = smp_processor_id();
+ int nmi = 0;
+ int nmi_detected = 0;
+
+ local64_inc(&uv_nmi_count);
+ this_cpu_inc(uv_cpu_nmi.queries);
+
+ do {
+ nmi = atomic_read(&hub_nmi->in_nmi);
+ if (nmi)
+ break;
+
+ if (raw_spin_trylock(&hub_nmi->nmi_lock)) {
+ nmi_detected = uv_test_nmi(hub_nmi);
+
+ /* Check flag for UV external NMI */
+ if (nmi_detected > 0) {
+ uv_set_in_nmi(cpu, hub_nmi);
+ nmi = 1;
+ break;
+ }
+
+ /* A non-PCH node in a hubless system waits for NMI */
+ else if (nmi_detected < 0)
+ goto slave_wait;
+
+ /* MMR/PCH NMI flag is clear */
+ raw_spin_unlock(&hub_nmi->nmi_lock);
+
+ } else {
+
+ /* Wait a moment for the HUB NMI locker to set flag */
+slave_wait: cpu_relax();
+ udelay(uv_nmi_slave_delay);
+
+ /* Re-check hub in_nmi flag */
+ nmi = atomic_read(&hub_nmi->in_nmi);
+ if (nmi)
+ break;
+ }
+
+ /*
+		 * Check if the BMC missed setting the MMR NMI flag, or if this
+		 * is a UV hubless system where only the PCH owner can check
+		 * the flag
+ */
+ if (!nmi) {
+ nmi = atomic_read(&uv_in_nmi);
+ if (nmi)
+ uv_set_in_nmi(cpu, hub_nmi);
+ }
+
+ /* If we're holding the hub lock, release it now */
+ if (nmi_detected < 0)
+ raw_spin_unlock(&hub_nmi->nmi_lock);
+
+ } while (0);
+
+ if (!nmi)
+ local64_inc(&uv_nmi_misses);
+
+ return nmi;
+}
+
+/* Need to reset the NMI MMR register, but only once per hub. */
+static inline void uv_clear_nmi(int cpu)
+{
+ struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
+
+ if (cpu == atomic_read(&hub_nmi->cpu_owner)) {
+ atomic_set(&hub_nmi->cpu_owner, -1);
+ atomic_set(&hub_nmi->in_nmi, 0);
+ if (hub_nmi->hub_present)
+ uv_local_mmr_clear_nmi();
+ else
+ uv_reassert_nmi();
+ raw_spin_unlock(&hub_nmi->nmi_lock);
+ }
+}
+
+/* Ping non-responding CPUs, attempting to force them into the NMI handler */
+static void uv_nmi_nr_cpus_ping(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu, uv_nmi_cpu_mask)
+ uv_cpu_nmi_per(cpu).pinging = 1;
+
+ apic->send_IPI_mask(uv_nmi_cpu_mask, APIC_DM_NMI);
+}
+
+/* Clean up flags for CPUs that ignored both the NMI and the ping */
+static void uv_nmi_cleanup_mask(void)
+{
+ int cpu;
+
+ for_each_cpu(cpu, uv_nmi_cpu_mask) {
+ uv_cpu_nmi_per(cpu).pinging = 0;
+ uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_OUT;
+ cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
+ }
+}
+
+/* Loop waiting as CPUs enter the NMI handler */
+static int uv_nmi_wait_cpus(int first)
+{
+ int i, j, k, n = num_online_cpus();
+ int last_k = 0, waiting = 0;
+ int cpu = smp_processor_id();
+
+ if (first) {
+ cpumask_copy(uv_nmi_cpu_mask, cpu_online_mask);
+ k = 0;
+ } else {
+ k = n - cpumask_weight(uv_nmi_cpu_mask);
+ }
+
+ /* PCH NMI causes only one CPU to respond */
+ if (first && uv_pch_intr_now_enabled) {
+ cpumask_clear_cpu(cpu, uv_nmi_cpu_mask);
+ return n - k - 1;
+ }
+
+ udelay(uv_nmi_initial_delay);
+ for (i = 0; i < uv_nmi_retry_count; i++) {
+ int loop_delay = uv_nmi_loop_delay;
+
+ for_each_cpu(j, uv_nmi_cpu_mask) {
+ if (uv_cpu_nmi_per(j).state) {
+ cpumask_clear_cpu(j, uv_nmi_cpu_mask);
+ if (++k >= n)
+ break;
+ }
+ }
+ if (k >= n) { /* all in? */
+ k = n;
+ break;
+ }
+		if (last_k != k) {	/* abort if no new CPUs are coming in */
+ last_k = k;
+ waiting = 0;
+ } else if (++waiting > uv_nmi_wait_count)
+ break;
+
+ /* Extend delay if waiting only for CPU 0: */
+ if (waiting && (n - k) == 1 &&
+ cpumask_test_cpu(0, uv_nmi_cpu_mask))
+ loop_delay *= 100;
+
+ udelay(loop_delay);
+ }
+ atomic_set(&uv_nmi_cpus_in_nmi, k);
+ return n - k;
+}
+
+/* Wait until all slave CPUs have entered the UV NMI handler */
+static void uv_nmi_wait(int master)
+{
+ /* Indicate this CPU is in: */
+ this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_IN);
+
+ /* If not the first CPU in (the master), then we are a slave CPU */
+ if (!master)
+ return;
+
+ do {
+		/* Wait for all other CPUs to gather here */
+ if (!uv_nmi_wait_cpus(1))
+ break;
+
+ /* If not all made it in, send IPI NMI to them */
+ pr_alert("UV: Sending NMI IPI to %d CPUs: %*pbl\n",
+ cpumask_weight(uv_nmi_cpu_mask),
+ cpumask_pr_args(uv_nmi_cpu_mask));
+
+ uv_nmi_nr_cpus_ping();
+
+		/* If all CPUs are in, then done */
+ if (!uv_nmi_wait_cpus(0))
+ break;
+
+ pr_alert("UV: %d CPUs not in NMI loop: %*pbl\n",
+ cpumask_weight(uv_nmi_cpu_mask),
+ cpumask_pr_args(uv_nmi_cpu_mask));
+ } while (0);
+
+ pr_alert("UV: %d of %d CPUs in NMI\n",
+ atomic_read(&uv_nmi_cpus_in_nmi), num_online_cpus());
+}
+
+/* Dump Instruction Pointer header */
+static void uv_nmi_dump_cpu_ip_hdr(void)
+{
+ pr_info("\nUV: %4s %6s %-32s %s (Note: PID 0 not listed)\n",
+ "CPU", "PID", "COMMAND", "IP");
+}
+
+/* Dump Instruction Pointer info */
+static void uv_nmi_dump_cpu_ip(int cpu, struct pt_regs *regs)
+{
+ pr_info("UV: %4d %6d %-32.32s %pS",
+ cpu, current->pid, current->comm, (void *)regs->ip);
+}
+
+/*
+ * Dump this CPU's state. If action was set to "kdump" and the crash_kexec
+ * failed, then we provide "dump" as an alternate action. Action "dump" now
+ * also includes the "ips" (instruction pointers) action, whereas the
+ * action "ips" only displays instruction pointers for the non-idle CPUs.
+ * This is an abbreviated form of the "ps" command.
+ */
+static void uv_nmi_dump_state_cpu(int cpu, struct pt_regs *regs)
+{
+ const char *dots = " ................................. ";
+
+ if (cpu == 0)
+ uv_nmi_dump_cpu_ip_hdr();
+
+ if (current->pid != 0 || !uv_nmi_action_is("ips"))
+ uv_nmi_dump_cpu_ip(cpu, regs);
+
+ if (uv_nmi_action_is("dump")) {
+ pr_info("UV:%sNMI process trace for CPU %d\n", dots, cpu);
+ show_regs(regs);
+ }
+
+ this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_DUMP_DONE);
+}
+
+/* Trigger a slave CPU to dump its state */
+static void uv_nmi_trigger_dump(int cpu)
+{
+ int retry = uv_nmi_trigger_delay;
+
+ if (uv_cpu_nmi_per(cpu).state != UV_NMI_STATE_IN)
+ return;
+
+ uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP;
+ do {
+ cpu_relax();
+ udelay(10);
+ if (uv_cpu_nmi_per(cpu).state
+ != UV_NMI_STATE_DUMP)
+ return;
+ } while (--retry > 0);
+
+ pr_crit("UV: CPU %d stuck in process dump function\n", cpu);
+ uv_cpu_nmi_per(cpu).state = UV_NMI_STATE_DUMP_DONE;
+}
+
+/* Wait until all CPUs are ready to exit */
+static void uv_nmi_sync_exit(int master)
+{
+ atomic_dec(&uv_nmi_cpus_in_nmi);
+ if (master) {
+ while (atomic_read(&uv_nmi_cpus_in_nmi) > 0)
+ cpu_relax();
+ atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
+ } else {
+ while (atomic_read(&uv_nmi_slave_continue))
+ cpu_relax();
+ }
+}
+
+/* Current "health" check is to check which CPU's are responsive */
+static void uv_nmi_action_health(int cpu, struct pt_regs *regs, int master)
+{
+ if (master) {
+ int in = atomic_read(&uv_nmi_cpus_in_nmi);
+ int out = num_online_cpus() - in;
+
+ pr_alert("UV: NMI CPU health check (non-responding:%d)\n", out);
+ atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+ } else {
+ while (!atomic_read(&uv_nmi_slave_continue))
+ cpu_relax();
+ }
+ uv_nmi_sync_exit(master);
+}
+
+/* Walk through CPU list and dump state of each */
+static void uv_nmi_dump_state(int cpu, struct pt_regs *regs, int master)
+{
+ if (master) {
+ int tcpu;
+ int ignored = 0;
+ int saved_console_loglevel = console_loglevel;
+
+ pr_alert("UV: tracing %s for %d CPUs from CPU %d\n",
+ uv_nmi_action_is("ips") ? "IPs" : "processes",
+ atomic_read(&uv_nmi_cpus_in_nmi), cpu);
+
+ console_loglevel = uv_nmi_loglevel;
+ atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+ for_each_online_cpu(tcpu) {
+ if (cpumask_test_cpu(tcpu, uv_nmi_cpu_mask))
+ ignored++;
+ else if (tcpu == cpu)
+ uv_nmi_dump_state_cpu(tcpu, regs);
+ else
+ uv_nmi_trigger_dump(tcpu);
+ }
+ if (ignored)
+ pr_alert("UV: %d CPUs ignored NMI\n", ignored);
+
+ console_loglevel = saved_console_loglevel;
+ pr_alert("UV: process trace complete\n");
+ } else {
+ while (!atomic_read(&uv_nmi_slave_continue))
+ cpu_relax();
+ while (this_cpu_read(uv_cpu_nmi.state) != UV_NMI_STATE_DUMP)
+ cpu_relax();
+ uv_nmi_dump_state_cpu(cpu, regs);
+ }
+ uv_nmi_sync_exit(master);
+}
+
+static void uv_nmi_touch_watchdogs(void)
+{
+ touch_softlockup_watchdog_sync();
+ clocksource_touch_watchdog();
+ rcu_cpu_stall_reset();
+ touch_nmi_watchdog();
+}
+
+static void uv_nmi_kdump(int cpu, int main, struct pt_regs *regs)
+{
+	/* Check (on both main and secondary CPUs) if a kdump kernel is loaded */
+ if (!kexec_crash_image) {
+ if (main)
+ pr_err("UV: NMI error: kdump kernel not loaded\n");
+ return;
+ }
+
+ /* Call crash to dump system state */
+ if (main) {
+ pr_emerg("UV: NMI executing crash_kexec on CPU%d\n", cpu);
+ crash_kexec(regs);
+
+ pr_emerg("UV: crash_kexec unexpectedly returned\n");
+ atomic_set(&uv_nmi_kexec_failed, 1);
+
+ } else { /* secondary */
+
+ /* If kdump kernel fails, secondaries will exit this loop */
+ while (atomic_read(&uv_nmi_kexec_failed) == 0) {
+
+			/* Once the shootdown of CPUs starts, they do not return */
+ run_crash_ipi_callback(regs);
+
+ mdelay(10);
+ }
+ }
+}
+
+#ifdef CONFIG_KGDB
+#ifdef CONFIG_KGDB_KDB
+static inline int uv_nmi_kdb_reason(void)
+{
+ return KDB_REASON_SYSTEM_NMI;
+}
+#else /* !CONFIG_KGDB_KDB */
+static inline int uv_nmi_kdb_reason(void)
+{
+ /* Ensure user is expecting to attach gdb remote */
+ if (uv_nmi_action_is("kgdb"))
+ return 0;
+
+ pr_err("UV: NMI error: KDB is not enabled in this kernel\n");
+ return -1;
+}
+#endif /* CONFIG_KGDB_KDB */
+
+/*
+ * Call KGDB/KDB from NMI handler
+ *
+ * Note that if both KGDB and KDB are configured, then the action of 'kgdb' or
+ * 'kdb' has no effect on which is used. See the KGDB documentation for further
+ * information.
+ */
+static void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
+{
+ if (master) {
+ int reason = uv_nmi_kdb_reason();
+ int ret;
+
+ if (reason < 0)
+ return;
+
+ /* Call KGDB NMI handler as MASTER */
+ ret = kgdb_nmicallin(cpu, X86_TRAP_NMI, regs, reason,
+ &uv_nmi_slave_continue);
+ if (ret) {
+ pr_alert("KGDB returned error, is kgdboc set?\n");
+ atomic_set(&uv_nmi_slave_continue, SLAVE_EXIT);
+ }
+ } else {
+ /* Wait for KGDB signal that it's ready for slaves to enter */
+ int sig;
+
+ do {
+ cpu_relax();
+ sig = atomic_read(&uv_nmi_slave_continue);
+ } while (!sig);
+
+ /* Call KGDB as slave */
+ if (sig == SLAVE_CONTINUE)
+ kgdb_nmicallback(cpu, regs);
+ }
+ uv_nmi_sync_exit(master);
+}
+
+#else /* !CONFIG_KGDB */
+static inline void uv_call_kgdb_kdb(int cpu, struct pt_regs *regs, int master)
+{
+ pr_err("UV: NMI error: KGDB is not enabled in this kernel\n");
+}
+#endif /* !CONFIG_KGDB */
+
+/*
+ * UV NMI handler
+ */
+static int uv_handle_nmi(unsigned int reason, struct pt_regs *regs)
+{
+ struct uv_hub_nmi_s *hub_nmi = uv_hub_nmi;
+ int cpu = smp_processor_id();
+ int master = 0;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /* If not a UV System NMI, ignore */
+ if (!this_cpu_read(uv_cpu_nmi.pinging) && !uv_check_nmi(hub_nmi)) {
+ local_irq_restore(flags);
+ return NMI_DONE;
+ }
+
+	/* Check whether we are the first CPU into the NMI handler */
+ master = (atomic_read(&uv_nmi_cpu) == cpu);
+
+ /* If NMI action is "kdump", then attempt to do it */
+ if (uv_nmi_action_is("kdump")) {
+ uv_nmi_kdump(cpu, master, regs);
+
+ /* Unexpected return, revert action to "dump" */
+ if (master)
+ strncpy(uv_nmi_action, "dump", strlen(uv_nmi_action));
+ }
+
+	/* Pause as all CPUs enter the NMI handler */
+ uv_nmi_wait(master);
+
+ /* Process actions other than "kdump": */
+ if (uv_nmi_action_is("health")) {
+ uv_nmi_action_health(cpu, regs, master);
+ } else if (uv_nmi_action_is("ips") || uv_nmi_action_is("dump")) {
+ uv_nmi_dump_state(cpu, regs, master);
+ } else if (uv_nmi_action_is("kdb") || uv_nmi_action_is("kgdb")) {
+ uv_call_kgdb_kdb(cpu, regs, master);
+ } else {
+ if (master)
+ pr_alert("UV: unknown NMI action: %s\n", uv_nmi_action);
+ uv_nmi_sync_exit(master);
+ }
+
+ /* Clear per_cpu "in_nmi" flag */
+ this_cpu_write(uv_cpu_nmi.state, UV_NMI_STATE_OUT);
+
+ /* Clear MMR NMI flag on each hub */
+ uv_clear_nmi(cpu);
+
+ /* Clear global flags */
+ if (master) {
+ if (!cpumask_empty(uv_nmi_cpu_mask))
+ uv_nmi_cleanup_mask();
+ atomic_set(&uv_nmi_cpus_in_nmi, -1);
+ atomic_set(&uv_nmi_cpu, -1);
+ atomic_set(&uv_in_nmi, 0);
+ atomic_set(&uv_nmi_kexec_failed, 0);
+ atomic_set(&uv_nmi_slave_continue, SLAVE_CLEAR);
+ }
+
+ uv_nmi_touch_watchdogs();
+ local_irq_restore(flags);
+
+ return NMI_HANDLED;
+}
+
+/*
+ * NMI handler for pulling in CPUs when perf events are grabbing our NMI
+ */
+static int uv_handle_nmi_ping(unsigned int reason, struct pt_regs *regs)
+{
+ int ret;
+
+ this_cpu_inc(uv_cpu_nmi.queries);
+ if (!this_cpu_read(uv_cpu_nmi.pinging)) {
+ local64_inc(&uv_nmi_ping_misses);
+ return NMI_DONE;
+ }
+
+ this_cpu_inc(uv_cpu_nmi.pings);
+ local64_inc(&uv_nmi_ping_count);
+ ret = uv_handle_nmi(reason, regs);
+ this_cpu_write(uv_cpu_nmi.pinging, 0);
+ return ret;
+}
+
+static void uv_register_nmi_notifier(void)
+{
+ if (register_nmi_handler(NMI_UNKNOWN, uv_handle_nmi, 0, "uv"))
+ pr_warn("UV: NMI handler failed to register\n");
+
+ if (register_nmi_handler(NMI_LOCAL, uv_handle_nmi_ping, 0, "uvping"))
+ pr_warn("UV: PING NMI handler failed to register\n");
+}
+
+void uv_nmi_init(void)
+{
+ unsigned int value;
+
+ /*
+	 * Unmask NMI on all CPUs
+ */
+ value = apic_read(APIC_LVT1) | APIC_DM_NMI;
+ value &= ~APIC_LVT_MASKED;
+ apic_write(APIC_LVT1, value);
+}
+
+/* Setup HUB NMI info */
+static void __init uv_nmi_setup_common(bool hubbed)
+{
+ int size = sizeof(void *) * (1 << NODES_SHIFT);
+ int cpu;
+
+ uv_hub_nmi_list = kzalloc(size, GFP_KERNEL);
+ nmi_debug("UV: NMI hub list @ 0x%p (%d)\n", uv_hub_nmi_list, size);
+ BUG_ON(!uv_hub_nmi_list);
+ size = sizeof(struct uv_hub_nmi_s);
+ for_each_present_cpu(cpu) {
+ int nid = cpu_to_node(cpu);
+ if (uv_hub_nmi_list[nid] == NULL) {
+ uv_hub_nmi_list[nid] = kzalloc_node(size,
+ GFP_KERNEL, nid);
+ BUG_ON(!uv_hub_nmi_list[nid]);
+ raw_spin_lock_init(&(uv_hub_nmi_list[nid]->nmi_lock));
+ atomic_set(&uv_hub_nmi_list[nid]->cpu_owner, -1);
+ uv_hub_nmi_list[nid]->hub_present = hubbed;
+ uv_hub_nmi_list[nid]->pch_owner = (nid == 0);
+ }
+ uv_hub_nmi_per(cpu) = uv_hub_nmi_list[nid];
+ }
+ BUG_ON(!alloc_cpumask_var(&uv_nmi_cpu_mask, GFP_KERNEL));
+}
+
+/* Setup for UV Hub systems */
+void __init uv_nmi_setup(void)
+{
+ uv_nmi_setup_mmrs();
+ uv_nmi_setup_common(true);
+ uv_register_nmi_notifier();
+ pr_info("UV: Hub NMI enabled\n");
+}
+
+/* Setup for UV Hubless systems */
+void __init uv_nmi_setup_hubless(void)
+{
+ uv_nmi_setup_common(false);
+ pch_base = xlate_dev_mem_ptr(PCH_PCR_GPIO_1_BASE);
+ nmi_debug("UV: PCH base:%p from 0x%lx, GPP_D_0\n",
+ pch_base, PCH_PCR_GPIO_1_BASE);
+ if (uv_pch_init_enable)
+ uv_init_hubless_pch_d0();
+ uv_init_hubless_pch_io(GPI_NMI_ENA_GPP_D_0,
+ STS_GPP_D_0_MASK, STS_GPP_D_0_MASK);
+ uv_nmi_setup_hubless_intr();
+ /* Ensure NMI enabled in Processor Interface Reg: */
+ uv_reassert_nmi();
+ uv_register_nmi_notifier();
+ pr_info("UV: PCH NMI enabled\n");
+}
diff --git a/arch/x86/platform/uv/uv_time.c b/arch/x86/platform/uv/uv_time.c
new file mode 100644
index 000000000..54663f3e0
--- /dev/null
+++ b/arch/x86/platform/uv/uv_time.c
@@ -0,0 +1,393 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * SGI RTC clock/timer routines.
+ *
+ * (C) Copyright 2020 Hewlett Packard Enterprise Development LP
+ * Copyright (c) 2009-2013 Silicon Graphics, Inc. All Rights Reserved.
+ * Copyright (c) Dimitri Sivanich
+ */
+#include <linux/clockchips.h>
+#include <linux/slab.h>
+
+#include <asm/uv/uv_mmrs.h>
+#include <asm/uv/uv_hub.h>
+#include <asm/uv/bios.h>
+#include <asm/uv/uv.h>
+#include <asm/apic.h>
+#include <asm/cpu.h>
+
+#define RTC_NAME "sgi_rtc"
+
+static u64 uv_read_rtc(struct clocksource *cs);
+static int uv_rtc_next_event(unsigned long, struct clock_event_device *);
+static int uv_rtc_shutdown(struct clock_event_device *evt);
+
+static struct clocksource clocksource_uv = {
+ .name = RTC_NAME,
+ .rating = 299,
+ .read = uv_read_rtc,
+ .mask = (u64)UVH_RTC_REAL_TIME_CLOCK_MASK,
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static struct clock_event_device clock_event_device_uv = {
+ .name = RTC_NAME,
+ .features = CLOCK_EVT_FEAT_ONESHOT,
+ .shift = 20,
+ .rating = 400,
+ .irq = -1,
+ .set_next_event = uv_rtc_next_event,
+ .set_state_shutdown = uv_rtc_shutdown,
+ .event_handler = NULL,
+};
+
+static DEFINE_PER_CPU(struct clock_event_device, cpu_ced);
+
+/* There is one of these allocated per blade */
+struct uv_rtc_timer_head {
+ spinlock_t lock;
+ /* next cpu waiting for timer, local node relative: */
+ int next_cpu;
+ /* number of cpus on this node: */
+ int ncpus;
+ struct {
+ int lcpu; /* systemwide logical cpu number */
+ u64 expires; /* next timer expiration for this cpu */
+ } cpu[];
+};
+
+/*
+ * Access to uv_rtc_timer_head via blade id.
+ */
+static struct uv_rtc_timer_head **blade_info __read_mostly;
+
+static int uv_rtc_evt_enable;
+
+/*
+ * Hardware interface routines
+ */
+
+/* Send an RTC IPI to a CPU, which may be on another node */
+static void uv_rtc_send_IPI(int cpu)
+{
+ unsigned long apicid, val;
+ int pnode;
+
+ apicid = cpu_physical_id(cpu);
+ pnode = uv_apicid_to_pnode(apicid);
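+	/* Encode send bit, target APIC id and vector into the IPI MMR value. */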
+ val = (1UL << UVH_IPI_INT_SEND_SHFT) |
+ (apicid << UVH_IPI_INT_APIC_ID_SHFT) |
+ (X86_PLATFORM_IPI_VECTOR << UVH_IPI_INT_VECTOR_SHFT);
+
+ uv_write_global_mmr64(pnode, UVH_IPI_INT, val);
+}
+
+/* Check for an RTC interrupt pending */
+static int uv_intr_pending(int pnode)
+{
+ return uv_read_global_mmr64(pnode, UVH_EVENT_OCCURRED2) &
+ UVH_EVENT_OCCURRED2_RTC_1_MASK;
+}
+
+/* Set up the interrupt and return non-zero if early expiration occurred. */
+static int uv_setup_intr(int cpu, u64 expires)
+{
+ u64 val;
+ unsigned long apicid = cpu_physical_id(cpu);
+ int pnode = uv_cpu_to_pnode(cpu);
+
+ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+ UVH_RTC1_INT_CONFIG_M_MASK);
+ uv_write_global_mmr64(pnode, UVH_INT_CMPB, -1L);
+
+ uv_write_global_mmr64(pnode, UVH_EVENT_OCCURRED2_ALIAS,
+ UVH_EVENT_OCCURRED2_RTC_1_MASK);
+
+ val = (X86_PLATFORM_IPI_VECTOR << UVH_RTC1_INT_CONFIG_VECTOR_SHFT) |
+ ((u64)apicid << UVH_RTC1_INT_CONFIG_APIC_ID_SHFT);
+
+ /* Set configuration */
+ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG, val);
+ /* Initialize comparator value */
+ uv_write_global_mmr64(pnode, UVH_INT_CMPB, expires);
+
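+	/*
+	 * The RTC may already have passed "expires" by the time the
+	 * comparator was written.  Re-read it: if we are late and no
+	 * interrupt is pending, report the early expiration to the caller.
+	 */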
+ if (uv_read_rtc(NULL) <= expires)
+ return 0;
+
+ return !uv_intr_pending(pnode);
+}
+
+/*
+ * Per-cpu timer tracking routines
+ */
+
+static __init void uv_rtc_deallocate_timers(void)
+{
+ int bid;
+
+ for_each_possible_blade(bid) {
+ kfree(blade_info[bid]);
+ }
+ kfree(blade_info);
+}
+
+/* Allocate, per blade, the list of CPU timer expiration times. */
+static __init int uv_rtc_allocate_timers(void)
+{
+ int cpu;
+
+ blade_info = kcalloc(uv_possible_blades, sizeof(void *), GFP_KERNEL);
+ if (!blade_info)
+ return -ENOMEM;
+
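+	/*
+	 * One uv_rtc_timer_head is shared by all CPUs on a blade; its
+	 * flexible cpu[] array is sized via struct_size() for the
+	 * blade's possible CPUs and allocated on the blade's home node.
+	 */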
+ for_each_present_cpu(cpu) {
+ int nid = cpu_to_node(cpu);
+ int bid = uv_cpu_to_blade_id(cpu);
+ int bcpu = uv_cpu_blade_processor_id(cpu);
+ struct uv_rtc_timer_head *head = blade_info[bid];
+
+ if (!head) {
+ head = kmalloc_node(struct_size(head, cpu,
+ uv_blade_nr_possible_cpus(bid)),
+ GFP_KERNEL, nid);
+ if (!head) {
+ uv_rtc_deallocate_timers();
+ return -ENOMEM;
+ }
+ spin_lock_init(&head->lock);
+ head->ncpus = uv_blade_nr_possible_cpus(bid);
+ head->next_cpu = -1;
+ blade_info[bid] = head;
+ }
+
+ head->cpu[bcpu].lcpu = cpu;
+ head->cpu[bcpu].expires = ULLONG_MAX;
+ }
+
+ return 0;
+}
+
+/* Find and program the next expiring timer, or disable the RTC interrupt if none is armed. */
+static void uv_rtc_find_next_timer(struct uv_rtc_timer_head *head, int pnode)
+{
+ u64 lowest = ULLONG_MAX;
+ int c, bcpu = -1;
+
+ head->next_cpu = -1;
+ for (c = 0; c < head->ncpus; c++) {
+ u64 exp = head->cpu[c].expires;
+ if (exp < lowest) {
+ bcpu = c;
+ lowest = exp;
+ }
+ }
+ if (bcpu >= 0) {
+ head->next_cpu = bcpu;
+ c = head->cpu[bcpu].lcpu;
+ if (uv_setup_intr(c, lowest))
+			/* If we didn't set it up in time, trigger the IPI by hand */
+ uv_rtc_send_IPI(c);
+ } else {
+ uv_write_global_mmr64(pnode, UVH_RTC1_INT_CONFIG,
+ UVH_RTC1_INT_CONFIG_M_MASK);
+ }
+}
+
+/*
+ * Set expiration time for current cpu.
+ *
+ * Returns -ETIME if we missed the expiration time.
+ */
+static int uv_rtc_set_timer(int cpu, u64 expires)
+{
+ int pnode = uv_cpu_to_pnode(cpu);
+ int bid = uv_cpu_to_blade_id(cpu);
+ struct uv_rtc_timer_head *head = blade_info[bid];
+ int bcpu = uv_cpu_blade_processor_id(cpu);
+ u64 *t = &head->cpu[bcpu].expires;
+ unsigned long flags;
+ int next_cpu;
+
+ spin_lock_irqsave(&head->lock, flags);
+
+ next_cpu = head->next_cpu;
+ *t = expires;
+
+ /* Will this one be next to go off? */
+ if (next_cpu < 0 || bcpu == next_cpu ||
+ expires < head->cpu[next_cpu].expires) {
+ head->next_cpu = bcpu;
+ if (uv_setup_intr(cpu, expires)) {
+ *t = ULLONG_MAX;
+ uv_rtc_find_next_timer(head, pnode);
+ spin_unlock_irqrestore(&head->lock, flags);
+ return -ETIME;
+ }
+ }
+
+ spin_unlock_irqrestore(&head->lock, flags);
+ return 0;
+}
+
+/*
+ * Unset expiration time for current cpu.
+ *
+ * Returns 1 if this timer was pending.
+ */
+static int uv_rtc_unset_timer(int cpu, int force)
+{
+ int pnode = uv_cpu_to_pnode(cpu);
+ int bid = uv_cpu_to_blade_id(cpu);
+ struct uv_rtc_timer_head *head = blade_info[bid];
+ int bcpu = uv_cpu_blade_processor_id(cpu);
+ u64 *t = &head->cpu[bcpu].expires;
+ unsigned long flags;
+ int rc = 0;
+
+ spin_lock_irqsave(&head->lock, flags);
+
+ if ((head->next_cpu == bcpu && uv_read_rtc(NULL) >= *t) || force)
+ rc = 1;
+
+ if (rc) {
+ *t = ULLONG_MAX;
+		/* Was the hardware set up for this timer? */
+ if (head->next_cpu == bcpu)
+ uv_rtc_find_next_timer(head, pnode);
+ }
+
+ spin_unlock_irqrestore(&head->lock, flags);
+
+ return rc;
+}
+
+
+/*
+ * Kernel interface routines.
+ */
+
+/*
+ * Read the RTC.
+ *
+ * Starting with HUB rev 2.0, the UV RTC register is replicated across all
+ * cachelines of its own page. This allows faster simultaneous reads
+ * from a given socket.
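+ *
+ * Illustrative arithmetic (assuming 64-byte cachelines): blade
+ * processor id 3 reads the copy at byte offset 3 * 64 = 192, so
+ * concurrent readers on a socket touch distinct cachelines.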
+ */
+static u64 uv_read_rtc(struct clocksource *cs)
+{
+ unsigned long offset;
+
+ if (uv_get_min_hub_revision_id() == 1)
+ offset = 0;
+ else
+ offset = (uv_blade_processor_id() * L1_CACHE_BYTES) % PAGE_SIZE;
+
+ return (u64)uv_read_local_mmr(UVH_RTC | offset);
+}
+
+/*
+ * Program the next event, relative to now
+ */
+static int uv_rtc_next_event(unsigned long delta,
+ struct clock_event_device *ced)
+{
+ int ced_cpu = cpumask_first(ced->cpumask);
+
+ return uv_rtc_set_timer(ced_cpu, delta + uv_read_rtc(NULL));
+}
+
+/*
+ * Shutdown the RTC timer
+ */
+static int uv_rtc_shutdown(struct clock_event_device *evt)
+{
+ int ced_cpu = cpumask_first(evt->cpumask);
+
+ uv_rtc_unset_timer(ced_cpu, 1);
+ return 0;
+}
+
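+/*
+ * Runs from the x86 platform IPI vector (uv_rtc_send_IPI() targets
+ * X86_PLATFORM_IPI_VECTOR and setup installs this as
+ * x86_platform_ipi_callback): hand the event to the clockevents
+ * layer only if this CPU's timer had really expired.
+ */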
+static void uv_rtc_interrupt(void)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *ced = &per_cpu(cpu_ced, cpu);
+
+ if (!ced || !ced->event_handler)
+ return;
+
+ if (uv_rtc_unset_timer(cpu, 0) != 1)
+ return;
+
+ ced->event_handler(ced);
+}
+
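+/* Passing "uvrtcevt" on the kernel command line enables RTC clockevents. */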
+static int __init uv_enable_evt_rtc(char *str)
+{
+ uv_rtc_evt_enable = 1;
+
+ return 1;
+}
+__setup("uvrtcevt", uv_enable_evt_rtc);
+
+static __init void uv_rtc_register_clockevents(struct work_struct *dummy)
+{
+ struct clock_event_device *ced = this_cpu_ptr(&cpu_ced);
+
+ *ced = clock_event_device_uv;
+ ced->cpumask = cpumask_of(smp_processor_id());
+ clockevents_register_device(ced);
+}
+
+static __init int uv_rtc_setup_clock(void)
+{
+ int rc;
+
+ if (!is_uv_system())
+ return -ENODEV;
+
+ rc = clocksource_register_hz(&clocksource_uv, sn_rtc_cycles_per_second);
+ if (rc)
+ printk(KERN_INFO "UV RTC clocksource failed rc %d\n", rc);
+ else
+ printk(KERN_INFO "UV RTC clocksource registered freq %lu MHz\n",
+ sn_rtc_cycles_per_second/(unsigned long)1E6);
+
+ if (rc || !uv_rtc_evt_enable || x86_platform_ipi_callback)
+ return rc;
+
+	/* Set up and register clockevents */
+ rc = uv_rtc_allocate_timers();
+ if (rc)
+ goto error;
+
+ x86_platform_ipi_callback = uv_rtc_interrupt;
+
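+	/*
+	 * Fixed-point conversion used by clockevents: with shift = 20,
+	 * div_sc() picks mult so that ticks = (ns * mult) >> shift
+	 * matches the RTC frequency.
+	 */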
+ clock_event_device_uv.mult = div_sc(sn_rtc_cycles_per_second,
+ NSEC_PER_SEC, clock_event_device_uv.shift);
+
+ clock_event_device_uv.min_delta_ns = NSEC_PER_SEC /
+ sn_rtc_cycles_per_second;
+ clock_event_device_uv.min_delta_ticks = 1;
+
+ clock_event_device_uv.max_delta_ns = clocksource_uv.mask *
+ (NSEC_PER_SEC / sn_rtc_cycles_per_second);
+ clock_event_device_uv.max_delta_ticks = clocksource_uv.mask;
+
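+	/*
+	 * schedule_on_each_cpu() runs the registration helper on every
+	 * online CPU, giving each its own per-cpu clock_event_device.
+	 */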
+ rc = schedule_on_each_cpu(uv_rtc_register_clockevents);
+ if (rc) {
+ x86_platform_ipi_callback = NULL;
+ uv_rtc_deallocate_timers();
+ goto error;
+ }
+
+ printk(KERN_INFO "UV RTC clockevents registered\n");
+
+ return 0;
+
+error:
+ clocksource_unregister(&clocksource_uv);
+ printk(KERN_INFO "UV RTC clockevents failed rc %d\n", rc);
+
+ return rc;
+}
+arch_initcall(uv_rtc_setup_clock);