37 files changed, 11184 insertions, 0 deletions
diff --git a/arch/x86/xen/Kconfig b/arch/x86/xen/Kconfig new file mode 100644 index 0000000000..a65fc2ae15 --- /dev/null +++ b/arch/x86/xen/Kconfig @@ -0,0 +1,104 @@ +# SPDX-License-Identifier: GPL-2.0 +# +# This Kconfig describes xen options +# + +config XEN + bool "Xen guest support" + depends on PARAVIRT + select PARAVIRT_CLOCK + select X86_HV_CALLBACK_VECTOR + depends on X86_64 || (X86_32 && X86_PAE) + depends on X86_64 || (X86_GENERIC || MPENTIUM4 || MCORE2 || MATOM || MK8) + depends on X86_LOCAL_APIC && X86_TSC + help + This is the Linux Xen port. Enabling this will allow the + kernel to boot in a paravirtualized environment under the + Xen hypervisor. + +config XEN_PV + bool "Xen PV guest support" + default y + depends on XEN + depends on X86_64 + select PARAVIRT_XXL + select XEN_HAVE_PVMMU + select XEN_HAVE_VPMU + select GUEST_PERF_EVENTS + help + Support running as a Xen PV guest. + +config XEN_512GB + bool "Limit Xen pv-domain memory to 512GB" + depends on XEN_PV + default y + help + Limit paravirtualized user domains to 512GB of RAM. + + The Xen tools and crash dump analysis tools might not support + pv-domains with more than 512 GB of RAM. This option controls the + default setting of the kernel to use only up to 512 GB or more. + It is always possible to change the default via specifying the + boot parameter "xen_512gb_limit". + +config XEN_PV_SMP + def_bool y + depends on XEN_PV && SMP + +config XEN_PV_DOM0 + def_bool y + depends on XEN_PV && XEN_DOM0 + +config XEN_PVHVM + def_bool y + depends on XEN && X86_LOCAL_APIC + +config XEN_PVHVM_SMP + def_bool y + depends on XEN_PVHVM && SMP + +config XEN_PVHVM_GUEST + bool "Xen PVHVM guest support" + default y + depends on XEN_PVHVM && PCI + help + Support running as a Xen PVHVM guest. + +config XEN_SAVE_RESTORE + bool + depends on XEN + select HIBERNATE_CALLBACKS + default y + +config XEN_DEBUG_FS + bool "Enable Xen debug and tuning parameters in debugfs" + depends on XEN && DEBUG_FS + help + Enable statistics output and various tuning options in debugfs. + Enabling this option may incur a significant performance overhead. + +config XEN_PVH + bool "Xen PVH guest support" + depends on XEN && XEN_PVHVM && ACPI + select PVH + def_bool n + help + Support for running as a Xen PVH guest. + +config XEN_DOM0 + bool "Xen Dom0 support" + default XEN_PV + depends on (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64) + depends on X86_IO_APIC && ACPI && PCI + select X86_X2APIC if XEN_PVH && X86_64 + help + Support running as a Xen Dom0 guest. + +config XEN_PV_MSR_SAFE + bool "Always use safe MSR accesses in PV guests" + default y + depends on XEN_PV + help + Use safe (not faulting) MSR access functions even if the MSR access + should not fault anyway. + The default can be changed by using the "xen_msr_safe" boot parameter. 
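Note on the options above (not part of the diff): a PV-capable guest kernel needs CONFIG_XEN and CONFIG_XEN_PV, and the selected symbols (PARAVIRT_CLOCK, PARAVIRT_XXL, XEN_HAVE_PVMMU, ...) are pulled in automatically. The fragment below is a minimal sketch of the resulting .config, assuming an x86-64 build; CONFIG_HYPERVISOR_GUEST and CONFIG_PARAVIRT are defined in arch/x86/Kconfig rather than in this file, and the exact symbol set depends on the rest of the configuration.

    CONFIG_HYPERVISOR_GUEST=y
    CONFIG_PARAVIRT=y
    CONFIG_PARAVIRT_CLOCK=y      # selected by XEN
    CONFIG_XEN=y
    CONFIG_XEN_PV=y              # defaults to y, 64-bit only
    CONFIG_XEN_PV_SMP=y          # def_bool, follows XEN_PV && SMP
    CONFIG_XEN_DOM0=y            # only for a dom0 kernel
    CONFIG_XEN_PV_MSR_SAFE=y     # default y; "xen_msr_safe" boot parameter overrides it

Whether XEN_DOM0 is actually selectable also hinges on its dependencies as declared above: (XEN_PV && SWIOTLB_XEN) || (XEN_PVH && X86_64), plus X86_IO_APIC, ACPI and PCI.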
diff --git a/arch/x86/xen/Makefile b/arch/x86/xen/Makefile new file mode 100644 index 0000000000..a9ec8c9f5c --- /dev/null +++ b/arch/x86/xen/Makefile @@ -0,0 +1,50 @@ +# SPDX-License-Identifier: GPL-2.0 + +ifdef CONFIG_FUNCTION_TRACER +# Do not profile debug and lowlevel utilities +CFLAGS_REMOVE_spinlock.o = -pg +CFLAGS_REMOVE_time.o = -pg +CFLAGS_REMOVE_irq.o = -pg +endif + +# Make sure early boot has no stackprotector +CFLAGS_enlighten_pv.o := -fno-stack-protector +CFLAGS_mmu_pv.o := -fno-stack-protector + +obj-y += enlighten.o +obj-y += mmu.o +obj-y += time.o +obj-y += grant-table.o +obj-y += suspend.o + +obj-$(CONFIG_XEN_PVHVM) += enlighten_hvm.o +obj-$(CONFIG_XEN_PVHVM) += mmu_hvm.o +obj-$(CONFIG_XEN_PVHVM) += suspend_hvm.o +obj-$(CONFIG_XEN_PVHVM) += platform-pci-unplug.o + +obj-$(CONFIG_XEN_PV) += setup.o +obj-$(CONFIG_XEN_PV) += apic.o +obj-$(CONFIG_XEN_PV) += pmu.o +obj-$(CONFIG_XEN_PV) += suspend_pv.o +obj-$(CONFIG_XEN_PV) += p2m.o +obj-$(CONFIG_XEN_PV) += enlighten_pv.o +obj-$(CONFIG_XEN_PV) += mmu_pv.o +obj-$(CONFIG_XEN_PV) += irq.o +obj-$(CONFIG_XEN_PV) += multicalls.o +obj-$(CONFIG_XEN_PV) += xen-asm.o + +obj-$(CONFIG_XEN_PVH) += enlighten_pvh.o + +obj-$(CONFIG_EVENT_TRACING) += trace.o + +obj-$(CONFIG_SMP) += smp.o +obj-$(CONFIG_XEN_PV_SMP) += smp_pv.o +obj-$(CONFIG_XEN_PVHVM_SMP) += smp_hvm.o + +obj-$(CONFIG_PARAVIRT_SPINLOCKS)+= spinlock.o + +obj-$(CONFIG_XEN_DEBUG_FS) += debugfs.o + +obj-$(CONFIG_XEN_DOM0) += vga.o + +obj-$(CONFIG_XEN_EFI) += efi.o diff --git a/arch/x86/xen/apic.c b/arch/x86/xen/apic.c new file mode 100644 index 0000000000..7ad91225fd --- /dev/null +++ b/arch/x86/xen/apic.c @@ -0,0 +1,163 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/init.h> +#include <linux/thread_info.h> + +#include <asm/x86_init.h> +#include <asm/apic.h> +#include <asm/io_apic.h> +#include <asm/xen/hypercall.h> + +#include <xen/xen.h> +#include <xen/interface/physdev.h> +#include "xen-ops.h" +#include "pmu.h" +#include "smp.h" + +static unsigned int xen_io_apic_read(unsigned apic, unsigned reg) +{ + struct physdev_apic apic_op; + int ret; + + apic_op.apic_physbase = mpc_ioapic_addr(apic); + apic_op.reg = reg; + ret = HYPERVISOR_physdev_op(PHYSDEVOP_apic_read, &apic_op); + if (!ret) + return apic_op.value; + + /* fallback to return an emulated IO_APIC values */ + if (reg == 0x1) + return 0x00170020; + else if (reg == 0x0) + return apic << 24; + + return 0xfd; +} + +static u32 xen_set_apic_id(unsigned int x) +{ + WARN_ON(1); + return x; +} + +static unsigned int xen_get_apic_id(unsigned long x) +{ + return ((x)>>24) & 0xFFu; +} + +static u32 xen_apic_read(u32 reg) +{ + struct xen_platform_op op = { + .cmd = XENPF_get_cpuinfo, + .interface_version = XENPF_INTERFACE_VERSION, + .u.pcpu_info.xen_cpuid = 0, + }; + int ret; + + /* Shouldn't need this as APIC is turned off for PV, and we only + * get called on the bootup processor. But just in case. 
*/ + if (!xen_initial_domain() || smp_processor_id()) + return 0; + + if (reg == APIC_LVR) + return 0x14; + if (reg != APIC_ID) + return 0; + + ret = HYPERVISOR_platform_op(&op); + if (ret) + op.u.pcpu_info.apic_id = BAD_APICID; + + return op.u.pcpu_info.apic_id << 24; +} + +static void xen_apic_write(u32 reg, u32 val) +{ + if (reg == APIC_LVTPC) { + (void)pmu_apic_update(reg); + return; + } + + /* Warn to see if there's any stray references */ + WARN(1,"register: %x, value: %x\n", reg, val); +} + +static void xen_apic_eoi(void) +{ + WARN_ON_ONCE(1); +} + +static u64 xen_apic_icr_read(void) +{ + return 0; +} + +static void xen_apic_icr_write(u32 low, u32 id) +{ + /* Warn to see if there's any stray references */ + WARN_ON(1); +} + +static int xen_apic_probe_pv(void) +{ + if (xen_pv_domain()) + return 1; + + return 0; +} + +static int xen_madt_oem_check(char *oem_id, char *oem_table_id) +{ + return xen_pv_domain(); +} + +static int xen_phys_pkg_id(int initial_apic_id, int index_msb) +{ + return initial_apic_id >> index_msb; +} + +static int xen_cpu_present_to_apicid(int cpu) +{ + if (cpu_present(cpu)) + return cpu_data(cpu).apicid; + else + return BAD_APICID; +} + +static struct apic xen_pv_apic __ro_after_init = { + .name = "Xen PV", + .probe = xen_apic_probe_pv, + .acpi_madt_oem_check = xen_madt_oem_check, + + /* .delivery_mode and .dest_mode_logical not used by XENPV */ + + .disable_esr = 0, + + .cpu_present_to_apicid = xen_cpu_present_to_apicid, + .phys_pkg_id = xen_phys_pkg_id, /* detect_ht */ + + .max_apic_id = UINT_MAX, + .get_apic_id = xen_get_apic_id, + .set_apic_id = xen_set_apic_id, + + .calc_dest_apicid = apic_flat_calc_apicid, + +#ifdef CONFIG_SMP + .send_IPI_mask = xen_send_IPI_mask, + .send_IPI_mask_allbutself = xen_send_IPI_mask_allbutself, + .send_IPI_allbutself = xen_send_IPI_allbutself, + .send_IPI_all = xen_send_IPI_all, + .send_IPI_self = xen_send_IPI_self, +#endif + .read = xen_apic_read, + .write = xen_apic_write, + .eoi = xen_apic_eoi, + + .icr_read = xen_apic_icr_read, + .icr_write = xen_apic_icr_write, +}; +apic_driver(xen_pv_apic); + +void __init xen_init_apic(void) +{ + x86_apic_ops.io_apic_read = xen_io_apic_read; +} diff --git a/arch/x86/xen/debugfs.c b/arch/x86/xen/debugfs.c new file mode 100644 index 0000000000..5324109986 --- /dev/null +++ b/arch/x86/xen/debugfs.c @@ -0,0 +1,16 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/init.h> +#include <linux/debugfs.h> +#include <linux/slab.h> + +#include "debugfs.h" + +static struct dentry *d_xen_debug; + +struct dentry * __init xen_init_debugfs(void) +{ + if (!d_xen_debug) + d_xen_debug = debugfs_create_dir("xen", NULL); + return d_xen_debug; +} + diff --git a/arch/x86/xen/debugfs.h b/arch/x86/xen/debugfs.h new file mode 100644 index 0000000000..6b813ad109 --- /dev/null +++ b/arch/x86/xen/debugfs.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XEN_DEBUGFS_H +#define _XEN_DEBUGFS_H + +struct dentry * __init xen_init_debugfs(void); + +#endif /* _XEN_DEBUGFS_H */ diff --git a/arch/x86/xen/efi.c b/arch/x86/xen/efi.c new file mode 100644 index 0000000000..7250d0e0e1 --- /dev/null +++ b/arch/x86/xen/efi.c @@ -0,0 +1,151 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2014 Oracle Co., Daniel Kiper + */ + +#include <linux/bitops.h> +#include <linux/efi.h> +#include <linux/init.h> +#include <linux/string.h> + +#include <xen/xen.h> +#include <xen/xen-ops.h> +#include <xen/interface/platform.h> + +#include <asm/page.h> +#include <asm/setup.h> +#include <asm/xen/hypercall.h> + 
+#include "xen-ops.h" + +static efi_char16_t vendor[100] __initdata; + +static efi_system_table_t efi_systab_xen __initdata = { + .hdr = { + .signature = EFI_SYSTEM_TABLE_SIGNATURE, + .revision = 0, /* Initialized later. */ + .headersize = 0, /* Ignored by Linux Kernel. */ + .crc32 = 0, /* Ignored by Linux Kernel. */ + .reserved = 0 + }, + .fw_vendor = EFI_INVALID_TABLE_ADDR, /* Initialized later. */ + .fw_revision = 0, /* Initialized later. */ + .con_in_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ + .con_in = NULL, /* Not used under Xen. */ + .con_out_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ + .con_out = NULL, /* Not used under Xen. */ + .stderr_handle = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ + .stderr = EFI_INVALID_TABLE_ADDR, /* Not used under Xen. */ + .runtime = (efi_runtime_services_t *)EFI_INVALID_TABLE_ADDR, + /* Not used under Xen. */ + .boottime = (efi_boot_services_t *)EFI_INVALID_TABLE_ADDR, + /* Not used under Xen. */ + .nr_tables = 0, /* Initialized later. */ + .tables = EFI_INVALID_TABLE_ADDR /* Initialized later. */ +}; + +static efi_system_table_t __init *xen_efi_probe(void) +{ + struct xen_platform_op op = { + .cmd = XENPF_firmware_info, + .u.firmware_info = { + .type = XEN_FW_EFI_INFO, + .index = XEN_FW_EFI_CONFIG_TABLE + } + }; + union xenpf_efi_info *info = &op.u.firmware_info.u.efi_info; + + if (!xen_initial_domain() || HYPERVISOR_platform_op(&op) < 0) + return NULL; + + /* Here we know that Xen runs on EFI platform. */ + xen_efi_runtime_setup(); + + efi_systab_xen.tables = info->cfg.addr; + efi_systab_xen.nr_tables = info->cfg.nent; + + op.cmd = XENPF_firmware_info; + op.u.firmware_info.type = XEN_FW_EFI_INFO; + op.u.firmware_info.index = XEN_FW_EFI_VENDOR; + info->vendor.bufsz = sizeof(vendor); + set_xen_guest_handle(info->vendor.name, vendor); + + if (HYPERVISOR_platform_op(&op) == 0) { + efi_systab_xen.fw_vendor = __pa_symbol(vendor); + efi_systab_xen.fw_revision = info->vendor.revision; + } else + efi_systab_xen.fw_vendor = __pa_symbol(L"UNKNOWN"); + + op.cmd = XENPF_firmware_info; + op.u.firmware_info.type = XEN_FW_EFI_INFO; + op.u.firmware_info.index = XEN_FW_EFI_VERSION; + + if (HYPERVISOR_platform_op(&op) == 0) + efi_systab_xen.hdr.revision = info->version; + + op.cmd = XENPF_firmware_info; + op.u.firmware_info.type = XEN_FW_EFI_INFO; + op.u.firmware_info.index = XEN_FW_EFI_RT_VERSION; + + if (HYPERVISOR_platform_op(&op) == 0) + efi.runtime_version = info->version; + + return &efi_systab_xen; +} + +/* + * Determine whether we're in secure boot mode. + */ +static enum efi_secureboot_mode xen_efi_get_secureboot(void) +{ + static efi_guid_t shim_guid = EFI_SHIM_LOCK_GUID; + enum efi_secureboot_mode mode; + efi_status_t status; + u8 moksbstate; + unsigned long size; + + mode = efi_get_secureboot_mode(efi.get_variable); + if (mode == efi_secureboot_mode_unknown) { + pr_err("Could not determine UEFI Secure Boot status.\n"); + return efi_secureboot_mode_unknown; + } + if (mode != efi_secureboot_mode_enabled) + return mode; + + /* See if a user has put the shim into insecure mode. */ + size = sizeof(moksbstate); + status = efi.get_variable(L"MokSBStateRT", &shim_guid, + NULL, &size, &moksbstate); + + /* If it fails, we don't care why. Default to secure. 
*/ + if (status != EFI_SUCCESS) + goto secure_boot_enabled; + + if (moksbstate == 1) + return efi_secureboot_mode_disabled; + + secure_boot_enabled: + pr_info("UEFI Secure Boot is enabled.\n"); + return efi_secureboot_mode_enabled; +} + +void __init xen_efi_init(struct boot_params *boot_params) +{ + efi_system_table_t *efi_systab_xen; + + efi_systab_xen = xen_efi_probe(); + + if (efi_systab_xen == NULL) + return; + + strscpy((char *)&boot_params->efi_info.efi_loader_signature, "Xen", + sizeof(boot_params->efi_info.efi_loader_signature)); + boot_params->efi_info.efi_systab = (__u32)__pa(efi_systab_xen); + boot_params->efi_info.efi_systab_hi = (__u32)(__pa(efi_systab_xen) >> 32); + + boot_params->secure_boot = xen_efi_get_secureboot(); + + set_bit(EFI_BOOT, &efi.flags); + set_bit(EFI_PARAVIRT, &efi.flags); + set_bit(EFI_64BIT, &efi.flags); +} diff --git a/arch/x86/xen/enlighten.c b/arch/x86/xen/enlighten.c new file mode 100644 index 0000000000..3c61bb98c1 --- /dev/null +++ b/arch/x86/xen/enlighten.c @@ -0,0 +1,352 @@ +// SPDX-License-Identifier: GPL-2.0 + +#ifdef CONFIG_XEN_BALLOON_MEMORY_HOTPLUG +#include <linux/memblock.h> +#endif +#include <linux/console.h> +#include <linux/cpu.h> +#include <linux/kexec.h> +#include <linux/slab.h> +#include <linux/panic_notifier.h> + +#include <xen/xen.h> +#include <xen/features.h> +#include <xen/interface/sched.h> +#include <xen/interface/version.h> +#include <xen/page.h> + +#include <asm/xen/hypercall.h> +#include <asm/xen/hypervisor.h> +#include <asm/cpu.h> +#include <asm/e820/api.h> +#include <asm/setup.h> + +#include "xen-ops.h" +#include "smp.h" +#include "pmu.h" + +EXPORT_SYMBOL_GPL(hypercall_page); + +/* + * Pointer to the xen_vcpu_info structure or + * &HYPERVISOR_shared_info->vcpu_info[cpu]. See xen_hvm_init_shared_info + * and xen_vcpu_setup for details. By default it points to share_info->vcpu_info + * but during boot it is switched to point to xen_vcpu_info. + * The pointer is used in xen_evtchn_do_upcall to acknowledge pending events. + * Make sure that xen_vcpu_info doesn't cross a page boundary by making it + * cache-line aligned (the struct is guaranteed to have a size of 64 bytes, + * which matches the cache line size of 64-bit x86 processors). + */ +DEFINE_PER_CPU(struct vcpu_info *, xen_vcpu); +DEFINE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info); + +/* Linux <-> Xen vCPU id mapping */ +DEFINE_PER_CPU(uint32_t, xen_vcpu_id); +EXPORT_PER_CPU_SYMBOL(xen_vcpu_id); + +unsigned long *machine_to_phys_mapping = (void *)MACH2PHYS_VIRT_START; +EXPORT_SYMBOL(machine_to_phys_mapping); +unsigned long machine_to_phys_nr; +EXPORT_SYMBOL(machine_to_phys_nr); + +struct start_info *xen_start_info; +EXPORT_SYMBOL_GPL(xen_start_info); + +struct shared_info xen_dummy_shared_info; + +__read_mostly bool xen_have_vector_callback = true; +EXPORT_SYMBOL_GPL(xen_have_vector_callback); + +/* + * NB: These need to live in .data or alike because they're used by + * xen_prepare_pvh() which runs before clearing the bss. + */ +enum xen_domain_type __ro_after_init xen_domain_type = XEN_NATIVE; +EXPORT_SYMBOL_GPL(xen_domain_type); +uint32_t __ro_after_init xen_start_flags; +EXPORT_SYMBOL(xen_start_flags); + +/* + * Point at some empty memory to start with. We map the real shared_info + * page as soon as fixmap is up and running. 
+ */ +struct shared_info *HYPERVISOR_shared_info = &xen_dummy_shared_info; + +static int xen_cpu_up_online(unsigned int cpu) +{ + xen_init_lock_cpu(cpu); + return 0; +} + +int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int), + int (*cpu_dead_cb)(unsigned int)) +{ + int rc; + + rc = cpuhp_setup_state_nocalls(CPUHP_XEN_PREPARE, + "x86/xen/guest:prepare", + cpu_up_prepare_cb, cpu_dead_cb); + if (rc >= 0) { + rc = cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, + "x86/xen/guest:online", + xen_cpu_up_online, NULL); + if (rc < 0) + cpuhp_remove_state_nocalls(CPUHP_XEN_PREPARE); + } + + return rc >= 0 ? 0 : rc; +} + +static void xen_vcpu_setup_restore(int cpu) +{ + /* Any per_cpu(xen_vcpu) is stale, so reset it */ + xen_vcpu_info_reset(cpu); + + /* + * For PVH and PVHVM, setup online VCPUs only. The rest will + * be handled by hotplug. + */ + if (xen_pv_domain() || + (xen_hvm_domain() && cpu_online(cpu))) + xen_vcpu_setup(cpu); +} + +/* + * On restore, set the vcpu placement up again. + * If it fails, then we're in a bad state, since + * we can't back out from using it... + */ +void xen_vcpu_restore(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + bool other_cpu = (cpu != smp_processor_id()); + bool is_up; + + if (xen_vcpu_nr(cpu) == XEN_VCPU_ID_INVALID) + continue; + + /* Only Xen 4.5 and higher support this. */ + is_up = HYPERVISOR_vcpu_op(VCPUOP_is_up, + xen_vcpu_nr(cpu), NULL) > 0; + + if (other_cpu && is_up && + HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL)) + BUG(); + + if (xen_pv_domain() || xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_setup_runstate_info(cpu); + + xen_vcpu_setup_restore(cpu); + + if (other_cpu && is_up && + HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL)) + BUG(); + } +} + +void xen_vcpu_info_reset(int cpu) +{ + if (xen_vcpu_nr(cpu) < MAX_VIRT_CPUS) { + per_cpu(xen_vcpu, cpu) = + &HYPERVISOR_shared_info->vcpu_info[xen_vcpu_nr(cpu)]; + } else { + /* Set to NULL so that if somebody accesses it we get an OOPS */ + per_cpu(xen_vcpu, cpu) = NULL; + } +} + +void xen_vcpu_setup(int cpu) +{ + struct vcpu_register_vcpu_info info; + int err; + struct vcpu_info *vcpup; + + BUILD_BUG_ON(sizeof(*vcpup) > SMP_CACHE_BYTES); + BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + + /* + * This path is called on PVHVM at bootup (xen_hvm_smp_prepare_boot_cpu) + * and at restore (xen_vcpu_restore). Also called for hotplugged + * VCPUs (cpu_init -> xen_hvm_cpu_prepare_hvm). + * However, the hypercall can only be done once (see below) so if a VCPU + * is offlined and comes back online then let's not redo the hypercall. + * + * For PV it is called during restore (xen_vcpu_restore) and bootup + * (xen_setup_vcpu_info_placement). The hotplug mechanism does not + * use this function. + */ + if (xen_hvm_domain()) { + if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu)) + return; + } + + vcpup = &per_cpu(xen_vcpu_info, cpu); + info.mfn = arbitrary_virt_to_mfn(vcpup); + info.offset = offset_in_page(vcpup); + + /* + * N.B. This hypercall can _only_ be called once per CPU. + * Subsequent calls will error out with -EINVAL. This is due to + * the fact that hypervisor has no unregister variant and this + * hypercall does not allow to over-write info.mfn and + * info.offset. 
+ */ + err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, xen_vcpu_nr(cpu), + &info); + if (err) + panic("register_vcpu_info failed: cpu=%d err=%d\n", cpu, err); + + per_cpu(xen_vcpu, cpu) = vcpup; +} + +void __init xen_banner(void) +{ + unsigned version = HYPERVISOR_xen_version(XENVER_version, NULL); + struct xen_extraversion extra; + + HYPERVISOR_xen_version(XENVER_extraversion, &extra); + + pr_info("Booting kernel on %s\n", pv_info.name); + pr_info("Xen version: %u.%u%s%s\n", + version >> 16, version & 0xffff, extra.extraversion, + xen_feature(XENFEAT_mmu_pt_update_preserve_ad) + ? " (preserve-AD)" : ""); +} + +/* Check if running on Xen version (major, minor) or later */ +bool xen_running_on_version_or_later(unsigned int major, unsigned int minor) +{ + unsigned int version; + + if (!xen_domain()) + return false; + + version = HYPERVISOR_xen_version(XENVER_version, NULL); + if ((((version >> 16) == major) && ((version & 0xffff) >= minor)) || + ((version >> 16) > major)) + return true; + return false; +} + +void __init xen_add_preferred_consoles(void) +{ + add_preferred_console("xenboot", 0, NULL); + if (!boot_params.screen_info.orig_video_isVGA) + add_preferred_console("tty", 0, NULL); + add_preferred_console("hvc", 0, NULL); + if (boot_params.screen_info.orig_video_isVGA) + add_preferred_console("tty", 0, NULL); +} + +void xen_reboot(int reason) +{ + struct sched_shutdown r = { .reason = reason }; + int cpu; + + for_each_online_cpu(cpu) + xen_pmu_finish(cpu); + + if (HYPERVISOR_sched_op(SCHEDOP_shutdown, &r)) + BUG(); +} + +static int reboot_reason = SHUTDOWN_reboot; +static bool xen_legacy_crash; +void xen_emergency_restart(void) +{ + xen_reboot(reboot_reason); +} + +static int +xen_panic_event(struct notifier_block *this, unsigned long event, void *ptr) +{ + if (!kexec_crash_loaded()) { + if (xen_legacy_crash) + xen_reboot(SHUTDOWN_crash); + + reboot_reason = SHUTDOWN_crash; + + /* + * If panic_timeout==0 then we are supposed to wait forever. + * However, to preserve original dom0 behavior we have to drop + * into hypervisor. (domU behavior is controlled by its + * config file) + */ + if (panic_timeout == 0) + panic_timeout = -1; + } + return NOTIFY_DONE; +} + +static int __init parse_xen_legacy_crash(char *arg) +{ + xen_legacy_crash = true; + return 0; +} +early_param("xen_legacy_crash", parse_xen_legacy_crash); + +static struct notifier_block xen_panic_block = { + .notifier_call = xen_panic_event, + .priority = INT_MIN +}; + +int xen_panic_handler_init(void) +{ + atomic_notifier_chain_register(&panic_notifier_list, &xen_panic_block); + return 0; +} + +void xen_pin_vcpu(int cpu) +{ + static bool disable_pinning; + struct sched_pin_override pin_override; + int ret; + + if (disable_pinning) + return; + + pin_override.pcpu = cpu; + ret = HYPERVISOR_sched_op(SCHEDOP_pin_override, &pin_override); + + /* Ignore errors when removing override. */ + if (cpu < 0) + return; + + switch (ret) { + case -ENOSYS: + pr_warn("Unable to pin on physical cpu %d. In case of problems consider vcpu pinning.\n", + cpu); + disable_pinning = true; + break; + case -EPERM: + WARN(1, "Trying to pin vcpu without having privilege to do so\n"); + disable_pinning = true; + break; + case -EINVAL: + case -EBUSY: + pr_warn("Physical cpu %d not available for pinning. 
Check Xen cpu configuration.\n", + cpu); + break; + case 0: + break; + default: + WARN(1, "rc %d while trying to pin vcpu\n", ret); + disable_pinning = true; + } +} + +#ifdef CONFIG_HOTPLUG_CPU +void xen_arch_register_cpu(int num) +{ + arch_register_cpu(num); +} +EXPORT_SYMBOL(xen_arch_register_cpu); + +void xen_arch_unregister_cpu(int num) +{ + arch_unregister_cpu(num); +} +EXPORT_SYMBOL(xen_arch_unregister_cpu); +#endif diff --git a/arch/x86/xen/enlighten_hvm.c b/arch/x86/xen/enlighten_hvm.c new file mode 100644 index 0000000000..3f8c34707c --- /dev/null +++ b/arch/x86/xen/enlighten_hvm.c @@ -0,0 +1,335 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/acpi.h> +#include <linux/cpu.h> +#include <linux/kexec.h> +#include <linux/memblock.h> +#include <linux/virtio_anchor.h> + +#include <xen/features.h> +#include <xen/events.h> +#include <xen/hvm.h> +#include <xen/interface/hvm/hvm_op.h> +#include <xen/interface/memory.h> + +#include <asm/apic.h> +#include <asm/cpu.h> +#include <asm/smp.h> +#include <asm/io_apic.h> +#include <asm/reboot.h> +#include <asm/setup.h> +#include <asm/idtentry.h> +#include <asm/hypervisor.h> +#include <asm/e820/api.h> +#include <asm/early_ioremap.h> + +#include <asm/xen/cpuid.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/page.h> + +#include "xen-ops.h" +#include "mmu.h" +#include "smp.h" + +static unsigned long shared_info_pfn; + +__ro_after_init bool xen_percpu_upcall; +EXPORT_SYMBOL_GPL(xen_percpu_upcall); + +void xen_hvm_init_shared_info(void) +{ + struct xen_add_to_physmap xatp; + + xatp.domid = DOMID_SELF; + xatp.idx = 0; + xatp.space = XENMAPSPACE_shared_info; + xatp.gpfn = shared_info_pfn; + if (HYPERVISOR_memory_op(XENMEM_add_to_physmap, &xatp)) + BUG(); +} + +static void __init reserve_shared_info(void) +{ + u64 pa; + + /* + * Search for a free page starting at 4kB physical address. + * Low memory is preferred to avoid an EPT large page split up + * by the mapping. + * Starting below X86_RESERVE_LOW (usually 64kB) is fine as + * the BIOS used for HVM guests is well behaved and won't + * clobber memory other than the first 4kB. + */ + for (pa = PAGE_SIZE; + !e820__mapped_all(pa, pa + PAGE_SIZE, E820_TYPE_RAM) || + memblock_is_reserved(pa); + pa += PAGE_SIZE) + ; + + shared_info_pfn = PHYS_PFN(pa); + + memblock_reserve(pa, PAGE_SIZE); + HYPERVISOR_shared_info = early_memremap(pa, PAGE_SIZE); +} + +static void __init xen_hvm_init_mem_mapping(void) +{ + early_memunmap(HYPERVISOR_shared_info, PAGE_SIZE); + HYPERVISOR_shared_info = __va(PFN_PHYS(shared_info_pfn)); + + /* + * The virtual address of the shared_info page has changed, so + * the vcpu_info pointer for VCPU 0 is now stale. + * + * The prepare_boot_cpu callback will re-initialize it via + * xen_vcpu_setup, but we can't rely on that to be called for + * old Xen versions (xen_have_vector_callback == 0). + * + * It is, in any case, bad to have a stale vcpu_info pointer + * so reset it now. + */ + xen_vcpu_info_reset(0); +} + +static void __init init_hvm_pv_info(void) +{ + int major, minor; + uint32_t eax, ebx, ecx, edx, base; + + base = xen_cpuid_base(); + eax = cpuid_eax(base + 1); + + major = eax >> 16; + minor = eax & 0xffff; + printk(KERN_INFO "Xen version %d.%d.\n", major, minor); + + xen_domain_type = XEN_HVM_DOMAIN; + + /* PVH set up hypercall page in xen_prepare_pvh(). 
*/ + if (xen_pvh_domain()) + pv_info.name = "Xen PVH"; + else { + u64 pfn; + uint32_t msr; + + pv_info.name = "Xen HVM"; + msr = cpuid_ebx(base + 2); + pfn = __pa(hypercall_page); + wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); + } + + xen_setup_features(); + + cpuid(base + 4, &eax, &ebx, &ecx, &edx); + if (eax & XEN_HVM_CPUID_VCPU_ID_PRESENT) + this_cpu_write(xen_vcpu_id, ebx); + else + this_cpu_write(xen_vcpu_id, smp_processor_id()); +} + +DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback) +{ + struct pt_regs *old_regs = set_irq_regs(regs); + + if (xen_percpu_upcall) + apic_eoi(); + + inc_irq_stat(irq_hv_callback_count); + + xen_evtchn_do_upcall(); + + set_irq_regs(old_regs); +} + +#ifdef CONFIG_KEXEC_CORE +static void xen_hvm_shutdown(void) +{ + native_machine_shutdown(); + if (kexec_in_progress) + xen_reboot(SHUTDOWN_soft_reset); +} + +static void xen_hvm_crash_shutdown(struct pt_regs *regs) +{ + native_machine_crash_shutdown(regs); + xen_reboot(SHUTDOWN_soft_reset); +} +#endif + +static int xen_cpu_up_prepare_hvm(unsigned int cpu) +{ + int rc = 0; + + /* + * If a CPU was offlined earlier and offlining timed out then the + * lock mechanism is still initialized. Uninit it unconditionally + * as it's safe to call even if already uninited. Interrupts and + * timer have already been handled in xen_cpu_dead_hvm(). + */ + xen_uninit_lock_cpu(cpu); + + if (cpu_acpi_id(cpu) != U32_MAX) + per_cpu(xen_vcpu_id, cpu) = cpu_acpi_id(cpu); + else + per_cpu(xen_vcpu_id, cpu) = cpu; + xen_vcpu_setup(cpu); + if (!xen_have_vector_callback) + return 0; + + if (xen_percpu_upcall) { + rc = xen_set_upcall_vector(cpu); + if (rc) { + WARN(1, "HVMOP_set_evtchn_upcall_vector" + " for CPU %d failed: %d\n", cpu, rc); + return rc; + } + } + + if (xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_setup_timer(cpu); + + rc = xen_smp_intr_init(cpu); + if (rc) { + WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n", + cpu, rc); + } + return rc; +} + +static int xen_cpu_dead_hvm(unsigned int cpu) +{ + xen_smp_intr_free(cpu); + + if (xen_have_vector_callback && xen_feature(XENFEAT_hvm_safe_pvclock)) + xen_teardown_timer(cpu); + return 0; +} + +static void __init xen_hvm_guest_init(void) +{ + if (xen_pv_domain()) + return; + + if (IS_ENABLED(CONFIG_XEN_VIRTIO_FORCE_GRANT)) + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc); + + init_hvm_pv_info(); + + reserve_shared_info(); + xen_hvm_init_shared_info(); + + /* + * xen_vcpu is a pointer to the vcpu_info struct in the shared_info + * page, we use it in the event channel upcall and in some pvclock + * related functions. 
+ */ + xen_vcpu_info_reset(0); + + xen_panic_handler_init(); + + xen_hvm_smp_init(); + WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_hvm, xen_cpu_dead_hvm)); + xen_unplug_emulated_devices(); + x86_init.irqs.intr_init = xen_init_IRQ; + xen_hvm_init_time_ops(); + xen_hvm_init_mmu_ops(); + +#ifdef CONFIG_KEXEC_CORE + machine_ops.shutdown = xen_hvm_shutdown; + machine_ops.crash_shutdown = xen_hvm_crash_shutdown; +#endif +} + +static __init int xen_parse_nopv(char *arg) +{ + pr_notice("\"xen_nopv\" is deprecated, please use \"nopv\" instead\n"); + + if (xen_cpuid_base()) + nopv = true; + return 0; +} +early_param("xen_nopv", xen_parse_nopv); + +static __init int xen_parse_no_vector_callback(char *arg) +{ + xen_have_vector_callback = false; + return 0; +} +early_param("xen_no_vector_callback", xen_parse_no_vector_callback); + +static __init bool xen_x2apic_available(void) +{ + return x2apic_supported(); +} + +static bool __init msi_ext_dest_id(void) +{ + return cpuid_eax(xen_cpuid_base() + 4) & XEN_HVM_CPUID_EXT_DEST_ID; +} + +static __init void xen_hvm_guest_late_init(void) +{ +#ifdef CONFIG_XEN_PVH + /* Test for PVH domain (PVH boot path taken overrides ACPI flags). */ + if (!xen_pvh && + (x86_platform.legacy.rtc || !x86_platform.legacy.no_vga)) + return; + + /* PVH detected. */ + xen_pvh = true; + + if (nopv) + panic("\"nopv\" and \"xen_nopv\" parameters are unsupported in PVH guest."); + + /* Make sure we don't fall back to (default) ACPI_IRQ_MODEL_PIC. */ + if (!nr_ioapics && acpi_irq_model == ACPI_IRQ_MODEL_PIC) + acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM; + + machine_ops.emergency_restart = xen_emergency_restart; + pv_info.name = "Xen PVH"; +#endif +} + +static uint32_t __init xen_platform_hvm(void) +{ + uint32_t xen_domain = xen_cpuid_base(); + struct x86_hyper_init *h = &x86_hyper_xen_hvm.init; + + if (xen_pv_domain()) + return 0; + + if (xen_pvh_domain() && nopv) { + /* Guest booting via the Xen-PVH boot entry goes here */ + pr_info("\"nopv\" parameter is ignored in PVH guest\n"); + nopv = false; + } else if (nopv && xen_domain) { + /* + * Guest booting via normal boot entry (like via grub2) goes + * here. + * + * Use interface functions for bare hardware if nopv, + * xen_hvm_guest_late_init is an exception as we need to + * detect PVH and panic there. + */ + h->init_platform = x86_init_noop; + h->x2apic_available = bool_x86_init_noop; + h->init_mem_mapping = x86_init_noop; + h->init_after_bootmem = x86_init_noop; + h->guest_late_init = xen_hvm_guest_late_init; + x86_hyper_xen_hvm.runtime.pin_vcpu = x86_op_int_noop; + } + return xen_domain; +} + +struct hypervisor_x86 x86_hyper_xen_hvm __initdata = { + .name = "Xen HVM", + .detect = xen_platform_hvm, + .type = X86_HYPER_XEN_HVM, + .init.init_platform = xen_hvm_guest_init, + .init.x2apic_available = xen_x2apic_available, + .init.init_mem_mapping = xen_hvm_init_mem_mapping, + .init.guest_late_init = xen_hvm_guest_late_init, + .init.msi_ext_dest_id = msi_ext_dest_id, + .runtime.pin_vcpu = xen_pin_vcpu, + .ignore_nopv = true, +}; diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c new file mode 100644 index 0000000000..aeb33e0a3f --- /dev/null +++ b/arch/x86/xen/enlighten_pv.c @@ -0,0 +1,1564 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Core of Xen paravirt_ops implementation. 
+ * + * This file contains the xen_paravirt_ops structure itself, and the + * implementations for: + * - privileged instructions + * - interrupt flags + * - segment operations + * - booting and setup + * + * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 + */ + +#include <linux/cpu.h> +#include <linux/kernel.h> +#include <linux/init.h> +#include <linux/smp.h> +#include <linux/preempt.h> +#include <linux/hardirq.h> +#include <linux/percpu.h> +#include <linux/delay.h> +#include <linux/start_kernel.h> +#include <linux/sched.h> +#include <linux/kprobes.h> +#include <linux/kstrtox.h> +#include <linux/memblock.h> +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/page-flags.h> +#include <linux/pci.h> +#include <linux/gfp.h> +#include <linux/edd.h> +#include <linux/reboot.h> +#include <linux/virtio_anchor.h> +#include <linux/stackprotector.h> + +#include <xen/xen.h> +#include <xen/events.h> +#include <xen/interface/xen.h> +#include <xen/interface/version.h> +#include <xen/interface/physdev.h> +#include <xen/interface/vcpu.h> +#include <xen/interface/memory.h> +#include <xen/interface/nmi.h> +#include <xen/interface/xen-mca.h> +#include <xen/features.h> +#include <xen/page.h> +#include <xen/hvc-console.h> +#include <xen/acpi.h> + +#include <asm/paravirt.h> +#include <asm/apic.h> +#include <asm/page.h> +#include <asm/xen/pci.h> +#include <asm/xen/hypercall.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/cpuid.h> +#include <asm/fixmap.h> +#include <asm/processor.h> +#include <asm/proto.h> +#include <asm/msr-index.h> +#include <asm/traps.h> +#include <asm/setup.h> +#include <asm/desc.h> +#include <asm/pgalloc.h> +#include <asm/tlbflush.h> +#include <asm/reboot.h> +#include <asm/hypervisor.h> +#include <asm/mach_traps.h> +#include <asm/mtrr.h> +#include <asm/mwait.h> +#include <asm/pci_x86.h> +#include <asm/cpu.h> +#ifdef CONFIG_X86_IOPL_IOPERM +#include <asm/io_bitmap.h> +#endif + +#ifdef CONFIG_ACPI +#include <linux/acpi.h> +#include <asm/acpi.h> +#include <acpi/proc_cap_intel.h> +#include <acpi/processor.h> +#include <xen/interface/platform.h> +#endif + +#include "xen-ops.h" +#include "mmu.h" +#include "smp.h" +#include "multicalls.h" +#include "pmu.h" + +#include "../kernel/cpu/cpu.h" /* get_cpu_cap() */ + +void *xen_initial_gdt; + +static int xen_cpu_up_prepare_pv(unsigned int cpu); +static int xen_cpu_dead_pv(unsigned int cpu); + +struct tls_descs { + struct desc_struct desc[3]; +}; + +DEFINE_PER_CPU(enum xen_lazy_mode, xen_lazy_mode) = XEN_LAZY_NONE; +DEFINE_PER_CPU(unsigned int, xen_lazy_nesting); + +enum xen_lazy_mode xen_get_lazy_mode(void) +{ + if (in_interrupt()) + return XEN_LAZY_NONE; + + return this_cpu_read(xen_lazy_mode); +} + +/* + * Updating the 3 TLS descriptors in the GDT on every task switch is + * surprisingly expensive so we avoid updating them if they haven't + * changed. Since Xen writes different descriptors than the one + * passed in the update_descriptor hypercall we keep shadow copies to + * compare against. + */ +static DEFINE_PER_CPU(struct tls_descs, shadow_tls_desc); + +static __read_mostly bool xen_msr_safe = IS_ENABLED(CONFIG_XEN_PV_MSR_SAFE); + +static int __init parse_xen_msr_safe(char *str) +{ + if (str) + return kstrtobool(str, &xen_msr_safe); + return -EINVAL; +} +early_param("xen_msr_safe", parse_xen_msr_safe); + +/* Get MTRR settings from Xen and put them into mtrr_state. 
*/ +static void __init xen_set_mtrr_data(void) +{ +#ifdef CONFIG_MTRR + struct xen_platform_op op = { + .cmd = XENPF_read_memtype, + .interface_version = XENPF_INTERFACE_VERSION, + }; + unsigned int reg; + unsigned long mask; + uint32_t eax, width; + static struct mtrr_var_range var[MTRR_MAX_VAR_RANGES] __initdata; + + /* Get physical address width (only 64-bit cpus supported). */ + width = 36; + eax = cpuid_eax(0x80000000); + if ((eax >> 16) == 0x8000 && eax >= 0x80000008) { + eax = cpuid_eax(0x80000008); + width = eax & 0xff; + } + + for (reg = 0; reg < MTRR_MAX_VAR_RANGES; reg++) { + op.u.read_memtype.reg = reg; + if (HYPERVISOR_platform_op(&op)) + break; + + /* + * Only called in dom0, which has all RAM PFNs mapped at + * RAM MFNs, and all PCI space etc. is identity mapped. + * This means we can treat MFN == PFN regarding MTRR settings. + */ + var[reg].base_lo = op.u.read_memtype.type; + var[reg].base_lo |= op.u.read_memtype.mfn << PAGE_SHIFT; + var[reg].base_hi = op.u.read_memtype.mfn >> (32 - PAGE_SHIFT); + mask = ~((op.u.read_memtype.nr_mfns << PAGE_SHIFT) - 1); + mask &= (1UL << width) - 1; + if (mask) + mask |= MTRR_PHYSMASK_V; + var[reg].mask_lo = mask; + var[reg].mask_hi = mask >> 32; + } + + /* Only overwrite MTRR state if any MTRR could be got from Xen. */ + if (reg) + mtrr_overwrite_state(var, reg, MTRR_TYPE_UNCACHABLE); +#endif +} + +static void __init xen_pv_init_platform(void) +{ + /* PV guests can't operate virtio devices without grants. */ + if (IS_ENABLED(CONFIG_XEN_VIRTIO)) + virtio_set_mem_acc_cb(xen_virtio_restricted_mem_acc); + + populate_extra_pte(fix_to_virt(FIX_PARAVIRT_BOOTMAP)); + + set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info); + HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); + + /* xen clock uses per-cpu vcpu_info, need to init it for boot cpu */ + xen_vcpu_info_reset(0); + + /* pvclock is in shared info area */ + xen_init_time_ops(); + + if (xen_initial_domain()) + xen_set_mtrr_data(); + else + mtrr_overwrite_state(NULL, 0, MTRR_TYPE_WRBACK); +} + +static void __init xen_pv_guest_late_init(void) +{ +#ifndef CONFIG_SMP + /* Setup shared vcpu info for non-smp configurations */ + xen_setup_vcpu_info_placement(); +#endif +} + +static __read_mostly unsigned int cpuid_leaf5_ecx_val; +static __read_mostly unsigned int cpuid_leaf5_edx_val; + +static void xen_cpuid(unsigned int *ax, unsigned int *bx, + unsigned int *cx, unsigned int *dx) +{ + unsigned maskebx = ~0; + + /* + * Mask out inconvenient features, to try and disable as many + * unsupported kernel subsystems as possible. + */ + switch (*ax) { + case CPUID_MWAIT_LEAF: + /* Synthesize the values.. */ + *ax = 0; + *bx = 0; + *cx = cpuid_leaf5_ecx_val; + *dx = cpuid_leaf5_edx_val; + return; + + case 0xb: + /* Suppress extended topology stuff */ + maskebx = 0; + break; + } + + asm(XEN_EMULATE_PREFIX "cpuid" + : "=a" (*ax), + "=b" (*bx), + "=c" (*cx), + "=d" (*dx) + : "0" (*ax), "2" (*cx)); + + *bx &= maskebx; +} + +static bool __init xen_check_mwait(void) +{ +#ifdef CONFIG_ACPI + struct xen_platform_op op = { + .cmd = XENPF_set_processor_pminfo, + .u.set_pminfo.id = -1, + .u.set_pminfo.type = XEN_PM_PDC, + }; + uint32_t buf[3]; + unsigned int ax, bx, cx, dx; + unsigned int mwait_mask; + + /* We need to determine whether it is OK to expose the MWAIT + * capability to the kernel to harvest deeper than C3 states from ACPI + * _CST using the processor_harvest_xen.c module. For this to work, we + * need to gather the MWAIT_LEAF values (which the cstate.c code + * checks against). 
The hypervisor won't expose the MWAIT flag because + * it would break backwards compatibility; so we will find out directly + * from the hardware and hypercall. + */ + if (!xen_initial_domain()) + return false; + + /* + * When running under platform earlier than Xen4.2, do not expose + * mwait, to avoid the risk of loading native acpi pad driver + */ + if (!xen_running_on_version_or_later(4, 2)) + return false; + + ax = 1; + cx = 0; + + native_cpuid(&ax, &bx, &cx, &dx); + + mwait_mask = (1 << (X86_FEATURE_EST % 32)) | + (1 << (X86_FEATURE_MWAIT % 32)); + + if ((cx & mwait_mask) != mwait_mask) + return false; + + /* We need to emulate the MWAIT_LEAF and for that we need both + * ecx and edx. The hypercall provides only partial information. + */ + + ax = CPUID_MWAIT_LEAF; + bx = 0; + cx = 0; + dx = 0; + + native_cpuid(&ax, &bx, &cx, &dx); + + /* Ask the Hypervisor whether to clear ACPI_PROC_CAP_C_C2C3_FFH. If so, + * don't expose MWAIT_LEAF and let ACPI pick the IOPORT version of C3. + */ + buf[0] = ACPI_PDC_REVISION_ID; + buf[1] = 1; + buf[2] = (ACPI_PROC_CAP_C_CAPABILITY_SMP | ACPI_PROC_CAP_EST_CAPABILITY_SWSMP); + + set_xen_guest_handle(op.u.set_pminfo.pdc, buf); + + if ((HYPERVISOR_platform_op(&op) == 0) && + (buf[2] & (ACPI_PROC_CAP_C_C1_FFH | ACPI_PROC_CAP_C_C2C3_FFH))) { + cpuid_leaf5_ecx_val = cx; + cpuid_leaf5_edx_val = dx; + } + return true; +#else + return false; +#endif +} + +static bool __init xen_check_xsave(void) +{ + unsigned int cx, xsave_mask; + + cx = cpuid_ecx(1); + + xsave_mask = (1 << (X86_FEATURE_XSAVE % 32)) | + (1 << (X86_FEATURE_OSXSAVE % 32)); + + /* Xen will set CR4.OSXSAVE if supported and not disabled by force */ + return (cx & xsave_mask) == xsave_mask; +} + +static void __init xen_init_capabilities(void) +{ + setup_force_cpu_cap(X86_FEATURE_XENPV); + setup_clear_cpu_cap(X86_FEATURE_DCA); + setup_clear_cpu_cap(X86_FEATURE_APERFMPERF); + setup_clear_cpu_cap(X86_FEATURE_MTRR); + setup_clear_cpu_cap(X86_FEATURE_ACC); + setup_clear_cpu_cap(X86_FEATURE_X2APIC); + setup_clear_cpu_cap(X86_FEATURE_SME); + setup_clear_cpu_cap(X86_FEATURE_LKGS); + + /* + * Xen PV would need some work to support PCID: CR3 handling as well + * as xen_flush_tlb_others() would need updating. + */ + setup_clear_cpu_cap(X86_FEATURE_PCID); + + if (!xen_initial_domain()) + setup_clear_cpu_cap(X86_FEATURE_ACPI); + + if (xen_check_mwait()) + setup_force_cpu_cap(X86_FEATURE_MWAIT); + else + setup_clear_cpu_cap(X86_FEATURE_MWAIT); + + if (!xen_check_xsave()) { + setup_clear_cpu_cap(X86_FEATURE_XSAVE); + setup_clear_cpu_cap(X86_FEATURE_OSXSAVE); + } +} + +static noinstr void xen_set_debugreg(int reg, unsigned long val) +{ + HYPERVISOR_set_debugreg(reg, val); +} + +static noinstr unsigned long xen_get_debugreg(int reg) +{ + return HYPERVISOR_get_debugreg(reg); +} + +static void xen_start_context_switch(struct task_struct *prev) +{ + BUG_ON(preemptible()); + + if (this_cpu_read(xen_lazy_mode) == XEN_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + set_ti_thread_flag(task_thread_info(prev), TIF_LAZY_MMU_UPDATES); + } + enter_lazy(XEN_LAZY_CPU); +} + +static void xen_end_context_switch(struct task_struct *next) +{ + BUG_ON(preemptible()); + + xen_mc_flush(); + leave_lazy(XEN_LAZY_CPU); + if (test_and_clear_ti_thread_flag(task_thread_info(next), TIF_LAZY_MMU_UPDATES)) + arch_enter_lazy_mmu_mode(); +} + +static unsigned long xen_store_tr(void) +{ + return 0; +} + +/* + * Set the page permissions for a particular virtual address. 
If the + * address is a vmalloc mapping (or other non-linear mapping), then + * find the linear mapping of the page and also set its protections to + * match. + */ +static void set_aliased_prot(void *v, pgprot_t prot) +{ + int level; + pte_t *ptep; + pte_t pte; + unsigned long pfn; + unsigned char dummy; + void *va; + + ptep = lookup_address((unsigned long)v, &level); + BUG_ON(ptep == NULL); + + pfn = pte_pfn(*ptep); + pte = pfn_pte(pfn, prot); + + /* + * Careful: update_va_mapping() will fail if the virtual address + * we're poking isn't populated in the page tables. We don't + * need to worry about the direct map (that's always in the page + * tables), but we need to be careful about vmap space. In + * particular, the top level page table can lazily propagate + * entries between processes, so if we've switched mms since we + * vmapped the target in the first place, we might not have the + * top-level page table entry populated. + * + * We disable preemption because we want the same mm active when + * we probe the target and when we issue the hypercall. We'll + * have the same nominal mm, but if we're a kernel thread, lazy + * mm dropping could change our pgd. + * + * Out of an abundance of caution, this uses __get_user() to fault + * in the target address just in case there's some obscure case + * in which the target address isn't readable. + */ + + preempt_disable(); + + copy_from_kernel_nofault(&dummy, v, 1); + + if (HYPERVISOR_update_va_mapping((unsigned long)v, pte, 0)) + BUG(); + + va = __va(PFN_PHYS(pfn)); + + if (va != v && HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) + BUG(); + + preempt_enable(); +} + +static void xen_alloc_ldt(struct desc_struct *ldt, unsigned entries) +{ + const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; + int i; + + /* + * We need to mark the all aliases of the LDT pages RO. We + * don't need to call vm_flush_aliases(), though, since that's + * only responsible for flushing aliases out the TLBs, not the + * page tables, and Xen will flush the TLB for us if needed. + * + * To avoid confusing future readers: none of this is necessary + * to load the LDT. The hypervisor only checks this when the + * LDT is faulted in due to subsequent descriptor access. + */ + + for (i = 0; i < entries; i += entries_per_page) + set_aliased_prot(ldt + i, PAGE_KERNEL_RO); +} + +static void xen_free_ldt(struct desc_struct *ldt, unsigned entries) +{ + const unsigned entries_per_page = PAGE_SIZE / LDT_ENTRY_SIZE; + int i; + + for (i = 0; i < entries; i += entries_per_page) + set_aliased_prot(ldt + i, PAGE_KERNEL); +} + +static void xen_set_ldt(const void *addr, unsigned entries) +{ + struct mmuext_op *op; + struct multicall_space mcs = xen_mc_entry(sizeof(*op)); + + trace_xen_cpu_set_ldt(addr, entries); + + op = mcs.args; + op->cmd = MMUEXT_SET_LDT; + op->arg1.linear_addr = (unsigned long)addr; + op->arg2.nr_ents = entries; + + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(XEN_LAZY_CPU); +} + +static void xen_load_gdt(const struct desc_ptr *dtr) +{ + unsigned long va = dtr->address; + unsigned int size = dtr->size + 1; + unsigned long pfn, mfn; + int level; + pte_t *ptep; + void *virt; + + /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */ + BUG_ON(size > PAGE_SIZE); + BUG_ON(va & ~PAGE_MASK); + + /* + * The GDT is per-cpu and is in the percpu data area. + * That can be virtually mapped, so we need to do a + * page-walk to get the underlying MFN for the + * hypercall. 
The page can also be in the kernel's + * linear range, so we need to RO that mapping too. + */ + ptep = lookup_address(va, &level); + BUG_ON(ptep == NULL); + + pfn = pte_pfn(*ptep); + mfn = pfn_to_mfn(pfn); + virt = __va(PFN_PHYS(pfn)); + + make_lowmem_page_readonly((void *)va); + make_lowmem_page_readonly(virt); + + if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct))) + BUG(); +} + +/* + * load_gdt for early boot, when the gdt is only mapped once + */ +static void __init xen_load_gdt_boot(const struct desc_ptr *dtr) +{ + unsigned long va = dtr->address; + unsigned int size = dtr->size + 1; + unsigned long pfn, mfn; + pte_t pte; + + /* @size should be at most GDT_SIZE which is smaller than PAGE_SIZE. */ + BUG_ON(size > PAGE_SIZE); + BUG_ON(va & ~PAGE_MASK); + + pfn = virt_to_pfn((void *)va); + mfn = pfn_to_mfn(pfn); + + pte = pfn_pte(pfn, PAGE_KERNEL_RO); + + if (HYPERVISOR_update_va_mapping((unsigned long)va, pte, 0)) + BUG(); + + if (HYPERVISOR_set_gdt(&mfn, size / sizeof(struct desc_struct))) + BUG(); +} + +static inline bool desc_equal(const struct desc_struct *d1, + const struct desc_struct *d2) +{ + return !memcmp(d1, d2, sizeof(*d1)); +} + +static void load_TLS_descriptor(struct thread_struct *t, + unsigned int cpu, unsigned int i) +{ + struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i]; + struct desc_struct *gdt; + xmaddr_t maddr; + struct multicall_space mc; + + if (desc_equal(shadow, &t->tls_array[i])) + return; + + *shadow = t->tls_array[i]; + + gdt = get_cpu_gdt_rw(cpu); + maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]); + mc = __xen_mc_entry(0); + + MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]); +} + +static void xen_load_tls(struct thread_struct *t, unsigned int cpu) +{ + /* + * In lazy mode we need to zero %fs, otherwise we may get an + * exception between the new %fs descriptor being loaded and + * %fs being effectively cleared at __switch_to(). + */ + if (xen_get_lazy_mode() == XEN_LAZY_CPU) + loadsegment(fs, 0); + + xen_mc_batch(); + + load_TLS_descriptor(t, cpu, 0); + load_TLS_descriptor(t, cpu, 1); + load_TLS_descriptor(t, cpu, 2); + + xen_mc_issue(XEN_LAZY_CPU); +} + +static void xen_load_gs_index(unsigned int idx) +{ + if (HYPERVISOR_set_segment_base(SEGBASE_GS_USER_SEL, idx)) + BUG(); +} + +static void xen_write_ldt_entry(struct desc_struct *dt, int entrynum, + const void *ptr) +{ + xmaddr_t mach_lp = arbitrary_virt_to_machine(&dt[entrynum]); + u64 entry = *(u64 *)ptr; + + trace_xen_cpu_write_ldt_entry(dt, entrynum, entry); + + preempt_disable(); + + xen_mc_flush(); + if (HYPERVISOR_update_descriptor(mach_lp.maddr, entry)) + BUG(); + + preempt_enable(); +} + +void noist_exc_debug(struct pt_regs *regs); + +DEFINE_IDTENTRY_RAW(xenpv_exc_nmi) +{ + /* On Xen PV, NMI doesn't use IST. The C part is the same as native. */ + exc_nmi(regs); +} + +DEFINE_IDTENTRY_RAW_ERRORCODE(xenpv_exc_double_fault) +{ + /* On Xen PV, DF doesn't use IST. The C part is the same as native. */ + exc_double_fault(regs, error_code); +} + +DEFINE_IDTENTRY_RAW(xenpv_exc_debug) +{ + /* + * There's no IST on Xen PV, but we still need to dispatch + * to the correct handler. + */ + if (user_mode(regs)) + noist_exc_debug(regs); + else + exc_debug(regs); +} + +DEFINE_IDTENTRY_RAW(exc_xen_unknown_trap) +{ + /* This should never happen and there is no way to handle it. 
*/ + instrumentation_begin(); + pr_err("Unknown trap in Xen PV mode."); + BUG(); + instrumentation_end(); +} + +#ifdef CONFIG_X86_MCE +DEFINE_IDTENTRY_RAW(xenpv_exc_machine_check) +{ + /* + * There's no IST on Xen PV, but we still need to dispatch + * to the correct handler. + */ + if (user_mode(regs)) + noist_exc_machine_check(regs); + else + exc_machine_check(regs); +} +#endif + +struct trap_array_entry { + void (*orig)(void); + void (*xen)(void); + bool ist_okay; +}; + +#define TRAP_ENTRY(func, ist_ok) { \ + .orig = asm_##func, \ + .xen = xen_asm_##func, \ + .ist_okay = ist_ok } + +#define TRAP_ENTRY_REDIR(func, ist_ok) { \ + .orig = asm_##func, \ + .xen = xen_asm_xenpv_##func, \ + .ist_okay = ist_ok } + +static struct trap_array_entry trap_array[] = { + TRAP_ENTRY_REDIR(exc_debug, true ), + TRAP_ENTRY_REDIR(exc_double_fault, true ), +#ifdef CONFIG_X86_MCE + TRAP_ENTRY_REDIR(exc_machine_check, true ), +#endif + TRAP_ENTRY_REDIR(exc_nmi, true ), + TRAP_ENTRY(exc_int3, false ), + TRAP_ENTRY(exc_overflow, false ), +#ifdef CONFIG_IA32_EMULATION + TRAP_ENTRY(int80_emulation, false ), +#endif + TRAP_ENTRY(exc_page_fault, false ), + TRAP_ENTRY(exc_divide_error, false ), + TRAP_ENTRY(exc_bounds, false ), + TRAP_ENTRY(exc_invalid_op, false ), + TRAP_ENTRY(exc_device_not_available, false ), + TRAP_ENTRY(exc_coproc_segment_overrun, false ), + TRAP_ENTRY(exc_invalid_tss, false ), + TRAP_ENTRY(exc_segment_not_present, false ), + TRAP_ENTRY(exc_stack_segment, false ), + TRAP_ENTRY(exc_general_protection, false ), + TRAP_ENTRY(exc_spurious_interrupt_bug, false ), + TRAP_ENTRY(exc_coprocessor_error, false ), + TRAP_ENTRY(exc_alignment_check, false ), + TRAP_ENTRY(exc_simd_coprocessor_error, false ), +#ifdef CONFIG_X86_CET + TRAP_ENTRY(exc_control_protection, false ), +#endif +}; + +static bool __ref get_trap_addr(void **addr, unsigned int ist) +{ + unsigned int nr; + bool ist_okay = false; + bool found = false; + + /* + * Replace trap handler addresses by Xen specific ones. + * Check for known traps using IST and whitelist them. + * The debugger ones are the only ones we care about. + * Xen will handle faults like double_fault, so we should never see + * them. Warn if there's an unexpected IST-using fault handler. 
+ */ + for (nr = 0; nr < ARRAY_SIZE(trap_array); nr++) { + struct trap_array_entry *entry = trap_array + nr; + + if (*addr == entry->orig) { + *addr = entry->xen; + ist_okay = entry->ist_okay; + found = true; + break; + } + } + + if (nr == ARRAY_SIZE(trap_array) && + *addr >= (void *)early_idt_handler_array[0] && + *addr < (void *)early_idt_handler_array[NUM_EXCEPTION_VECTORS]) { + nr = (*addr - (void *)early_idt_handler_array[0]) / + EARLY_IDT_HANDLER_SIZE; + *addr = (void *)xen_early_idt_handler_array[nr]; + found = true; + } + + if (!found) + *addr = (void *)xen_asm_exc_xen_unknown_trap; + + if (WARN_ON(found && ist != 0 && !ist_okay)) + return false; + + return true; +} + +static int cvt_gate_to_trap(int vector, const gate_desc *val, + struct trap_info *info) +{ + unsigned long addr; + + if (val->bits.type != GATE_TRAP && val->bits.type != GATE_INTERRUPT) + return 0; + + info->vector = vector; + + addr = gate_offset(val); + if (!get_trap_addr((void **)&addr, val->bits.ist)) + return 0; + info->address = addr; + + info->cs = gate_segment(val); + info->flags = val->bits.dpl; + /* interrupt gates clear IF */ + if (val->bits.type == GATE_INTERRUPT) + info->flags |= 1 << 2; + + return 1; +} + +/* Locations of each CPU's IDT */ +static DEFINE_PER_CPU(struct desc_ptr, idt_desc); + +/* Set an IDT entry. If the entry is part of the current IDT, then + also update Xen. */ +static void xen_write_idt_entry(gate_desc *dt, int entrynum, const gate_desc *g) +{ + unsigned long p = (unsigned long)&dt[entrynum]; + unsigned long start, end; + + trace_xen_cpu_write_idt_entry(dt, entrynum, g); + + preempt_disable(); + + start = __this_cpu_read(idt_desc.address); + end = start + __this_cpu_read(idt_desc.size) + 1; + + xen_mc_flush(); + + native_write_idt_entry(dt, entrynum, g); + + if (p >= start && (p + 8) <= end) { + struct trap_info info[2]; + + info[1].address = 0; + + if (cvt_gate_to_trap(entrynum, g, &info[0])) + if (HYPERVISOR_set_trap_table(info)) + BUG(); + } + + preempt_enable(); +} + +static unsigned xen_convert_trap_info(const struct desc_ptr *desc, + struct trap_info *traps, bool full) +{ + unsigned in, out, count; + + count = (desc->size+1) / sizeof(gate_desc); + BUG_ON(count > 256); + + for (in = out = 0; in < count; in++) { + gate_desc *entry = (gate_desc *)(desc->address) + in; + + if (cvt_gate_to_trap(in, entry, &traps[out]) || full) + out++; + } + + return out; +} + +void xen_copy_trap_info(struct trap_info *traps) +{ + const struct desc_ptr *desc = this_cpu_ptr(&idt_desc); + + xen_convert_trap_info(desc, traps, true); +} + +/* Load a new IDT into Xen. In principle this can be per-CPU, so we + hold a spinlock to protect the static traps[] array (static because + it avoids allocation, and saves stack space). */ +static void xen_load_idt(const struct desc_ptr *desc) +{ + static DEFINE_SPINLOCK(lock); + static struct trap_info traps[257]; + static const struct trap_info zero = { }; + unsigned out; + + trace_xen_cpu_load_idt(desc); + + spin_lock(&lock); + + memcpy(this_cpu_ptr(&idt_desc), desc, sizeof(idt_desc)); + + out = xen_convert_trap_info(desc, traps, false); + traps[out] = zero; + + xen_mc_flush(); + if (HYPERVISOR_set_trap_table(traps)) + BUG(); + + spin_unlock(&lock); +} + +/* Write a GDT descriptor entry. Ignore LDT descriptors, since + they're handled differently. 
*/ +static void xen_write_gdt_entry(struct desc_struct *dt, int entry, + const void *desc, int type) +{ + trace_xen_cpu_write_gdt_entry(dt, entry, desc, type); + + preempt_disable(); + + switch (type) { + case DESC_LDT: + case DESC_TSS: + /* ignore */ + break; + + default: { + xmaddr_t maddr = arbitrary_virt_to_machine(&dt[entry]); + + xen_mc_flush(); + if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) + BUG(); + } + + } + + preempt_enable(); +} + +/* + * Version of write_gdt_entry for use at early boot-time needed to + * update an entry as simply as possible. + */ +static void __init xen_write_gdt_entry_boot(struct desc_struct *dt, int entry, + const void *desc, int type) +{ + trace_xen_cpu_write_gdt_entry(dt, entry, desc, type); + + switch (type) { + case DESC_LDT: + case DESC_TSS: + /* ignore */ + break; + + default: { + xmaddr_t maddr = virt_to_machine(&dt[entry]); + + if (HYPERVISOR_update_descriptor(maddr.maddr, *(u64 *)desc)) + dt[entry] = *(struct desc_struct *)desc; + } + + } +} + +static void xen_load_sp0(unsigned long sp0) +{ + struct multicall_space mcs; + + mcs = xen_mc_entry(0); + MULTI_stack_switch(mcs.mc, __KERNEL_DS, sp0); + xen_mc_issue(XEN_LAZY_CPU); + this_cpu_write(cpu_tss_rw.x86_tss.sp0, sp0); +} + +#ifdef CONFIG_X86_IOPL_IOPERM +static void xen_invalidate_io_bitmap(void) +{ + struct physdev_set_iobitmap iobitmap = { + .bitmap = NULL, + .nr_ports = 0, + }; + + native_tss_invalidate_io_bitmap(); + HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap); +} + +static void xen_update_io_bitmap(void) +{ + struct physdev_set_iobitmap iobitmap; + struct tss_struct *tss = this_cpu_ptr(&cpu_tss_rw); + + native_tss_update_io_bitmap(); + + iobitmap.bitmap = (uint8_t *)(&tss->x86_tss) + + tss->x86_tss.io_bitmap_base; + if (tss->x86_tss.io_bitmap_base == IO_BITMAP_OFFSET_INVALID) + iobitmap.nr_ports = 0; + else + iobitmap.nr_ports = IO_BITMAP_BITS; + + HYPERVISOR_physdev_op(PHYSDEVOP_set_iobitmap, &iobitmap); +} +#endif + +static void xen_io_delay(void) +{ +} + +static DEFINE_PER_CPU(unsigned long, xen_cr0_value); + +static unsigned long xen_read_cr0(void) +{ + unsigned long cr0 = this_cpu_read(xen_cr0_value); + + if (unlikely(cr0 == 0)) { + cr0 = native_read_cr0(); + this_cpu_write(xen_cr0_value, cr0); + } + + return cr0; +} + +static void xen_write_cr0(unsigned long cr0) +{ + struct multicall_space mcs; + + this_cpu_write(xen_cr0_value, cr0); + + /* Only pay attention to cr0.TS; everything else is + ignored. */ + mcs = xen_mc_entry(0); + + MULTI_fpu_taskswitch(mcs.mc, (cr0 & X86_CR0_TS) != 0); + + xen_mc_issue(XEN_LAZY_CPU); +} + +static void xen_write_cr4(unsigned long cr4) +{ + cr4 &= ~(X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PCE); + + native_write_cr4(cr4); +} + +static u64 xen_do_read_msr(unsigned int msr, int *err) +{ + u64 val = 0; /* Avoid uninitialized value for safe variant. */ + + if (pmu_msr_read(msr, &val, err)) + return val; + + if (err) + val = native_read_msr_safe(msr, err); + else + val = native_read_msr(msr); + + switch (msr) { + case MSR_IA32_APICBASE: + val &= ~X2APIC_ENABLE; + break; + } + return val; +} + +static void set_seg(unsigned int which, unsigned int low, unsigned int high, + int *err) +{ + u64 base = ((u64)high << 32) | low; + + if (HYPERVISOR_set_segment_base(which, base) == 0) + return; + + if (err) + *err = -EIO; + else + WARN(1, "Xen set_segment_base(%u, %llx) failed\n", which, base); +} + +/* + * Support write_msr_safe() and write_msr() semantics. + * With err == NULL write_msr() semantics are selected. 
+ * Supplying an err pointer requires err to be pre-initialized with 0. + */ +static void xen_do_write_msr(unsigned int msr, unsigned int low, + unsigned int high, int *err) +{ + switch (msr) { + case MSR_FS_BASE: + set_seg(SEGBASE_FS, low, high, err); + break; + + case MSR_KERNEL_GS_BASE: + set_seg(SEGBASE_GS_USER, low, high, err); + break; + + case MSR_GS_BASE: + set_seg(SEGBASE_GS_KERNEL, low, high, err); + break; + + case MSR_STAR: + case MSR_CSTAR: + case MSR_LSTAR: + case MSR_SYSCALL_MASK: + case MSR_IA32_SYSENTER_CS: + case MSR_IA32_SYSENTER_ESP: + case MSR_IA32_SYSENTER_EIP: + /* Fast syscall setup is all done in hypercalls, so + these are all ignored. Stub them out here to stop + Xen console noise. */ + break; + + default: + if (!pmu_msr_write(msr, low, high, err)) { + if (err) + *err = native_write_msr_safe(msr, low, high); + else + native_write_msr(msr, low, high); + } + } +} + +static u64 xen_read_msr_safe(unsigned int msr, int *err) +{ + return xen_do_read_msr(msr, err); +} + +static int xen_write_msr_safe(unsigned int msr, unsigned int low, + unsigned int high) +{ + int err = 0; + + xen_do_write_msr(msr, low, high, &err); + + return err; +} + +static u64 xen_read_msr(unsigned int msr) +{ + int err; + + return xen_do_read_msr(msr, xen_msr_safe ? &err : NULL); +} + +static void xen_write_msr(unsigned int msr, unsigned low, unsigned high) +{ + int err; + + xen_do_write_msr(msr, low, high, xen_msr_safe ? &err : NULL); +} + +/* This is called once we have the cpu_possible_mask */ +void __init xen_setup_vcpu_info_placement(void) +{ + int cpu; + + for_each_possible_cpu(cpu) { + /* Set up direct vCPU id mapping for PV guests. */ + per_cpu(xen_vcpu_id, cpu) = cpu; + xen_vcpu_setup(cpu); + } + + pv_ops.irq.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct); + pv_ops.irq.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct); + pv_ops.irq.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct); + pv_ops.mmu.read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2_direct); +} + +static const struct pv_info xen_info __initconst = { + .extra_user_64bit_cs = FLAT_USER_CS64, + .name = "Xen", +}; + +static const typeof(pv_ops) xen_cpu_ops __initconst = { + .cpu = { + .cpuid = xen_cpuid, + + .set_debugreg = xen_set_debugreg, + .get_debugreg = xen_get_debugreg, + + .read_cr0 = xen_read_cr0, + .write_cr0 = xen_write_cr0, + + .write_cr4 = xen_write_cr4, + + .wbinvd = pv_native_wbinvd, + + .read_msr = xen_read_msr, + .write_msr = xen_write_msr, + + .read_msr_safe = xen_read_msr_safe, + .write_msr_safe = xen_write_msr_safe, + + .read_pmc = xen_read_pmc, + + .load_tr_desc = paravirt_nop, + .set_ldt = xen_set_ldt, + .load_gdt = xen_load_gdt, + .load_idt = xen_load_idt, + .load_tls = xen_load_tls, + .load_gs_index = xen_load_gs_index, + + .alloc_ldt = xen_alloc_ldt, + .free_ldt = xen_free_ldt, + + .store_tr = xen_store_tr, + + .write_ldt_entry = xen_write_ldt_entry, + .write_gdt_entry = xen_write_gdt_entry, + .write_idt_entry = xen_write_idt_entry, + .load_sp0 = xen_load_sp0, + +#ifdef CONFIG_X86_IOPL_IOPERM + .invalidate_io_bitmap = xen_invalidate_io_bitmap, + .update_io_bitmap = xen_update_io_bitmap, +#endif + .io_delay = xen_io_delay, + + .start_context_switch = xen_start_context_switch, + .end_context_switch = xen_end_context_switch, + }, +}; + +static void xen_restart(char *msg) +{ + xen_reboot(SHUTDOWN_reboot); +} + +static void xen_machine_halt(void) +{ + xen_reboot(SHUTDOWN_poweroff); +} + +static void xen_machine_power_off(void) +{ + do_kernel_power_off(); + xen_reboot(SHUTDOWN_poweroff); +} + +static 
void xen_crash_shutdown(struct pt_regs *regs) +{ + xen_reboot(SHUTDOWN_crash); +} + +static const struct machine_ops xen_machine_ops __initconst = { + .restart = xen_restart, + .halt = xen_machine_halt, + .power_off = xen_machine_power_off, + .shutdown = xen_machine_halt, + .crash_shutdown = xen_crash_shutdown, + .emergency_restart = xen_emergency_restart, +}; + +static unsigned char xen_get_nmi_reason(void) +{ + unsigned char reason = 0; + + /* Construct a value which looks like it came from port 0x61. */ + if (test_bit(_XEN_NMIREASON_io_error, + &HYPERVISOR_shared_info->arch.nmi_reason)) + reason |= NMI_REASON_IOCHK; + if (test_bit(_XEN_NMIREASON_pci_serr, + &HYPERVISOR_shared_info->arch.nmi_reason)) + reason |= NMI_REASON_SERR; + + return reason; +} + +static void __init xen_boot_params_init_edd(void) +{ +#if IS_ENABLED(CONFIG_EDD) + struct xen_platform_op op; + struct edd_info *edd_info; + u32 *mbr_signature; + unsigned nr; + int ret; + + edd_info = boot_params.eddbuf; + mbr_signature = boot_params.edd_mbr_sig_buffer; + + op.cmd = XENPF_firmware_info; + + op.u.firmware_info.type = XEN_FW_DISK_INFO; + for (nr = 0; nr < EDDMAXNR; nr++) { + struct edd_info *info = edd_info + nr; + + op.u.firmware_info.index = nr; + info->params.length = sizeof(info->params); + set_xen_guest_handle(op.u.firmware_info.u.disk_info.edd_params, + &info->params); + ret = HYPERVISOR_platform_op(&op); + if (ret) + break; + +#define C(x) info->x = op.u.firmware_info.u.disk_info.x + C(device); + C(version); + C(interface_support); + C(legacy_max_cylinder); + C(legacy_max_head); + C(legacy_sectors_per_track); +#undef C + } + boot_params.eddbuf_entries = nr; + + op.u.firmware_info.type = XEN_FW_DISK_MBR_SIGNATURE; + for (nr = 0; nr < EDD_MBR_SIG_MAX; nr++) { + op.u.firmware_info.index = nr; + ret = HYPERVISOR_platform_op(&op); + if (ret) + break; + mbr_signature[nr] = op.u.firmware_info.u.disk_mbr_signature.mbr_signature; + } + boot_params.edd_mbr_sig_buf_entries = nr; +#endif +} + +/* + * Set up the GDT and segment registers for -fstack-protector. Until + * we do this, we have to be careful not to call any stack-protected + * function, which is most of the kernel. + */ +static void __init xen_setup_gdt(int cpu) +{ + pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry_boot; + pv_ops.cpu.load_gdt = xen_load_gdt_boot; + + switch_gdt_and_percpu_base(cpu); + + pv_ops.cpu.write_gdt_entry = xen_write_gdt_entry; + pv_ops.cpu.load_gdt = xen_load_gdt; +} + +static void __init xen_dom0_set_legacy_features(void) +{ + x86_platform.legacy.rtc = 1; +} + +static void __init xen_domu_set_legacy_features(void) +{ + x86_platform.legacy.rtc = 0; +} + +extern void early_xen_iret_patch(void); + +/* First C function to be called on Xen boot */ +asmlinkage __visible void __init xen_start_kernel(struct start_info *si) +{ + struct physdev_set_iopl set_iopl; + unsigned long initrd_start = 0; + int rc; + + if (!si) + return; + + clear_bss(); + + xen_start_info = si; + + __text_gen_insn(&early_xen_iret_patch, + JMP32_INSN_OPCODE, &early_xen_iret_patch, &xen_iret, + JMP32_INSN_SIZE); + + xen_domain_type = XEN_PV_DOMAIN; + xen_start_flags = xen_start_info->flags; + + xen_setup_features(); + + /* Install Xen paravirt ops */ + pv_info = xen_info; + pv_ops.cpu = xen_cpu_ops.cpu; + xen_init_irq_ops(); + + /* + * Setup xen_vcpu early because it is needed for + * local_irq_disable(), irqs_disabled(), e.g. in printk(). + * + * Don't do the full vcpu_info placement stuff until we have + * the cpu_possible_mask and a non-dummy shared_info. 
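+	 * xen_vcpu_info_reset(0) below just points the boot CPU's xen_vcpu at the + * legacy vcpu_info slot inside the shared_info page; the per-CPU placement + * proper happens later in xen_setup_vcpu_info_placement().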
+ */ + xen_vcpu_info_reset(0); + + x86_platform.get_nmi_reason = xen_get_nmi_reason; + x86_platform.realmode_reserve = x86_init_noop; + x86_platform.realmode_init = x86_init_noop; + + x86_init.resources.memory_setup = xen_memory_setup; + x86_init.irqs.intr_mode_select = x86_init_noop; + x86_init.irqs.intr_mode_init = x86_64_probe_apic; + x86_init.oem.arch_setup = xen_arch_setup; + x86_init.oem.banner = xen_banner; + x86_init.hyper.init_platform = xen_pv_init_platform; + x86_init.hyper.guest_late_init = xen_pv_guest_late_init; + + /* + * Set up some pagetable state before starting to set any ptes. + */ + + xen_setup_machphys_mapping(); + xen_init_mmu_ops(); + + /* Prevent unwanted bits from being set in PTEs. */ + __supported_pte_mask &= ~_PAGE_GLOBAL; + __default_kernel_pte_mask &= ~_PAGE_GLOBAL; + + /* Get mfn list */ + xen_build_dynamic_phys_to_machine(); + + /* Work out if we support NX */ + get_cpu_cap(&boot_cpu_data); + x86_configure_nx(); + + /* + * Set up kernel GDT and segment registers, mainly so that + * -fstack-protector code can be executed. + */ + xen_setup_gdt(0); + + /* Determine virtual and physical address sizes */ + get_cpu_address_sizes(&boot_cpu_data); + + /* Let's presume PV guests always boot on vCPU with id 0. */ + per_cpu(xen_vcpu_id, 0) = 0; + + idt_setup_early_handler(); + + xen_init_capabilities(); + + /* + * set up the basic apic ops. + */ + xen_init_apic(); + + machine_ops = xen_machine_ops; + + /* + * The only reliable way to retain the initial address of the + * percpu gdt_page is to remember it here, so we can go and + * mark it RW later, when the initial percpu area is freed. + */ + xen_initial_gdt = &per_cpu(gdt_page, 0); + + xen_smp_init(); + +#ifdef CONFIG_ACPI_NUMA + /* + * The pages we from Xen are not related to machine pages, so + * any NUMA information the kernel tries to get from ACPI will + * be meaningless. Prevent it from trying. + */ + disable_srat(); +#endif + WARN_ON(xen_cpuhp_setup(xen_cpu_up_prepare_pv, xen_cpu_dead_pv)); + + local_irq_disable(); + early_boot_irqs_disabled = true; + + xen_raw_console_write("mapping kernel into physical memory\n"); + xen_setup_kernel_pagetable((pgd_t *)xen_start_info->pt_base, + xen_start_info->nr_pages); + xen_reserve_special_pages(); + + /* + * We used to do this in xen_arch_setup, but that is too late + * on AMD were early_cpu_init (run before ->arch_setup()) calls + * early_amd_init which pokes 0xcf8 port. 
+ */ + set_iopl.iopl = 1; + rc = HYPERVISOR_physdev_op(PHYSDEVOP_set_iopl, &set_iopl); + if (rc != 0) + xen_raw_printk("physdev_op failed %d\n", rc); + + + if (xen_start_info->mod_start) { + if (xen_start_info->flags & SIF_MOD_START_PFN) + initrd_start = PFN_PHYS(xen_start_info->mod_start); + else + initrd_start = __pa(xen_start_info->mod_start); + } + + /* Poke various useful things into boot_params */ + boot_params.hdr.type_of_loader = (9 << 4) | 0; + boot_params.hdr.ramdisk_image = initrd_start; + boot_params.hdr.ramdisk_size = xen_start_info->mod_len; + boot_params.hdr.cmd_line_ptr = __pa(xen_start_info->cmd_line); + boot_params.hdr.hardware_subarch = X86_SUBARCH_XEN; + + if (!xen_initial_domain()) { + if (pci_xen) + x86_init.pci.arch_init = pci_xen_init; + x86_platform.set_legacy_features = + xen_domu_set_legacy_features; + } else { + const struct dom0_vga_console_info *info = + (void *)((char *)xen_start_info + + xen_start_info->console.dom0.info_off); + struct xen_platform_op op = { + .cmd = XENPF_firmware_info, + .interface_version = XENPF_INTERFACE_VERSION, + .u.firmware_info.type = XEN_FW_KBD_SHIFT_FLAGS, + }; + + x86_platform.set_legacy_features = + xen_dom0_set_legacy_features; + xen_init_vga(info, xen_start_info->console.dom0.info_size, + &boot_params.screen_info); + xen_start_info->console.domU.mfn = 0; + xen_start_info->console.domU.evtchn = 0; + + if (HYPERVISOR_platform_op(&op) == 0) + boot_params.kbd_status = op.u.firmware_info.u.kbd_shift_flags; + + /* Make sure ACS will be enabled */ + pci_request_acs(); + + xen_acpi_sleep_register(); + + xen_boot_params_init_edd(); + +#ifdef CONFIG_ACPI + /* + * Disable selecting "Firmware First mode" for correctable + * memory errors, as this is the duty of the hypervisor to + * decide. + */ + acpi_disable_cmcff = 1; +#endif + } + + xen_add_preferred_consoles(); + +#ifdef CONFIG_PCI + /* PCI BIOS service won't work from a PV guest. 
*/ + pci_probe &= ~PCI_PROBE_BIOS; +#endif + xen_raw_console_write("about to get started...\n"); + + /* We need this for printk timestamps */ + xen_setup_runstate_info(0); + + xen_efi_init(&boot_params); + + /* Start the world */ + cr4_init_shadow(); /* 32b kernel does this in i386_start_kernel() */ + x86_64_start_reservations((char *)__pa_symbol(&boot_params)); +} + +static int xen_cpu_up_prepare_pv(unsigned int cpu) +{ + int rc; + + if (per_cpu(xen_vcpu, cpu) == NULL) + return -ENODEV; + + xen_setup_timer(cpu); + + rc = xen_smp_intr_init(cpu); + if (rc) { + WARN(1, "xen_smp_intr_init() for CPU %d failed: %d\n", + cpu, rc); + return rc; + } + + rc = xen_smp_intr_init_pv(cpu); + if (rc) { + WARN(1, "xen_smp_intr_init_pv() for CPU %d failed: %d\n", + cpu, rc); + return rc; + } + + return 0; +} + +static int xen_cpu_dead_pv(unsigned int cpu) +{ + xen_smp_intr_free(cpu); + xen_smp_intr_free_pv(cpu); + + xen_teardown_timer(cpu); + + return 0; +} + +static uint32_t __init xen_platform_pv(void) +{ + if (xen_pv_domain()) + return xen_cpuid_base(); + + return 0; +} + +const __initconst struct hypervisor_x86 x86_hyper_xen_pv = { + .name = "Xen PV", + .detect = xen_platform_pv, + .type = X86_HYPER_XEN_PV, + .runtime.pin_vcpu = xen_pin_vcpu, + .ignore_nopv = true, +}; diff --git a/arch/x86/xen/enlighten_pvh.c b/arch/x86/xen/enlighten_pvh.c new file mode 100644 index 0000000000..ada3868c02 --- /dev/null +++ b/arch/x86/xen/enlighten_pvh.c @@ -0,0 +1,74 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/acpi.h> +#include <linux/export.h> + +#include <xen/hvc-console.h> + +#include <asm/io_apic.h> +#include <asm/hypervisor.h> +#include <asm/e820/api.h> + +#include <xen/xen.h> +#include <asm/xen/interface.h> +#include <asm/xen/hypercall.h> + +#include <xen/interface/memory.h> + +#include "xen-ops.h" + +/* + * PVH variables. + * + * The variable xen_pvh needs to live in a data segment since it is used + * after startup_{32|64} is invoked, which will clear the .bss segment. 
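+ * (The flag is set from the early PVH entry path before startup_{32|64} + * runs, so a .bss copy would simply be zeroed again.)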
+ */ +bool __ro_after_init xen_pvh; +EXPORT_SYMBOL_GPL(xen_pvh); + +void __init xen_pvh_init(struct boot_params *boot_params) +{ + u32 msr; + u64 pfn; + + xen_pvh = 1; + xen_domain_type = XEN_HVM_DOMAIN; + xen_start_flags = pvh_start_info.flags; + + msr = cpuid_ebx(xen_cpuid_base() + 2); + pfn = __pa(hypercall_page); + wrmsr_safe(msr, (u32)pfn, (u32)(pfn >> 32)); + + if (xen_initial_domain()) + x86_init.oem.arch_setup = xen_add_preferred_consoles; + x86_init.oem.banner = xen_banner; + + xen_efi_init(boot_params); + + if (xen_initial_domain()) { + struct xen_platform_op op = { + .cmd = XENPF_get_dom0_console, + }; + int ret = HYPERVISOR_platform_op(&op); + + if (ret > 0) + xen_init_vga(&op.u.dom0_console, + min(ret * sizeof(char), + sizeof(op.u.dom0_console)), + &boot_params->screen_info); + } +} + +void __init mem_map_via_hcall(struct boot_params *boot_params_p) +{ + struct xen_memory_map memmap; + int rc; + + memmap.nr_entries = ARRAY_SIZE(boot_params_p->e820_table); + set_xen_guest_handle(memmap.buffer, boot_params_p->e820_table); + rc = HYPERVISOR_memory_op(XENMEM_memory_map, &memmap); + if (rc) { + xen_raw_printk("XENMEM_memory_map failed (%d)\n", rc); + BUG(); + } + boot_params_p->e820_entries = memmap.nr_entries; +} diff --git a/arch/x86/xen/grant-table.c b/arch/x86/xen/grant-table.c new file mode 100644 index 0000000000..1e681bf625 --- /dev/null +++ b/arch/x86/xen/grant-table.c @@ -0,0 +1,169 @@ +// SPDX-License-Identifier: GPL-2.0 OR MIT +/****************************************************************************** + * grant_table.c + * x86 specific part + * + * Granting foreign access to our memory reservation. + * + * Copyright (c) 2005-2006, Christopher Clark + * Copyright (c) 2004-2005, K A Fraser + * Copyright (c) 2008 Isaku Yamahata <yamahata at valinux co jp> + * VA Linux Systems Japan. Split out x86 specific part. 
+ */ + +#include <linux/sched.h> +#include <linux/mm.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include <xen/interface/xen.h> +#include <xen/page.h> +#include <xen/grant_table.h> +#include <xen/xen.h> + + +static struct gnttab_vm_area { + struct vm_struct *area; + pte_t **ptes; + int idx; +} gnttab_shared_vm_area, gnttab_status_vm_area; + +int arch_gnttab_map_shared(unsigned long *frames, unsigned long nr_gframes, + unsigned long max_nr_gframes, + void **__shared) +{ + void *shared = *__shared; + unsigned long addr; + unsigned long i; + + if (shared == NULL) + *__shared = shared = gnttab_shared_vm_area.area->addr; + + addr = (unsigned long)shared; + + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, gnttab_shared_vm_area.ptes[i], + mfn_pte(frames[i], PAGE_KERNEL)); + addr += PAGE_SIZE; + } + + return 0; +} + +int arch_gnttab_map_status(uint64_t *frames, unsigned long nr_gframes, + unsigned long max_nr_gframes, + grant_status_t **__shared) +{ + grant_status_t *shared = *__shared; + unsigned long addr; + unsigned long i; + + if (shared == NULL) + *__shared = shared = gnttab_status_vm_area.area->addr; + + addr = (unsigned long)shared; + + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, gnttab_status_vm_area.ptes[i], + mfn_pte(frames[i], PAGE_KERNEL)); + addr += PAGE_SIZE; + } + + return 0; +} + +void arch_gnttab_unmap(void *shared, unsigned long nr_gframes) +{ + pte_t **ptes; + unsigned long addr; + unsigned long i; + + if (shared == gnttab_status_vm_area.area->addr) + ptes = gnttab_status_vm_area.ptes; + else + ptes = gnttab_shared_vm_area.ptes; + + addr = (unsigned long)shared; + + for (i = 0; i < nr_gframes; i++) { + set_pte_at(&init_mm, addr, ptes[i], __pte(0)); + addr += PAGE_SIZE; + } +} + +static int gnttab_apply(pte_t *pte, unsigned long addr, void *data) +{ + struct gnttab_vm_area *area = data; + + area->ptes[area->idx++] = pte; + return 0; +} + +static int arch_gnttab_valloc(struct gnttab_vm_area *area, unsigned nr_frames) +{ + area->ptes = kmalloc_array(nr_frames, sizeof(*area->ptes), GFP_KERNEL); + if (area->ptes == NULL) + return -ENOMEM; + area->area = get_vm_area(PAGE_SIZE * nr_frames, VM_IOREMAP); + if (!area->area) + goto out_free_ptes; + if (apply_to_page_range(&init_mm, (unsigned long)area->area->addr, + PAGE_SIZE * nr_frames, gnttab_apply, area)) + goto out_free_vm_area; + return 0; +out_free_vm_area: + free_vm_area(area->area); +out_free_ptes: + kfree(area->ptes); + return -ENOMEM; +} + +static void arch_gnttab_vfree(struct gnttab_vm_area *area) +{ + free_vm_area(area->area); + kfree(area->ptes); +} + +int arch_gnttab_init(unsigned long nr_shared, unsigned long nr_status) +{ + int ret; + + if (!xen_pv_domain()) + return 0; + + ret = arch_gnttab_valloc(&gnttab_shared_vm_area, nr_shared); + if (ret < 0) + return ret; + + /* + * Always allocate the space for the status frames in case + * we're migrated to a host with V2 support. 
+ */ + ret = arch_gnttab_valloc(&gnttab_status_vm_area, nr_status); + if (ret < 0) + goto err; + + return 0; +err: + arch_gnttab_vfree(&gnttab_shared_vm_area); + return -ENOMEM; +} + +#ifdef CONFIG_XEN_PVH +#include <xen/events.h> +#include <xen/xen-ops.h> +static int __init xen_pvh_gnttab_setup(void) +{ + if (!xen_pvh_domain()) + return -ENODEV; + + xen_auto_xlat_grant_frames.count = gnttab_max_grant_frames(); + + return xen_xlate_map_ballooned_pages(&xen_auto_xlat_grant_frames.pfn, + &xen_auto_xlat_grant_frames.vaddr, + xen_auto_xlat_grant_frames.count); +} +/* Call it _before_ __gnttab_init as we need to initialize the + * xen_auto_xlat_grant_frames first. */ +core_initcall(xen_pvh_gnttab_setup); +#endif diff --git a/arch/x86/xen/irq.c b/arch/x86/xen/irq.c new file mode 100644 index 0000000000..6092fea7d6 --- /dev/null +++ b/arch/x86/xen/irq.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/hardirq.h> + +#include <asm/x86_init.h> + +#include <xen/interface/xen.h> +#include <xen/interface/sched.h> +#include <xen/interface/vcpu.h> +#include <xen/features.h> +#include <xen/events.h> + +#include <asm/xen/hypercall.h> +#include <asm/xen/hypervisor.h> + +#include "xen-ops.h" + +/* + * Force a proper event-channel callback from Xen after clearing the + * callback mask. We do this in a very simple manner, by making a call + * down into Xen. The pending flag will be checked by Xen on return. + */ +noinstr void xen_force_evtchn_callback(void) +{ + (void)HYPERVISOR_xen_version(0, NULL); +} + +static noinstr void xen_safe_halt(void) +{ + /* Blocking includes an implicit local_irq_enable(). */ + if (HYPERVISOR_sched_op(SCHEDOP_block, NULL) != 0) + BUG(); +} + +static void xen_halt(void) +{ + if (irqs_disabled()) + HYPERVISOR_vcpu_op(VCPUOP_down, + xen_vcpu_nr(smp_processor_id()), NULL); + else + xen_safe_halt(); +} + +static const typeof(pv_ops) xen_irq_ops __initconst = { + .irq = { + /* Initial interrupt flag handling only called while interrupts off. 
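+		 * So save_fl just reports interrupts as disabled (paravirt_ret0, i.e. + * X86_EFLAGS_IF clear), irq_disable is a nop and irq_enable would be a + * bug; the real direct variants are installed later by + * xen_setup_vcpu_info_placement().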
*/ + .save_fl = __PV_IS_CALLEE_SAVE(paravirt_ret0), + .irq_disable = __PV_IS_CALLEE_SAVE(paravirt_nop), + .irq_enable = __PV_IS_CALLEE_SAVE(paravirt_BUG), + + .safe_halt = xen_safe_halt, + .halt = xen_halt, + }, +}; + +void __init xen_init_irq_ops(void) +{ + pv_ops.irq = xen_irq_ops.irq; + x86_init.irqs.intr_init = xen_init_IRQ; +} diff --git a/arch/x86/xen/mmu.c b/arch/x86/xen/mmu.c new file mode 100644 index 0000000000..60e9c37fd7 --- /dev/null +++ b/arch/x86/xen/mmu.c @@ -0,0 +1,53 @@ +// SPDX-License-Identifier: GPL-2.0 + +#include <linux/pfn.h> +#include <asm/xen/page.h> +#include <asm/xen/hypercall.h> +#include <xen/interface/memory.h> + +#include "multicalls.h" +#include "mmu.h" + +unsigned long arbitrary_virt_to_mfn(void *vaddr) +{ + xmaddr_t maddr = arbitrary_virt_to_machine(vaddr); + + return PFN_DOWN(maddr.maddr); +} + +xmaddr_t arbitrary_virt_to_machine(void *vaddr) +{ + unsigned long address = (unsigned long)vaddr; + unsigned int level; + pte_t *pte; + unsigned offset; + + /* + * if the PFN is in the linear mapped vaddr range, we can just use + * the (quick) virt_to_machine() p2m lookup + */ + if (virt_addr_valid(vaddr)) + return virt_to_machine(vaddr); + + /* otherwise we have to do a (slower) full page-table walk */ + + pte = lookup_address(address, &level); + BUG_ON(pte == NULL); + offset = address & ~PAGE_MASK; + return XMADDR(((phys_addr_t)pte_mfn(*pte) << PAGE_SHIFT) + offset); +} +EXPORT_SYMBOL_GPL(arbitrary_virt_to_machine); + +/* Returns: 0 success */ +int xen_unmap_domain_gfn_range(struct vm_area_struct *vma, + int nr, struct page **pages) +{ + if (xen_feature(XENFEAT_auto_translated_physmap)) + return xen_xlate_unmap_gfn_range(vma, nr, pages); + + if (!pages) + return 0; + + return -EINVAL; +} +EXPORT_SYMBOL_GPL(xen_unmap_domain_gfn_range); diff --git a/arch/x86/xen/mmu.h b/arch/x86/xen/mmu.h new file mode 100644 index 0000000000..6e4c6bd622 --- /dev/null +++ b/arch/x86/xen/mmu.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XEN_MMU_H + +#include <linux/linkage.h> +#include <asm/page.h> + +enum pt_level { + PT_PGD, + PT_P4D, + PT_PUD, + PT_PMD, + PT_PTE +}; + + +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn); + +void set_pte_mfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags); + +pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, unsigned long addr, pte_t *ptep); +void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t pte); + +unsigned long xen_read_cr2_direct(void); + +extern void xen_init_mmu_ops(void); +extern void xen_hvm_init_mmu_ops(void); +#endif /* _XEN_MMU_H */ diff --git a/arch/x86/xen/mmu_hvm.c b/arch/x86/xen/mmu_hvm.c new file mode 100644 index 0000000000..509bdee3ab --- /dev/null +++ b/arch/x86/xen/mmu_hvm.c @@ -0,0 +1,69 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/crash_dump.h> + +#include <xen/interface/xen.h> +#include <xen/hvm.h> + +#include "mmu.h" + +#ifdef CONFIG_PROC_VMCORE +/* + * The kdump kernel has to check whether a pfn of the crashed kernel + * was a ballooned page. vmcore is using this function to decide + * whether to access a pfn of the crashed kernel. + * Returns "false" if the pfn is not backed by a RAM page, the caller may + * handle the pfn special in this case. 
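+ * For such a pfn the vmcore reader can simply supply zeroes instead of + * touching the non-existent page.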
+ */ +static bool xen_vmcore_pfn_is_ram(struct vmcore_cb *cb, unsigned long pfn) +{ + struct xen_hvm_get_mem_type a = { + .domid = DOMID_SELF, + .pfn = pfn, + }; + + if (HYPERVISOR_hvm_op(HVMOP_get_mem_type, &a)) { + pr_warn_once("Unexpected HVMOP_get_mem_type failure\n"); + return true; + } + return a.mem_type != HVMMEM_mmio_dm; +} +static struct vmcore_cb xen_vmcore_cb = { + .pfn_is_ram = xen_vmcore_pfn_is_ram, +}; +#endif + +static void xen_hvm_exit_mmap(struct mm_struct *mm) +{ + struct xen_hvm_pagetable_dying a; + int rc; + + a.domid = DOMID_SELF; + a.gpa = __pa(mm->pgd); + rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); + WARN_ON_ONCE(rc < 0); +} + +static int is_pagetable_dying_supported(void) +{ + struct xen_hvm_pagetable_dying a; + int rc = 0; + + a.domid = DOMID_SELF; + a.gpa = 0x00; + rc = HYPERVISOR_hvm_op(HVMOP_pagetable_dying, &a); + if (rc < 0) { + printk(KERN_DEBUG "HVMOP_pagetable_dying not supported\n"); + return 0; + } + return 1; +} + +void __init xen_hvm_init_mmu_ops(void) +{ + if (is_pagetable_dying_supported()) + pv_ops.mmu.exit_mmap = xen_hvm_exit_mmap; +#ifdef CONFIG_PROC_VMCORE + register_vmcore_cb(&xen_vmcore_cb); +#endif +} diff --git a/arch/x86/xen/mmu_pv.c b/arch/x86/xen/mmu_pv.c new file mode 100644 index 0000000000..b6830554ff --- /dev/null +++ b/arch/x86/xen/mmu_pv.c @@ -0,0 +1,2531 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Xen mmu operations + * + * This file contains the various mmu fetch and update operations. + * The most important job they must perform is the mapping between the + * domain's pfn and the overall machine mfns. + * + * Xen allows guests to directly update the pagetable, in a controlled + * fashion. In other words, the guest modifies the same pagetable + * that the CPU actually uses, which eliminates the overhead of having + * a separate shadow pagetable. + * + * In order to allow this, it falls on the guest domain to map its + * notion of a "physical" pfn - which is just a domain-local linear + * address - into a real "machine address" which the CPU's MMU can + * use. + * + * A pgd_t/pmd_t/pte_t will typically contain an mfn, and so can be + * inserted directly into the pagetable. When creating a new + * pte/pmd/pgd, it converts the passed pfn into an mfn. Conversely, + * when reading the content back with __(pgd|pmd|pte)_val, it converts + * the mfn back into a pfn. + * + * The other constraint is that all pages which make up a pagetable + * must be mapped read-only in the guest. This prevents uncontrolled + * guest updates to the pagetable. Xen strictly enforces this, and + * will disallow any pagetable update which will end up mapping a + * pagetable page RW, and will disallow using any writable page as a + * pagetable. + * + * Naively, when loading %cr3 with the base of a new pagetable, Xen + * would need to validate the whole pagetable before going on. + * Naturally, this is quite slow. The solution is to "pin" a + * pagetable, which enforces all the constraints on the pagetable even + * when it is not actively in use. This menas that Xen can be assured + * that it is still valid when you do load it into %cr3, and doesn't + * need to revalidate it. 
+ * + * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 + */ +#include <linux/sched/mm.h> +#include <linux/debugfs.h> +#include <linux/bug.h> +#include <linux/vmalloc.h> +#include <linux/export.h> +#include <linux/init.h> +#include <linux/gfp.h> +#include <linux/memblock.h> +#include <linux/seq_file.h> +#include <linux/crash_dump.h> +#include <linux/pgtable.h> +#ifdef CONFIG_KEXEC_CORE +#include <linux/kexec.h> +#endif + +#include <trace/events/xen.h> + +#include <asm/tlbflush.h> +#include <asm/fixmap.h> +#include <asm/mmu_context.h> +#include <asm/setup.h> +#include <asm/paravirt.h> +#include <asm/e820/api.h> +#include <asm/linkage.h> +#include <asm/page.h> +#include <asm/init.h> +#include <asm/memtype.h> +#include <asm/smp.h> +#include <asm/tlb.h> + +#include <asm/xen/hypercall.h> +#include <asm/xen/hypervisor.h> + +#include <xen/xen.h> +#include <xen/page.h> +#include <xen/interface/xen.h> +#include <xen/interface/hvm/hvm_op.h> +#include <xen/interface/version.h> +#include <xen/interface/memory.h> +#include <xen/hvc-console.h> +#include <xen/swiotlb-xen.h> + +#include "multicalls.h" +#include "mmu.h" +#include "debugfs.h" + +/* + * Prototypes for functions called via PV_CALLEE_SAVE_REGS_THUNK() in order + * to avoid warnings with "-Wmissing-prototypes". + */ +pteval_t xen_pte_val(pte_t pte); +pgdval_t xen_pgd_val(pgd_t pgd); +pmdval_t xen_pmd_val(pmd_t pmd); +pudval_t xen_pud_val(pud_t pud); +p4dval_t xen_p4d_val(p4d_t p4d); +pte_t xen_make_pte(pteval_t pte); +pgd_t xen_make_pgd(pgdval_t pgd); +pmd_t xen_make_pmd(pmdval_t pmd); +pud_t xen_make_pud(pudval_t pud); +p4d_t xen_make_p4d(p4dval_t p4d); +pte_t xen_make_pte_init(pteval_t pte); + +#ifdef CONFIG_X86_VSYSCALL_EMULATION +/* l3 pud for userspace vsyscall mapping */ +static pud_t level3_user_vsyscall[PTRS_PER_PUD] __page_aligned_bss; +#endif + +/* + * Protects atomic reservation decrease/increase against concurrent increases. + * Also protects non-atomic updates of current_pages and balloon lists. + */ +static DEFINE_SPINLOCK(xen_reservation_lock); + +/* + * Note about cr3 (pagetable base) values: + * + * xen_cr3 contains the current logical cr3 value; it contains the + * last set cr3. This may not be the current effective cr3, because + * its update may be being lazily deferred. However, a vcpu looking + * at its own cr3 can use this value knowing that it everything will + * be self-consistent. + * + * xen_current_cr3 contains the actual vcpu cr3; it is set once the + * hypercall to set the vcpu cr3 is complete (so it may be a little + * out of date, but it will never be set early). If one vcpu is + * looking at another vcpu's cr3 value, it should use this variable. + */ +DEFINE_PER_CPU(unsigned long, xen_cr3); /* cr3 stored as physaddr */ +DEFINE_PER_CPU(unsigned long, xen_current_cr3); /* actual vcpu cr3 */ + +static phys_addr_t xen_pt_base, xen_pt_size __initdata; + +static DEFINE_STATIC_KEY_FALSE(xen_struct_pages_ready); + +/* + * Just beyond the highest usermode address. STACK_TOP_MAX has a + * redzone above it, so round it up to a PGD boundary. 
+ */ +#define USER_LIMIT ((STACK_TOP_MAX + PGDIR_SIZE - 1) & PGDIR_MASK) + +void make_lowmem_page_readonly(void *vaddr) +{ + pte_t *pte, ptev; + unsigned long address = (unsigned long)vaddr; + unsigned int level; + + pte = lookup_address(address, &level); + if (pte == NULL) + return; /* vaddr missing */ + + ptev = pte_wrprotect(*pte); + + if (HYPERVISOR_update_va_mapping(address, ptev, 0)) + BUG(); +} + +void make_lowmem_page_readwrite(void *vaddr) +{ + pte_t *pte, ptev; + unsigned long address = (unsigned long)vaddr; + unsigned int level; + + pte = lookup_address(address, &level); + if (pte == NULL) + return; /* vaddr missing */ + + ptev = pte_mkwrite_novma(*pte); + + if (HYPERVISOR_update_va_mapping(address, ptev, 0)) + BUG(); +} + + +/* + * During early boot all page table pages are pinned, but we do not have struct + * pages, so return true until struct pages are ready. + */ +static bool xen_page_pinned(void *ptr) +{ + if (static_branch_likely(&xen_struct_pages_ready)) { + struct page *page = virt_to_page(ptr); + + return PagePinned(page); + } + return true; +} + +static void xen_extend_mmu_update(const struct mmu_update *update) +{ + struct multicall_space mcs; + struct mmu_update *u; + + mcs = xen_mc_extend_args(__HYPERVISOR_mmu_update, sizeof(*u)); + + if (mcs.mc != NULL) { + mcs.mc->args[1]++; + } else { + mcs = __xen_mc_entry(sizeof(*u)); + MULTI_mmu_update(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); + } + + u = mcs.args; + *u = *update; +} + +static void xen_extend_mmuext_op(const struct mmuext_op *op) +{ + struct multicall_space mcs; + struct mmuext_op *u; + + mcs = xen_mc_extend_args(__HYPERVISOR_mmuext_op, sizeof(*u)); + + if (mcs.mc != NULL) { + mcs.mc->args[1]++; + } else { + mcs = __xen_mc_entry(sizeof(*u)); + MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); + } + + u = mcs.args; + *u = *op; +} + +static void xen_set_pmd_hyper(pmd_t *ptr, pmd_t val) +{ + struct mmu_update u; + + preempt_disable(); + + xen_mc_batch(); + + /* ptr may be ioremapped for 64-bit pagetable setup */ + u.ptr = arbitrary_virt_to_machine(ptr).maddr; + u.val = pmd_val_ma(val); + xen_extend_mmu_update(&u); + + xen_mc_issue(XEN_LAZY_MMU); + + preempt_enable(); +} + +static void xen_set_pmd(pmd_t *ptr, pmd_t val) +{ + trace_xen_mmu_set_pmd(ptr, val); + + /* If page is not pinned, we can just update the entry + directly */ + if (!xen_page_pinned(ptr)) { + *ptr = val; + return; + } + + xen_set_pmd_hyper(ptr, val); +} + +/* + * Associate a virtual page frame with a given physical page frame + * and protection flags for that frame. + */ +void __init set_pte_mfn(unsigned long vaddr, unsigned long mfn, pgprot_t flags) +{ + if (HYPERVISOR_update_va_mapping(vaddr, mfn_pte(mfn, flags), + UVMF_INVLPG)) + BUG(); +} + +static bool xen_batched_set_pte(pte_t *ptep, pte_t pteval) +{ + struct mmu_update u; + + if (xen_get_lazy_mode() != XEN_LAZY_MMU) + return false; + + xen_mc_batch(); + + u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; + u.val = pte_val_ma(pteval); + xen_extend_mmu_update(&u); + + xen_mc_issue(XEN_LAZY_MMU); + + return true; +} + +static inline void __xen_set_pte(pte_t *ptep, pte_t pteval) +{ + if (!xen_batched_set_pte(ptep, pteval)) { + /* + * Could call native_set_pte() here and trap and + * emulate the PTE write, but a hypercall is much cheaper. 
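+		 * (A trapped write means a page fault into Xen plus instruction decode + * and emulation; the explicit mmu_update hypercall avoids the emulation + * step.)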
+ */ + struct mmu_update u; + + u.ptr = virt_to_machine(ptep).maddr | MMU_NORMAL_PT_UPDATE; + u.val = pte_val_ma(pteval); + HYPERVISOR_mmu_update(&u, 1, NULL, DOMID_SELF); + } +} + +static void xen_set_pte(pte_t *ptep, pte_t pteval) +{ + trace_xen_mmu_set_pte(ptep, pteval); + __xen_set_pte(ptep, pteval); +} + +pte_t xen_ptep_modify_prot_start(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + /* Just return the pte as-is. We preserve the bits on commit */ + trace_xen_mmu_ptep_modify_prot_start(vma->vm_mm, addr, ptep, *ptep); + return *ptep; +} + +void xen_ptep_modify_prot_commit(struct vm_area_struct *vma, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + struct mmu_update u; + + trace_xen_mmu_ptep_modify_prot_commit(vma->vm_mm, addr, ptep, pte); + xen_mc_batch(); + + u.ptr = virt_to_machine(ptep).maddr | MMU_PT_UPDATE_PRESERVE_AD; + u.val = pte_val_ma(pte); + xen_extend_mmu_update(&u); + + xen_mc_issue(XEN_LAZY_MMU); +} + +/* Assume pteval_t is equivalent to all the other *val_t types. */ +static pteval_t pte_mfn_to_pfn(pteval_t val) +{ + if (val & _PAGE_PRESENT) { + unsigned long mfn = (val & XEN_PTE_MFN_MASK) >> PAGE_SHIFT; + unsigned long pfn = mfn_to_pfn(mfn); + + pteval_t flags = val & PTE_FLAGS_MASK; + if (unlikely(pfn == ~0)) + val = flags & ~_PAGE_PRESENT; + else + val = ((pteval_t)pfn << PAGE_SHIFT) | flags; + } + + return val; +} + +static pteval_t pte_pfn_to_mfn(pteval_t val) +{ + if (val & _PAGE_PRESENT) { + unsigned long pfn = (val & PTE_PFN_MASK) >> PAGE_SHIFT; + pteval_t flags = val & PTE_FLAGS_MASK; + unsigned long mfn; + + mfn = __pfn_to_mfn(pfn); + + /* + * If there's no mfn for the pfn, then just create an + * empty non-present pte. Unfortunately this loses + * information about the original pfn, so + * pte_mfn_to_pfn is asymmetric. 
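+		 * E.g. round-tripping a pte built for a ballooned-out pfn yields an + * empty pte of 0 rather than the original pfn and flags.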
+ */ + if (unlikely(mfn == INVALID_P2M_ENTRY)) { + mfn = 0; + flags = 0; + } else + mfn &= ~(FOREIGN_FRAME_BIT | IDENTITY_FRAME_BIT); + val = ((pteval_t)mfn << PAGE_SHIFT) | flags; + } + + return val; +} + +__visible pteval_t xen_pte_val(pte_t pte) +{ + pteval_t pteval = pte.pte; + + return pte_mfn_to_pfn(pteval); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_pte_val); + +__visible pgdval_t xen_pgd_val(pgd_t pgd) +{ + return pte_mfn_to_pfn(pgd.pgd); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_pgd_val); + +__visible pte_t xen_make_pte(pteval_t pte) +{ + pte = pte_pfn_to_mfn(pte); + + return native_make_pte(pte); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte); + +__visible pgd_t xen_make_pgd(pgdval_t pgd) +{ + pgd = pte_pfn_to_mfn(pgd); + return native_make_pgd(pgd); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pgd); + +__visible pmdval_t xen_pmd_val(pmd_t pmd) +{ + return pte_mfn_to_pfn(pmd.pmd); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_pmd_val); + +static void xen_set_pud_hyper(pud_t *ptr, pud_t val) +{ + struct mmu_update u; + + preempt_disable(); + + xen_mc_batch(); + + /* ptr may be ioremapped for 64-bit pagetable setup */ + u.ptr = arbitrary_virt_to_machine(ptr).maddr; + u.val = pud_val_ma(val); + xen_extend_mmu_update(&u); + + xen_mc_issue(XEN_LAZY_MMU); + + preempt_enable(); +} + +static void xen_set_pud(pud_t *ptr, pud_t val) +{ + trace_xen_mmu_set_pud(ptr, val); + + /* If page is not pinned, we can just update the entry + directly */ + if (!xen_page_pinned(ptr)) { + *ptr = val; + return; + } + + xen_set_pud_hyper(ptr, val); +} + +__visible pmd_t xen_make_pmd(pmdval_t pmd) +{ + pmd = pte_pfn_to_mfn(pmd); + return native_make_pmd(pmd); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pmd); + +__visible pudval_t xen_pud_val(pud_t pud) +{ + return pte_mfn_to_pfn(pud.pud); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_pud_val); + +__visible pud_t xen_make_pud(pudval_t pud) +{ + pud = pte_pfn_to_mfn(pud); + + return native_make_pud(pud); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pud); + +static pgd_t *xen_get_user_pgd(pgd_t *pgd) +{ + pgd_t *pgd_page = (pgd_t *)(((unsigned long)pgd) & PAGE_MASK); + unsigned offset = pgd - pgd_page; + pgd_t *user_ptr = NULL; + + if (offset < pgd_index(USER_LIMIT)) { + struct page *page = virt_to_page(pgd_page); + user_ptr = (pgd_t *)page->private; + if (user_ptr) + user_ptr += offset; + } + + return user_ptr; +} + +static void __xen_set_p4d_hyper(p4d_t *ptr, p4d_t val) +{ + struct mmu_update u; + + u.ptr = virt_to_machine(ptr).maddr; + u.val = p4d_val_ma(val); + xen_extend_mmu_update(&u); +} + +/* + * Raw hypercall-based set_p4d, intended for in early boot before + * there's a page structure. This implies: + * 1. The only existing pagetable is the kernel's + * 2. It is always pinned + * 3. It has no user pagetable attached to it + */ +static void __init xen_set_p4d_hyper(p4d_t *ptr, p4d_t val) +{ + preempt_disable(); + + xen_mc_batch(); + + __xen_set_p4d_hyper(ptr, val); + + xen_mc_issue(XEN_LAZY_MMU); + + preempt_enable(); +} + +static void xen_set_p4d(p4d_t *ptr, p4d_t val) +{ + pgd_t *user_ptr = xen_get_user_pgd((pgd_t *)ptr); + pgd_t pgd_val; + + trace_xen_mmu_set_p4d(ptr, (p4d_t *)user_ptr, val); + + /* If page is not pinned, we can just update the entry + directly */ + if (!xen_page_pinned(ptr)) { + *ptr = val; + if (user_ptr) { + WARN_ON(xen_page_pinned(user_ptr)); + pgd_val.pgd = p4d_val_ma(val); + *user_ptr = pgd_val; + } + return; + } + + /* If it's pinned, then we can at least batch the kernel and + user updates together. 
*/ + xen_mc_batch(); + + __xen_set_p4d_hyper(ptr, val); + if (user_ptr) + __xen_set_p4d_hyper((p4d_t *)user_ptr, val); + + xen_mc_issue(XEN_LAZY_MMU); +} + +#if CONFIG_PGTABLE_LEVELS >= 5 +__visible p4dval_t xen_p4d_val(p4d_t p4d) +{ + return pte_mfn_to_pfn(p4d.p4d); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_p4d_val); + +__visible p4d_t xen_make_p4d(p4dval_t p4d) +{ + p4d = pte_pfn_to_mfn(p4d); + + return native_make_p4d(p4d); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_p4d); +#endif /* CONFIG_PGTABLE_LEVELS >= 5 */ + +static void xen_pmd_walk(struct mm_struct *mm, pmd_t *pmd, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + bool last, unsigned long limit) +{ + int i, nr; + + nr = last ? pmd_index(limit) + 1 : PTRS_PER_PMD; + for (i = 0; i < nr; i++) { + if (!pmd_none(pmd[i])) + (*func)(mm, pmd_page(pmd[i]), PT_PTE); + } +} + +static void xen_pud_walk(struct mm_struct *mm, pud_t *pud, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + bool last, unsigned long limit) +{ + int i, nr; + + nr = last ? pud_index(limit) + 1 : PTRS_PER_PUD; + for (i = 0; i < nr; i++) { + pmd_t *pmd; + + if (pud_none(pud[i])) + continue; + + pmd = pmd_offset(&pud[i], 0); + if (PTRS_PER_PMD > 1) + (*func)(mm, virt_to_page(pmd), PT_PMD); + xen_pmd_walk(mm, pmd, func, last && i == nr - 1, limit); + } +} + +static void xen_p4d_walk(struct mm_struct *mm, p4d_t *p4d, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + bool last, unsigned long limit) +{ + pud_t *pud; + + + if (p4d_none(*p4d)) + return; + + pud = pud_offset(p4d, 0); + if (PTRS_PER_PUD > 1) + (*func)(mm, virt_to_page(pud), PT_PUD); + xen_pud_walk(mm, pud, func, last, limit); +} + +/* + * (Yet another) pagetable walker. This one is intended for pinning a + * pagetable. This means that it walks a pagetable and calls the + * callback function on each page it finds making up the page table, + * at every level. It walks the entire pagetable, but it only bothers + * pinning pte pages which are below limit. In the normal case this + * will be STACK_TOP_MAX, but at boot we need to pin up to + * FIXADDR_TOP. + * + * We must skip the Xen hole in the middle of the address space, just after + * the big x86-64 virtual hole. + */ +static void __xen_pgd_walk(struct mm_struct *mm, pgd_t *pgd, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + unsigned long limit) +{ + int i, nr; + unsigned hole_low = 0, hole_high = 0; + + /* The limit is the last byte to be touched */ + limit--; + BUG_ON(limit >= FIXADDR_TOP); + + /* + * 64-bit has a great big hole in the middle of the address + * space, which contains the Xen mappings. + */ + hole_low = pgd_index(GUARD_HOLE_BASE_ADDR); + hole_high = pgd_index(GUARD_HOLE_END_ADDR); + + nr = pgd_index(limit) + 1; + for (i = 0; i < nr; i++) { + p4d_t *p4d; + + if (i >= hole_low && i < hole_high) + continue; + + if (pgd_none(pgd[i])) + continue; + + p4d = p4d_offset(&pgd[i], 0); + xen_p4d_walk(mm, p4d, func, i == nr - 1, limit); + } + + /* Do the top level last, so that the callbacks can use it as + a cue to do final things like tlb flushes. */ + (*func)(mm, virt_to_page(pgd), PT_PGD); +} + +static void xen_pgd_walk(struct mm_struct *mm, + void (*func)(struct mm_struct *mm, struct page *, + enum pt_level), + unsigned long limit) +{ + __xen_pgd_walk(mm, mm->pgd, func, limit); +} + +/* If we're using split pte locks, then take the page's lock and + return a pointer to it. Otherwise return NULL. 
*/ +static spinlock_t *xen_pte_lock(struct page *page, struct mm_struct *mm) +{ + spinlock_t *ptl = NULL; + +#if USE_SPLIT_PTE_PTLOCKS + ptl = ptlock_ptr(page_ptdesc(page)); + spin_lock_nest_lock(ptl, &mm->page_table_lock); +#endif + + return ptl; +} + +static void xen_pte_unlock(void *v) +{ + spinlock_t *ptl = v; + spin_unlock(ptl); +} + +static void xen_do_pin(unsigned level, unsigned long pfn) +{ + struct mmuext_op op; + + op.cmd = level; + op.arg1.mfn = pfn_to_mfn(pfn); + + xen_extend_mmuext_op(&op); +} + +static void xen_pin_page(struct mm_struct *mm, struct page *page, + enum pt_level level) +{ + unsigned pgfl = TestSetPagePinned(page); + + if (!pgfl) { + void *pt = lowmem_page_address(page); + unsigned long pfn = page_to_pfn(page); + struct multicall_space mcs = __xen_mc_entry(0); + spinlock_t *ptl; + + /* + * We need to hold the pagetable lock between the time + * we make the pagetable RO and when we actually pin + * it. If we don't, then other users may come in and + * attempt to update the pagetable by writing it, + * which will fail because the memory is RO but not + * pinned, so Xen won't do the trap'n'emulate. + * + * If we're using split pte locks, we can't hold the + * entire pagetable's worth of locks during the + * traverse, because we may wrap the preempt count (8 + * bits). The solution is to mark RO and pin each PTE + * page while holding the lock. This means the number + * of locks we end up holding is never more than a + * batch size (~32 entries, at present). + * + * If we're not using split pte locks, we needn't pin + * the PTE pages independently, because we're + * protected by the overall pagetable lock. + */ + ptl = NULL; + if (level == PT_PTE) + ptl = xen_pte_lock(page, mm); + + MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, + pfn_pte(pfn, PAGE_KERNEL_RO), + level == PT_PGD ? UVMF_TLB_FLUSH : 0); + + if (ptl) { + xen_do_pin(MMUEXT_PIN_L1_TABLE, pfn); + + /* Queue a deferred unlock for when this batch + is completed. */ + xen_mc_callback(xen_pte_unlock, ptl); + } + } +} + +/* This is called just after a mm has been created, but it has not + been used yet. We need to make sure that its pagetable is all + read-only, and can be pinned. */ +static void __xen_pgd_pin(struct mm_struct *mm, pgd_t *pgd) +{ + pgd_t *user_pgd = xen_get_user_pgd(pgd); + + trace_xen_mmu_pgd_pin(mm, pgd); + + xen_mc_batch(); + + __xen_pgd_walk(mm, pgd, xen_pin_page, USER_LIMIT); + + xen_do_pin(MMUEXT_PIN_L4_TABLE, PFN_DOWN(__pa(pgd))); + + if (user_pgd) { + xen_pin_page(mm, virt_to_page(user_pgd), PT_PGD); + xen_do_pin(MMUEXT_PIN_L4_TABLE, + PFN_DOWN(__pa(user_pgd))); + } + + xen_mc_issue(0); +} + +static void xen_pgd_pin(struct mm_struct *mm) +{ + __xen_pgd_pin(mm, mm->pgd); +} + +/* + * On save, we need to pin all pagetables to make sure they get their + * mfns turned into pfns. Search the list for any unpinned pgds and pin + * them (unpinned pgds are not currently in use, probably because the + * process is under construction or destruction). + * + * Expected to be called in stop_machine() ("equivalent to taking + * every spinlock in the system"), so the locking doesn't really + * matter all that much. 
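+ * (On the save path this runs via stop_machine(), with all other CPUs + * parked with interrupts off, so nothing can race with the pinning.)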
+ */ +void xen_mm_pin_all(void) +{ + struct page *page; + + spin_lock(&pgd_lock); + + list_for_each_entry(page, &pgd_list, lru) { + if (!PagePinned(page)) { + __xen_pgd_pin(&init_mm, (pgd_t *)page_address(page)); + SetPageSavePinned(page); + } + } + + spin_unlock(&pgd_lock); +} + +static void __init xen_mark_pinned(struct mm_struct *mm, struct page *page, + enum pt_level level) +{ + SetPagePinned(page); +} + +/* + * The init_mm pagetable is really pinned as soon as its created, but + * that's before we have page structures to store the bits. So do all + * the book-keeping now once struct pages for allocated pages are + * initialized. This happens only after memblock_free_all() is called. + */ +static void __init xen_after_bootmem(void) +{ + static_branch_enable(&xen_struct_pages_ready); +#ifdef CONFIG_X86_VSYSCALL_EMULATION + SetPagePinned(virt_to_page(level3_user_vsyscall)); +#endif + xen_pgd_walk(&init_mm, xen_mark_pinned, FIXADDR_TOP); +} + +static void xen_unpin_page(struct mm_struct *mm, struct page *page, + enum pt_level level) +{ + unsigned pgfl = TestClearPagePinned(page); + + if (pgfl) { + void *pt = lowmem_page_address(page); + unsigned long pfn = page_to_pfn(page); + spinlock_t *ptl = NULL; + struct multicall_space mcs; + + /* + * Do the converse to pin_page. If we're using split + * pte locks, we must be holding the lock for while + * the pte page is unpinned but still RO to prevent + * concurrent updates from seeing it in this + * partially-pinned state. + */ + if (level == PT_PTE) { + ptl = xen_pte_lock(page, mm); + + if (ptl) + xen_do_pin(MMUEXT_UNPIN_TABLE, pfn); + } + + mcs = __xen_mc_entry(0); + + MULTI_update_va_mapping(mcs.mc, (unsigned long)pt, + pfn_pte(pfn, PAGE_KERNEL), + level == PT_PGD ? UVMF_TLB_FLUSH : 0); + + if (ptl) { + /* unlock when batch completed */ + xen_mc_callback(xen_pte_unlock, ptl); + } + } +} + +/* Release a pagetables pages back as normal RW */ +static void __xen_pgd_unpin(struct mm_struct *mm, pgd_t *pgd) +{ + pgd_t *user_pgd = xen_get_user_pgd(pgd); + + trace_xen_mmu_pgd_unpin(mm, pgd); + + xen_mc_batch(); + + xen_do_pin(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); + + if (user_pgd) { + xen_do_pin(MMUEXT_UNPIN_TABLE, + PFN_DOWN(__pa(user_pgd))); + xen_unpin_page(mm, virt_to_page(user_pgd), PT_PGD); + } + + __xen_pgd_walk(mm, pgd, xen_unpin_page, USER_LIMIT); + + xen_mc_issue(0); +} + +static void xen_pgd_unpin(struct mm_struct *mm) +{ + __xen_pgd_unpin(mm, mm->pgd); +} + +/* + * On resume, undo any pinning done at save, so that the rest of the + * kernel doesn't see any unexpected pinned pagetables. + */ +void xen_mm_unpin_all(void) +{ + struct page *page; + + spin_lock(&pgd_lock); + + list_for_each_entry(page, &pgd_list, lru) { + if (PageSavePinned(page)) { + BUG_ON(!PagePinned(page)); + __xen_pgd_unpin(&init_mm, (pgd_t *)page_address(page)); + ClearPageSavePinned(page); + } + } + + spin_unlock(&pgd_lock); +} + +static void xen_enter_mmap(struct mm_struct *mm) +{ + spin_lock(&mm->page_table_lock); + xen_pgd_pin(mm); + spin_unlock(&mm->page_table_lock); +} + +static void drop_mm_ref_this_cpu(void *info) +{ + struct mm_struct *mm = info; + + if (this_cpu_read(cpu_tlbstate.loaded_mm) == mm) + leave_mm(smp_processor_id()); + + /* + * If this cpu still has a stale cr3 reference, then make sure + * it has been flushed. 
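+	 * The hypercall switching this CPU away from the old pagetable may still + * be sitting, unissued, in the multicall batch; flushing it makes sure Xen + * has dropped its reference to the pgd.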
+ */ + if (this_cpu_read(xen_current_cr3) == __pa(mm->pgd)) + xen_mc_flush(); +} + +#ifdef CONFIG_SMP +/* + * Another cpu may still have their %cr3 pointing at the pagetable, so + * we need to repoint it somewhere else before we can unpin it. + */ +static void xen_drop_mm_ref(struct mm_struct *mm) +{ + cpumask_var_t mask; + unsigned cpu; + + drop_mm_ref_this_cpu(mm); + + /* Get the "official" set of cpus referring to our pagetable. */ + if (!alloc_cpumask_var(&mask, GFP_ATOMIC)) { + for_each_online_cpu(cpu) { + if (per_cpu(xen_current_cr3, cpu) != __pa(mm->pgd)) + continue; + smp_call_function_single(cpu, drop_mm_ref_this_cpu, mm, 1); + } + return; + } + + /* + * It's possible that a vcpu may have a stale reference to our + * cr3, because its in lazy mode, and it hasn't yet flushed + * its set of pending hypercalls yet. In this case, we can + * look at its actual current cr3 value, and force it to flush + * if needed. + */ + cpumask_clear(mask); + for_each_online_cpu(cpu) { + if (per_cpu(xen_current_cr3, cpu) == __pa(mm->pgd)) + cpumask_set_cpu(cpu, mask); + } + + smp_call_function_many(mask, drop_mm_ref_this_cpu, mm, 1); + free_cpumask_var(mask); +} +#else +static void xen_drop_mm_ref(struct mm_struct *mm) +{ + drop_mm_ref_this_cpu(mm); +} +#endif + +/* + * While a process runs, Xen pins its pagetables, which means that the + * hypervisor forces it to be read-only, and it controls all updates + * to it. This means that all pagetable updates have to go via the + * hypervisor, which is moderately expensive. + * + * Since we're pulling the pagetable down, we switch to use init_mm, + * unpin old process pagetable and mark it all read-write, which + * allows further operations on it to be simple memory accesses. + * + * The only subtle point is that another CPU may be still using the + * pagetable because of lazy tlb flushing. This means we need need to + * switch all CPUs off this pagetable before we can unpin it. + */ +static void xen_exit_mmap(struct mm_struct *mm) +{ + get_cpu(); /* make sure we don't move around */ + xen_drop_mm_ref(mm); + put_cpu(); + + spin_lock(&mm->page_table_lock); + + /* pgd may not be pinned in the error exit path of execve */ + if (xen_page_pinned(mm->pgd)) + xen_pgd_unpin(mm); + + spin_unlock(&mm->page_table_lock); +} + +static void xen_post_allocator_init(void); + +static void __init pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +{ + struct mmuext_op op; + + op.cmd = cmd; + op.arg1.mfn = pfn_to_mfn(pfn); + if (HYPERVISOR_mmuext_op(&op, 1, NULL, DOMID_SELF)) + BUG(); +} + +static void __init xen_cleanhighmap(unsigned long vaddr, + unsigned long vaddr_end) +{ + unsigned long kernel_end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1; + pmd_t *pmd = level2_kernel_pgt + pmd_index(vaddr); + + /* NOTE: The loop is more greedy than the cleanup_highmap variant. + * We include the PMD passed in on _both_ boundaries. */ + for (; vaddr <= vaddr_end && (pmd < (level2_kernel_pgt + PTRS_PER_PMD)); + pmd++, vaddr += PMD_SIZE) { + if (pmd_none(*pmd)) + continue; + if (vaddr < (unsigned long) _text || vaddr > kernel_end) + set_pmd(pmd, __pmd(0)); + } + /* In case we did something silly, we should crash in this function + * instead of somewhere later and be confusing. */ + xen_mc_flush(); +} + +/* + * Make a page range writeable and free it. 
+ */ +static void __init xen_free_ro_pages(unsigned long paddr, unsigned long size) +{ + void *vaddr = __va(paddr); + void *vaddr_end = vaddr + size; + + for (; vaddr < vaddr_end; vaddr += PAGE_SIZE) + make_lowmem_page_readwrite(vaddr); + + memblock_phys_free(paddr, size); +} + +static void __init xen_cleanmfnmap_free_pgtbl(void *pgtbl, bool unpin) +{ + unsigned long pa = __pa(pgtbl) & PHYSICAL_PAGE_MASK; + + if (unpin) + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(pa)); + ClearPagePinned(virt_to_page(__va(pa))); + xen_free_ro_pages(pa, PAGE_SIZE); +} + +static void __init xen_cleanmfnmap_pmd(pmd_t *pmd, bool unpin) +{ + unsigned long pa; + pte_t *pte_tbl; + int i; + + if (pmd_large(*pmd)) { + pa = pmd_val(*pmd) & PHYSICAL_PAGE_MASK; + xen_free_ro_pages(pa, PMD_SIZE); + return; + } + + pte_tbl = pte_offset_kernel(pmd, 0); + for (i = 0; i < PTRS_PER_PTE; i++) { + if (pte_none(pte_tbl[i])) + continue; + pa = pte_pfn(pte_tbl[i]) << PAGE_SHIFT; + xen_free_ro_pages(pa, PAGE_SIZE); + } + set_pmd(pmd, __pmd(0)); + xen_cleanmfnmap_free_pgtbl(pte_tbl, unpin); +} + +static void __init xen_cleanmfnmap_pud(pud_t *pud, bool unpin) +{ + unsigned long pa; + pmd_t *pmd_tbl; + int i; + + if (pud_large(*pud)) { + pa = pud_val(*pud) & PHYSICAL_PAGE_MASK; + xen_free_ro_pages(pa, PUD_SIZE); + return; + } + + pmd_tbl = pmd_offset(pud, 0); + for (i = 0; i < PTRS_PER_PMD; i++) { + if (pmd_none(pmd_tbl[i])) + continue; + xen_cleanmfnmap_pmd(pmd_tbl + i, unpin); + } + set_pud(pud, __pud(0)); + xen_cleanmfnmap_free_pgtbl(pmd_tbl, unpin); +} + +static void __init xen_cleanmfnmap_p4d(p4d_t *p4d, bool unpin) +{ + unsigned long pa; + pud_t *pud_tbl; + int i; + + if (p4d_large(*p4d)) { + pa = p4d_val(*p4d) & PHYSICAL_PAGE_MASK; + xen_free_ro_pages(pa, P4D_SIZE); + return; + } + + pud_tbl = pud_offset(p4d, 0); + for (i = 0; i < PTRS_PER_PUD; i++) { + if (pud_none(pud_tbl[i])) + continue; + xen_cleanmfnmap_pud(pud_tbl + i, unpin); + } + set_p4d(p4d, __p4d(0)); + xen_cleanmfnmap_free_pgtbl(pud_tbl, unpin); +} + +/* + * Since it is well isolated we can (and since it is perhaps large we should) + * also free the page tables mapping the initial P->M table. + */ +static void __init xen_cleanmfnmap(unsigned long vaddr) +{ + pgd_t *pgd; + p4d_t *p4d; + bool unpin; + + unpin = (vaddr == 2 * PGDIR_SIZE); + vaddr &= PMD_MASK; + pgd = pgd_offset_k(vaddr); + p4d = p4d_offset(pgd, 0); + if (!p4d_none(*p4d)) + xen_cleanmfnmap_p4d(p4d, unpin); +} + +static void __init xen_pagetable_p2m_free(void) +{ + unsigned long size; + unsigned long addr; + + size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); + + /* No memory or already called. */ + if ((unsigned long)xen_p2m_addr == xen_start_info->mfn_list) + return; + + /* using __ka address and sticking INVALID_P2M_ENTRY! */ + memset((void *)xen_start_info->mfn_list, 0xff, size); + + addr = xen_start_info->mfn_list; + /* + * We could be in __ka space. + * We roundup to the PMD, which means that if anybody at this stage is + * using the __ka address of xen_start_info or + * xen_start_info->shared_info they are in going to crash. Fortunately + * we have already revectored in xen_setup_kernel_pagetable. 
+ */ + size = roundup(size, PMD_SIZE); + + if (addr >= __START_KERNEL_map) { + xen_cleanhighmap(addr, addr + size); + size = PAGE_ALIGN(xen_start_info->nr_pages * + sizeof(unsigned long)); + memblock_free((void *)addr, size); + } else { + xen_cleanmfnmap(addr); + } +} + +static void __init xen_pagetable_cleanhighmap(void) +{ + unsigned long size; + unsigned long addr; + + /* At this stage, cleanup_highmap has already cleaned __ka space + * from _brk_limit way up to the max_pfn_mapped (which is the end of + * the ramdisk). We continue on, erasing PMD entries that point to page + * tables - do note that they are accessible at this stage via __va. + * As Xen is aligning the memory end to a 4MB boundary, for good + * measure we also round up to PMD_SIZE * 2 - which means that if + * anybody is using __ka address to the initial boot-stack - and try + * to use it - they are going to crash. The xen_start_info has been + * taken care of already in xen_setup_kernel_pagetable. */ + addr = xen_start_info->pt_base; + size = xen_start_info->nr_pt_frames * PAGE_SIZE; + + xen_cleanhighmap(addr, roundup(addr + size, PMD_SIZE * 2)); + xen_start_info->pt_base = (unsigned long)__va(__pa(xen_start_info->pt_base)); +} + +static void __init xen_pagetable_p2m_setup(void) +{ + xen_vmalloc_p2m_tree(); + + xen_pagetable_p2m_free(); + + xen_pagetable_cleanhighmap(); + + /* And revector! Bye bye old array */ + xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; +} + +static void __init xen_pagetable_init(void) +{ + /* + * The majority of further PTE writes is to pagetables already + * announced as such to Xen. Hence it is more efficient to use + * hypercalls for these updates. + */ + pv_ops.mmu.set_pte = __xen_set_pte; + + paging_init(); + xen_post_allocator_init(); + + xen_pagetable_p2m_setup(); + + /* Allocate and initialize top and mid mfn levels for p2m structure */ + xen_build_mfn_list_list(); + + /* Remap memory freed due to conflicts with E820 map */ + xen_remap_memory(); + xen_setup_mfn_list_list(); +} + +static noinstr void xen_write_cr2(unsigned long cr2) +{ + this_cpu_read(xen_vcpu)->arch.cr2 = cr2; +} + +static noinline void xen_flush_tlb(void) +{ + struct mmuext_op *op; + struct multicall_space mcs; + + preempt_disable(); + + mcs = xen_mc_entry(sizeof(*op)); + + op = mcs.args; + op->cmd = MMUEXT_TLB_FLUSH_LOCAL; + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(XEN_LAZY_MMU); + + preempt_enable(); +} + +static void xen_flush_tlb_one_user(unsigned long addr) +{ + struct mmuext_op *op; + struct multicall_space mcs; + + trace_xen_mmu_flush_tlb_one_user(addr); + + preempt_disable(); + + mcs = xen_mc_entry(sizeof(*op)); + op = mcs.args; + op->cmd = MMUEXT_INVLPG_LOCAL; + op->arg1.linear_addr = addr & PAGE_MASK; + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(XEN_LAZY_MMU); + + preempt_enable(); +} + +static void xen_flush_tlb_multi(const struct cpumask *cpus, + const struct flush_tlb_info *info) +{ + struct { + struct mmuext_op op; + DECLARE_BITMAP(mask, NR_CPUS); + } *args; + struct multicall_space mcs; + const size_t mc_entry_size = sizeof(args->op) + + sizeof(args->mask[0]) * BITS_TO_LONGS(num_possible_cpus()); + + trace_xen_mmu_flush_tlb_multi(cpus, info->mm, info->start, info->end); + + if (cpumask_empty(cpus)) + return; /* nothing to do */ + + mcs = xen_mc_entry(mc_entry_size); + args = mcs.args; + args->op.arg2.vcpumask = to_cpumask(args->mask); + + /* Remove any offline CPUs */ + cpumask_and(to_cpumask(args->mask), cpus, cpu_online_mask); + + args->op.cmd = 
MMUEXT_TLB_FLUSH_MULTI; + if (info->end != TLB_FLUSH_ALL && + (info->end - info->start) <= PAGE_SIZE) { + args->op.cmd = MMUEXT_INVLPG_MULTI; + args->op.arg1.linear_addr = info->start; + } + + MULTI_mmuext_op(mcs.mc, &args->op, 1, NULL, DOMID_SELF); + + xen_mc_issue(XEN_LAZY_MMU); +} + +static unsigned long xen_read_cr3(void) +{ + return this_cpu_read(xen_cr3); +} + +static void set_current_cr3(void *v) +{ + this_cpu_write(xen_current_cr3, (unsigned long)v); +} + +static void __xen_write_cr3(bool kernel, unsigned long cr3) +{ + struct mmuext_op op; + unsigned long mfn; + + trace_xen_mmu_write_cr3(kernel, cr3); + + if (cr3) + mfn = pfn_to_mfn(PFN_DOWN(cr3)); + else + mfn = 0; + + WARN_ON(mfn == 0 && kernel); + + op.cmd = kernel ? MMUEXT_NEW_BASEPTR : MMUEXT_NEW_USER_BASEPTR; + op.arg1.mfn = mfn; + + xen_extend_mmuext_op(&op); + + if (kernel) { + this_cpu_write(xen_cr3, cr3); + + /* Update xen_current_cr3 once the batch has actually + been submitted. */ + xen_mc_callback(set_current_cr3, (void *)cr3); + } +} +static void xen_write_cr3(unsigned long cr3) +{ + pgd_t *user_pgd = xen_get_user_pgd(__va(cr3)); + + BUG_ON(preemptible()); + + xen_mc_batch(); /* disables interrupts */ + + /* Update while interrupts are disabled, so its atomic with + respect to ipis */ + this_cpu_write(xen_cr3, cr3); + + __xen_write_cr3(true, cr3); + + if (user_pgd) + __xen_write_cr3(false, __pa(user_pgd)); + else + __xen_write_cr3(false, 0); + + xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */ +} + +/* + * At the start of the day - when Xen launches a guest, it has already + * built pagetables for the guest. We diligently look over them + * in xen_setup_kernel_pagetable and graft as appropriate them in the + * init_top_pgt and its friends. Then when we are happy we load + * the new init_top_pgt - and continue on. + * + * The generic code starts (start_kernel) and 'init_mem_mapping' sets + * up the rest of the pagetables. When it has completed it loads the cr3. + * N.B. that baremetal would start at 'start_kernel' (and the early + * #PF handler would create bootstrap pagetables) - so we are running + * with the same assumptions as what to do when write_cr3 is executed + * at this point. + * + * Since there are no user-page tables at all, we have two variants + * of xen_write_cr3 - the early bootup (this one), and the late one + * (xen_write_cr3). The reason we have to do that is that in 64-bit + * the Linux kernel and user-space are both in ring 3 while the + * hypervisor is in ring 0. 
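+ *
+ * In short, the two variants differ only in whether a user base pointer
+ * is also loaded (a rough summary of the code above and below):
+ *
+ *   xen_write_cr3_init: __xen_write_cr3(true, cr3);
+ *   xen_write_cr3:      __xen_write_cr3(true, cr3);
+ *                       __xen_write_cr3(false, user_pgd ? __pa(user_pgd) : 0);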
+ */ +static void __init xen_write_cr3_init(unsigned long cr3) +{ + BUG_ON(preemptible()); + + xen_mc_batch(); /* disables interrupts */ + + /* Update while interrupts are disabled, so its atomic with + respect to ipis */ + this_cpu_write(xen_cr3, cr3); + + __xen_write_cr3(true, cr3); + + xen_mc_issue(XEN_LAZY_CPU); /* interrupts restored */ +} + +static int xen_pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd = mm->pgd; + struct page *page = virt_to_page(pgd); + pgd_t *user_pgd; + int ret = -ENOMEM; + + BUG_ON(PagePinned(virt_to_page(pgd))); + BUG_ON(page->private != 0); + + user_pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO); + page->private = (unsigned long)user_pgd; + + if (user_pgd != NULL) { +#ifdef CONFIG_X86_VSYSCALL_EMULATION + user_pgd[pgd_index(VSYSCALL_ADDR)] = + __pgd(__pa(level3_user_vsyscall) | _PAGE_TABLE); +#endif + ret = 0; + } + + BUG_ON(PagePinned(virt_to_page(xen_get_user_pgd(pgd)))); + + return ret; +} + +static void xen_pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + pgd_t *user_pgd = xen_get_user_pgd(pgd); + + if (user_pgd) + free_page((unsigned long)user_pgd); +} + +/* + * Init-time set_pte while constructing initial pagetables, which + * doesn't allow RO page table pages to be remapped RW. + * + * If there is no MFN for this PFN then this page is initially + * ballooned out so clear the PTE (as in decrease_reservation() in + * drivers/xen/balloon.c). + * + * Many of these PTE updates are done on unpinned and writable pages + * and doing a hypercall for these is unnecessary and expensive. At + * this point it is rarely possible to tell if a page is pinned, so + * mostly write the PTE directly and rely on Xen trapping and + * emulating any updates as necessary. + */ +static void __init xen_set_pte_init(pte_t *ptep, pte_t pte) +{ + if (unlikely(is_early_ioremap_ptep(ptep))) + __xen_set_pte(ptep, pte); + else + native_set_pte(ptep, pte); +} + +__visible pte_t xen_make_pte_init(pteval_t pte) +{ + unsigned long pfn; + + /* + * Pages belonging to the initial p2m list mapped outside the default + * address range must be mapped read-only. This region contains the + * page tables for mapping the p2m list, too, and page tables MUST be + * mapped read-only. + */ + pfn = (pte & PTE_PFN_MASK) >> PAGE_SHIFT; + if (xen_start_info->mfn_list < __START_KERNEL_map && + pfn >= xen_start_info->first_p2m_pfn && + pfn < xen_start_info->first_p2m_pfn + xen_start_info->nr_p2m_frames) + pte &= ~_PAGE_RW; + + pte = pte_pfn_to_mfn(pte); + return native_make_pte(pte); +} +PV_CALLEE_SAVE_REGS_THUNK(xen_make_pte_init); + +/* Early in boot, while setting up the initial pagetable, assume + everything is pinned. */ +static void __init xen_alloc_pte_init(struct mm_struct *mm, unsigned long pfn) +{ +#ifdef CONFIG_FLATMEM + BUG_ON(mem_map); /* should only be used early */ +#endif + make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); +} + +/* Used for pmd and pud */ +static void __init xen_alloc_pmd_init(struct mm_struct *mm, unsigned long pfn) +{ +#ifdef CONFIG_FLATMEM + BUG_ON(mem_map); /* should only be used early */ +#endif + make_lowmem_page_readonly(__va(PFN_PHYS(pfn))); +} + +/* Early release_pte assumes that all pts are pinned, since there's + only init_mm and anything attached to that is pinned. 
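+
+   Roughly, the early alloc/release helpers are mirror images of each
+   other:
+
+     alloc:   make_lowmem_page_readonly(__va(PFN_PHYS(pfn)));
+              pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn);
+     release: pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn);
+              make_lowmem_page_readwrite(__va(PFN_PHYS(pfn)));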
*/ +static void __init xen_release_pte_init(unsigned long pfn) +{ + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); + make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); +} + +static void __init xen_release_pmd_init(unsigned long pfn) +{ + make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); +} + +static inline void __pin_pagetable_pfn(unsigned cmd, unsigned long pfn) +{ + struct multicall_space mcs; + struct mmuext_op *op; + + mcs = __xen_mc_entry(sizeof(*op)); + op = mcs.args; + op->cmd = cmd; + op->arg1.mfn = pfn_to_mfn(pfn); + + MULTI_mmuext_op(mcs.mc, mcs.args, 1, NULL, DOMID_SELF); +} + +static inline void __set_pfn_prot(unsigned long pfn, pgprot_t prot) +{ + struct multicall_space mcs; + unsigned long addr = (unsigned long)__va(pfn << PAGE_SHIFT); + + mcs = __xen_mc_entry(0); + MULTI_update_va_mapping(mcs.mc, (unsigned long)addr, + pfn_pte(pfn, prot), 0); +} + +/* This needs to make sure the new pte page is pinned iff its being + attached to a pinned pagetable. */ +static inline void xen_alloc_ptpage(struct mm_struct *mm, unsigned long pfn, + unsigned level) +{ + bool pinned = xen_page_pinned(mm->pgd); + + trace_xen_mmu_alloc_ptpage(mm, pfn, level, pinned); + + if (pinned) { + struct page *page = pfn_to_page(pfn); + + pinned = false; + if (static_branch_likely(&xen_struct_pages_ready)) { + pinned = PagePinned(page); + SetPagePinned(page); + } + + xen_mc_batch(); + + __set_pfn_prot(pfn, PAGE_KERNEL_RO); + + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS && !pinned) + __pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, pfn); + + xen_mc_issue(XEN_LAZY_MMU); + } +} + +static void xen_alloc_pte(struct mm_struct *mm, unsigned long pfn) +{ + xen_alloc_ptpage(mm, pfn, PT_PTE); +} + +static void xen_alloc_pmd(struct mm_struct *mm, unsigned long pfn) +{ + xen_alloc_ptpage(mm, pfn, PT_PMD); +} + +/* This should never happen until we're OK to use struct page */ +static inline void xen_release_ptpage(unsigned long pfn, unsigned level) +{ + struct page *page = pfn_to_page(pfn); + bool pinned = PagePinned(page); + + trace_xen_mmu_release_ptpage(pfn, level, pinned); + + if (pinned) { + xen_mc_batch(); + + if (level == PT_PTE && USE_SPLIT_PTE_PTLOCKS) + __pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, pfn); + + __set_pfn_prot(pfn, PAGE_KERNEL); + + xen_mc_issue(XEN_LAZY_MMU); + + ClearPagePinned(page); + } +} + +static void xen_release_pte(unsigned long pfn) +{ + xen_release_ptpage(pfn, PT_PTE); +} + +static void xen_release_pmd(unsigned long pfn) +{ + xen_release_ptpage(pfn, PT_PMD); +} + +static void xen_alloc_pud(struct mm_struct *mm, unsigned long pfn) +{ + xen_alloc_ptpage(mm, pfn, PT_PUD); +} + +static void xen_release_pud(unsigned long pfn) +{ + xen_release_ptpage(pfn, PT_PUD); +} + +/* + * Like __va(), but returns address in the kernel mapping (which is + * all we have until the physical memory mapping has been set up. 
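+ *
+ * E.g. __ka(0x100000) yields __START_KERNEL_map + 0x100000, whereas
+ * __va(0x100000) would yield PAGE_OFFSET + 0x100000.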
+ */ +static void * __init __ka(phys_addr_t paddr) +{ + return (void *)(paddr + __START_KERNEL_map); +} + +/* Convert a machine address to physical address */ +static unsigned long __init m2p(phys_addr_t maddr) +{ + phys_addr_t paddr; + + maddr &= XEN_PTE_MFN_MASK; + paddr = mfn_to_pfn(maddr >> PAGE_SHIFT) << PAGE_SHIFT; + + return paddr; +} + +/* Convert a machine address to kernel virtual */ +static void * __init m2v(phys_addr_t maddr) +{ + return __ka(m2p(maddr)); +} + +/* Set the page permissions on an identity-mapped pages */ +static void __init set_page_prot_flags(void *addr, pgprot_t prot, + unsigned long flags) +{ + unsigned long pfn = __pa(addr) >> PAGE_SHIFT; + pte_t pte = pfn_pte(pfn, prot); + + if (HYPERVISOR_update_va_mapping((unsigned long)addr, pte, flags)) + BUG(); +} +static void __init set_page_prot(void *addr, pgprot_t prot) +{ + return set_page_prot_flags(addr, prot, UVMF_NONE); +} + +void __init xen_setup_machphys_mapping(void) +{ + struct xen_machphys_mapping mapping; + + if (HYPERVISOR_memory_op(XENMEM_machphys_mapping, &mapping) == 0) { + machine_to_phys_mapping = (unsigned long *)mapping.v_start; + machine_to_phys_nr = mapping.max_mfn + 1; + } else { + machine_to_phys_nr = MACH2PHYS_NR_ENTRIES; + } +} + +static void __init convert_pfn_mfn(void *v) +{ + pte_t *pte = v; + int i; + + /* All levels are converted the same way, so just treat them + as ptes. */ + for (i = 0; i < PTRS_PER_PTE; i++) + pte[i] = xen_make_pte(pte[i].pte); +} +static void __init check_pt_base(unsigned long *pt_base, unsigned long *pt_end, + unsigned long addr) +{ + if (*pt_base == PFN_DOWN(__pa(addr))) { + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); + clear_page((void *)addr); + (*pt_base)++; + } + if (*pt_end == PFN_DOWN(__pa(addr))) { + set_page_prot_flags((void *)addr, PAGE_KERNEL, UVMF_INVLPG); + clear_page((void *)addr); + (*pt_end)--; + } +} +/* + * Set up the initial kernel pagetable. + * + * We can construct this by grafting the Xen provided pagetable into + * head_64.S's preconstructed pagetables. We copy the Xen L2's into + * level2_ident_pgt, and level2_kernel_pgt. This means that only the + * kernel has a physical mapping to start with - but that's enough to + * get __va working. We need to fill in the rest of the physical + * mapping once some sort of allocator has been set up. + */ +void __init xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn) +{ + pud_t *l3; + pmd_t *l2; + unsigned long addr[3]; + unsigned long pt_base, pt_end; + unsigned i; + + /* max_pfn_mapped is the last pfn mapped in the initial memory + * mappings. Considering that on Xen after the kernel mappings we + * have the mappings of some pages that don't exist in pfn space, we + * set max_pfn_mapped to the last real pfn mapped. */ + if (xen_start_info->mfn_list < __START_KERNEL_map) + max_pfn_mapped = xen_start_info->first_p2m_pfn; + else + max_pfn_mapped = PFN_DOWN(__pa(xen_start_info->mfn_list)); + + pt_base = PFN_DOWN(__pa(xen_start_info->pt_base)); + pt_end = pt_base + xen_start_info->nr_pt_frames; + + /* Zap identity mapping */ + init_top_pgt[0] = __pgd(0); + + /* Pre-constructed entries are in pfn, so convert to mfn */ + /* L4[273] -> level3_ident_pgt */ + /* L4[511] -> level3_kernel_pgt */ + convert_pfn_mfn(init_top_pgt); + + /* L3_i[0] -> level2_ident_pgt */ + convert_pfn_mfn(level3_ident_pgt); + /* L3_k[510] -> level2_kernel_pgt */ + /* L3_k[511] -> level2_fixmap_pgt */ + convert_pfn_mfn(level3_kernel_pgt); + + /* L3_k[511][508-FIXMAP_PMD_NUM ... 
507] -> level1_fixmap_pgt */ + convert_pfn_mfn(level2_fixmap_pgt); + + /* We get [511][511] and have Xen's version of level2_kernel_pgt */ + l3 = m2v(pgd[pgd_index(__START_KERNEL_map)].pgd); + l2 = m2v(l3[pud_index(__START_KERNEL_map)].pud); + + addr[0] = (unsigned long)pgd; + addr[1] = (unsigned long)l3; + addr[2] = (unsigned long)l2; + /* Graft it onto L4[273][0]. Note that we creating an aliasing problem: + * Both L4[273][0] and L4[511][510] have entries that point to the same + * L2 (PMD) tables. Meaning that if you modify it in __va space + * it will be also modified in the __ka space! (But if you just + * modify the PMD table to point to other PTE's or none, then you + * are OK - which is what cleanup_highmap does) */ + copy_page(level2_ident_pgt, l2); + /* Graft it onto L4[511][510] */ + copy_page(level2_kernel_pgt, l2); + + /* + * Zap execute permission from the ident map. Due to the sharing of + * L1 entries we need to do this in the L2. + */ + if (__supported_pte_mask & _PAGE_NX) { + for (i = 0; i < PTRS_PER_PMD; ++i) { + if (pmd_none(level2_ident_pgt[i])) + continue; + level2_ident_pgt[i] = pmd_set_flags(level2_ident_pgt[i], _PAGE_NX); + } + } + + /* Copy the initial P->M table mappings if necessary. */ + i = pgd_index(xen_start_info->mfn_list); + if (i && i < pgd_index(__START_KERNEL_map)) + init_top_pgt[i] = ((pgd_t *)xen_start_info->pt_base)[i]; + + /* Make pagetable pieces RO */ + set_page_prot(init_top_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level3_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_ident_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_kernel_pgt, PAGE_KERNEL_RO); + set_page_prot(level2_fixmap_pgt, PAGE_KERNEL_RO); + + for (i = 0; i < FIXMAP_PMD_NUM; i++) { + set_page_prot(level1_fixmap_pgt + i * PTRS_PER_PTE, + PAGE_KERNEL_RO); + } + + /* Pin down new L4 */ + pin_pagetable_pfn(MMUEXT_PIN_L4_TABLE, + PFN_DOWN(__pa_symbol(init_top_pgt))); + + /* Unpin Xen-provided one */ + pin_pagetable_pfn(MMUEXT_UNPIN_TABLE, PFN_DOWN(__pa(pgd))); + +#ifdef CONFIG_X86_VSYSCALL_EMULATION + /* Pin user vsyscall L3 */ + set_page_prot(level3_user_vsyscall, PAGE_KERNEL_RO); + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, + PFN_DOWN(__pa_symbol(level3_user_vsyscall))); +#endif + + /* + * At this stage there can be no user pgd, and no page structure to + * attach it to, so make sure we just set kernel pgd. + */ + xen_mc_batch(); + __xen_write_cr3(true, __pa(init_top_pgt)); + xen_mc_issue(XEN_LAZY_CPU); + + /* We can't that easily rip out L3 and L2, as the Xen pagetables are + * set out this way: [L4], [L1], [L2], [L3], [L1], [L1] ... for + * the initial domain. For guests using the toolstack, they are in: + * [L4], [L3], [L2], [L1], [L1], order .. So for dom0 we can only + * rip out the [L4] (pgd), but for guests we shave off three pages. + */ + for (i = 0; i < ARRAY_SIZE(addr); i++) + check_pt_base(&pt_base, &pt_end, addr[i]); + + /* Our (by three pages) smaller Xen pagetable that we are using */ + xen_pt_base = PFN_PHYS(pt_base); + xen_pt_size = (pt_end - pt_base) * PAGE_SIZE; + memblock_reserve(xen_pt_base, xen_pt_size); + + /* Revector the xen_start_info */ + xen_start_info = (struct start_info *)__va(__pa(xen_start_info)); +} + +/* + * Read a value from a physical address. 
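+ *
+ * It is used by xen_early_virt_to_phys() below to walk the boot page
+ * tables one level at a time, e.g.:
+ *
+ *   pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) *
+ *                                             sizeof(pgd)));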
+ */ +static unsigned long __init xen_read_phys_ulong(phys_addr_t addr) +{ + unsigned long *vaddr; + unsigned long val; + + vaddr = early_memremap_ro(addr, sizeof(val)); + val = *vaddr; + early_memunmap(vaddr, sizeof(val)); + return val; +} + +/* + * Translate a virtual address to a physical one without relying on mapped + * page tables. Don't rely on big pages being aligned in (guest) physical + * space! + */ +static phys_addr_t __init xen_early_virt_to_phys(unsigned long vaddr) +{ + phys_addr_t pa; + pgd_t pgd; + pud_t pud; + pmd_t pmd; + pte_t pte; + + pa = read_cr3_pa(); + pgd = native_make_pgd(xen_read_phys_ulong(pa + pgd_index(vaddr) * + sizeof(pgd))); + if (!pgd_present(pgd)) + return 0; + + pa = pgd_val(pgd) & PTE_PFN_MASK; + pud = native_make_pud(xen_read_phys_ulong(pa + pud_index(vaddr) * + sizeof(pud))); + if (!pud_present(pud)) + return 0; + pa = pud_val(pud) & PTE_PFN_MASK; + if (pud_large(pud)) + return pa + (vaddr & ~PUD_MASK); + + pmd = native_make_pmd(xen_read_phys_ulong(pa + pmd_index(vaddr) * + sizeof(pmd))); + if (!pmd_present(pmd)) + return 0; + pa = pmd_val(pmd) & PTE_PFN_MASK; + if (pmd_large(pmd)) + return pa + (vaddr & ~PMD_MASK); + + pte = native_make_pte(xen_read_phys_ulong(pa + pte_index(vaddr) * + sizeof(pte))); + if (!pte_present(pte)) + return 0; + pa = pte_pfn(pte) << PAGE_SHIFT; + + return pa | (vaddr & ~PAGE_MASK); +} + +/* + * Find a new area for the hypervisor supplied p2m list and relocate the p2m to + * this area. + */ +void __init xen_relocate_p2m(void) +{ + phys_addr_t size, new_area, pt_phys, pmd_phys, pud_phys; + unsigned long p2m_pfn, p2m_pfn_end, n_frames, pfn, pfn_end; + int n_pte, n_pt, n_pmd, n_pud, idx_pte, idx_pt, idx_pmd, idx_pud; + pte_t *pt; + pmd_t *pmd; + pud_t *pud; + pgd_t *pgd; + unsigned long *new_p2m; + + size = PAGE_ALIGN(xen_start_info->nr_pages * sizeof(unsigned long)); + n_pte = roundup(size, PAGE_SIZE) >> PAGE_SHIFT; + n_pt = roundup(size, PMD_SIZE) >> PMD_SHIFT; + n_pmd = roundup(size, PUD_SIZE) >> PUD_SHIFT; + n_pud = roundup(size, P4D_SIZE) >> P4D_SHIFT; + n_frames = n_pte + n_pt + n_pmd + n_pud; + + new_area = xen_find_free_area(PFN_PHYS(n_frames)); + if (!new_area) { + xen_raw_console_write("Can't find new memory area for p2m needed due to E820 map conflict\n"); + BUG(); + } + + /* + * Setup the page tables for addressing the new p2m list. + * We have asked the hypervisor to map the p2m list at the user address + * PUD_SIZE. It may have done so, or it may have used a kernel space + * address depending on the Xen version. + * To avoid any possible virtual address collision, just use + * 2 * PUD_SIZE for the new area. 
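+ *
+ * The new area itself is carved up front to back, matching the
+ * assignments just below:
+ *
+ *   new_area: [ n_pud PUD pages | n_pmd PMD pages | n_pt PT pages | p2m data ]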
+ */ + pud_phys = new_area; + pmd_phys = pud_phys + PFN_PHYS(n_pud); + pt_phys = pmd_phys + PFN_PHYS(n_pmd); + p2m_pfn = PFN_DOWN(pt_phys) + n_pt; + + pgd = __va(read_cr3_pa()); + new_p2m = (unsigned long *)(2 * PGDIR_SIZE); + for (idx_pud = 0; idx_pud < n_pud; idx_pud++) { + pud = early_memremap(pud_phys, PAGE_SIZE); + clear_page(pud); + for (idx_pmd = 0; idx_pmd < min(n_pmd, PTRS_PER_PUD); + idx_pmd++) { + pmd = early_memremap(pmd_phys, PAGE_SIZE); + clear_page(pmd); + for (idx_pt = 0; idx_pt < min(n_pt, PTRS_PER_PMD); + idx_pt++) { + pt = early_memremap(pt_phys, PAGE_SIZE); + clear_page(pt); + for (idx_pte = 0; + idx_pte < min(n_pte, PTRS_PER_PTE); + idx_pte++) { + pt[idx_pte] = pfn_pte(p2m_pfn, + PAGE_KERNEL); + p2m_pfn++; + } + n_pte -= PTRS_PER_PTE; + early_memunmap(pt, PAGE_SIZE); + make_lowmem_page_readonly(__va(pt_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L1_TABLE, + PFN_DOWN(pt_phys)); + pmd[idx_pt] = __pmd(_PAGE_TABLE | pt_phys); + pt_phys += PAGE_SIZE; + } + n_pt -= PTRS_PER_PMD; + early_memunmap(pmd, PAGE_SIZE); + make_lowmem_page_readonly(__va(pmd_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L2_TABLE, + PFN_DOWN(pmd_phys)); + pud[idx_pmd] = __pud(_PAGE_TABLE | pmd_phys); + pmd_phys += PAGE_SIZE; + } + n_pmd -= PTRS_PER_PUD; + early_memunmap(pud, PAGE_SIZE); + make_lowmem_page_readonly(__va(pud_phys)); + pin_pagetable_pfn(MMUEXT_PIN_L3_TABLE, PFN_DOWN(pud_phys)); + set_pgd(pgd + 2 + idx_pud, __pgd(_PAGE_TABLE | pud_phys)); + pud_phys += PAGE_SIZE; + } + + /* Now copy the old p2m info to the new area. */ + memcpy(new_p2m, xen_p2m_addr, size); + xen_p2m_addr = new_p2m; + + /* Release the old p2m list and set new list info. */ + p2m_pfn = PFN_DOWN(xen_early_virt_to_phys(xen_start_info->mfn_list)); + BUG_ON(!p2m_pfn); + p2m_pfn_end = p2m_pfn + PFN_DOWN(size); + + if (xen_start_info->mfn_list < __START_KERNEL_map) { + pfn = xen_start_info->first_p2m_pfn; + pfn_end = xen_start_info->first_p2m_pfn + + xen_start_info->nr_p2m_frames; + set_pgd(pgd + 1, __pgd(0)); + } else { + pfn = p2m_pfn; + pfn_end = p2m_pfn_end; + } + + memblock_phys_free(PFN_PHYS(pfn), PAGE_SIZE * (pfn_end - pfn)); + while (pfn < pfn_end) { + if (pfn == p2m_pfn) { + pfn = p2m_pfn_end; + continue; + } + make_lowmem_page_readwrite(__va(PFN_PHYS(pfn))); + pfn++; + } + + xen_start_info->mfn_list = (unsigned long)xen_p2m_addr; + xen_start_info->first_p2m_pfn = PFN_DOWN(new_area); + xen_start_info->nr_p2m_frames = n_frames; +} + +void __init xen_reserve_special_pages(void) +{ + phys_addr_t paddr; + + memblock_reserve(__pa(xen_start_info), PAGE_SIZE); + if (xen_start_info->store_mfn) { + paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->store_mfn)); + memblock_reserve(paddr, PAGE_SIZE); + } + if (!xen_initial_domain()) { + paddr = PFN_PHYS(mfn_to_pfn(xen_start_info->console.domU.mfn)); + memblock_reserve(paddr, PAGE_SIZE); + } +} + +void __init xen_pt_check_e820(void) +{ + if (xen_is_e820_reserved(xen_pt_base, xen_pt_size)) { + xen_raw_console_write("Xen hypervisor allocated page table memory conflicts with E820 map\n"); + BUG(); + } +} + +static unsigned char dummy_mapping[PAGE_SIZE] __page_aligned_bss; + +static void xen_set_fixmap(unsigned idx, phys_addr_t phys, pgprot_t prot) +{ + pte_t pte; + unsigned long vaddr; + + phys >>= PAGE_SHIFT; + + switch (idx) { + case FIX_BTMAP_END ... 
FIX_BTMAP_BEGIN: +#ifdef CONFIG_X86_VSYSCALL_EMULATION + case VSYSCALL_PAGE: +#endif + /* All local page mappings */ + pte = pfn_pte(phys, prot); + break; + +#ifdef CONFIG_X86_LOCAL_APIC + case FIX_APIC_BASE: /* maps dummy local APIC */ + pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); + break; +#endif + +#ifdef CONFIG_X86_IO_APIC + case FIX_IO_APIC_BASE_0 ... FIX_IO_APIC_BASE_END: + /* + * We just don't map the IO APIC - all access is via + * hypercalls. Keep the address in the pte for reference. + */ + pte = pfn_pte(PFN_DOWN(__pa(dummy_mapping)), PAGE_KERNEL); + break; +#endif + + case FIX_PARAVIRT_BOOTMAP: + /* This is an MFN, but it isn't an IO mapping from the + IO domain */ + pte = mfn_pte(phys, prot); + break; + + default: + /* By default, set_fixmap is used for hardware mappings */ + pte = mfn_pte(phys, prot); + break; + } + + vaddr = __fix_to_virt(idx); + if (HYPERVISOR_update_va_mapping(vaddr, pte, UVMF_INVLPG)) + BUG(); + +#ifdef CONFIG_X86_VSYSCALL_EMULATION + /* Replicate changes to map the vsyscall page into the user + pagetable vsyscall mapping. */ + if (idx == VSYSCALL_PAGE) + set_pte_vaddr_pud(level3_user_vsyscall, vaddr, pte); +#endif +} + +static void xen_enter_lazy_mmu(void) +{ + enter_lazy(XEN_LAZY_MMU); +} + +static void xen_flush_lazy_mmu(void) +{ + preempt_disable(); + + if (xen_get_lazy_mode() == XEN_LAZY_MMU) { + arch_leave_lazy_mmu_mode(); + arch_enter_lazy_mmu_mode(); + } + + preempt_enable(); +} + +static void __init xen_post_allocator_init(void) +{ + pv_ops.mmu.set_pte = xen_set_pte; + pv_ops.mmu.set_pmd = xen_set_pmd; + pv_ops.mmu.set_pud = xen_set_pud; + pv_ops.mmu.set_p4d = xen_set_p4d; + + /* This will work as long as patching hasn't happened yet + (which it hasn't) */ + pv_ops.mmu.alloc_pte = xen_alloc_pte; + pv_ops.mmu.alloc_pmd = xen_alloc_pmd; + pv_ops.mmu.release_pte = xen_release_pte; + pv_ops.mmu.release_pmd = xen_release_pmd; + pv_ops.mmu.alloc_pud = xen_alloc_pud; + pv_ops.mmu.release_pud = xen_release_pud; + pv_ops.mmu.make_pte = PV_CALLEE_SAVE(xen_make_pte); + + pv_ops.mmu.write_cr3 = &xen_write_cr3; +} + +static void xen_leave_lazy_mmu(void) +{ + preempt_disable(); + xen_mc_flush(); + leave_lazy(XEN_LAZY_MMU); + preempt_enable(); +} + +static const typeof(pv_ops) xen_mmu_ops __initconst = { + .mmu = { + .read_cr2 = __PV_IS_CALLEE_SAVE(xen_read_cr2), + .write_cr2 = xen_write_cr2, + + .read_cr3 = xen_read_cr3, + .write_cr3 = xen_write_cr3_init, + + .flush_tlb_user = xen_flush_tlb, + .flush_tlb_kernel = xen_flush_tlb, + .flush_tlb_one_user = xen_flush_tlb_one_user, + .flush_tlb_multi = xen_flush_tlb_multi, + .tlb_remove_table = tlb_remove_table, + + .pgd_alloc = xen_pgd_alloc, + .pgd_free = xen_pgd_free, + + .alloc_pte = xen_alloc_pte_init, + .release_pte = xen_release_pte_init, + .alloc_pmd = xen_alloc_pmd_init, + .release_pmd = xen_release_pmd_init, + + .set_pte = xen_set_pte_init, + .set_pmd = xen_set_pmd_hyper, + + .ptep_modify_prot_start = xen_ptep_modify_prot_start, + .ptep_modify_prot_commit = xen_ptep_modify_prot_commit, + + .pte_val = PV_CALLEE_SAVE(xen_pte_val), + .pgd_val = PV_CALLEE_SAVE(xen_pgd_val), + + .make_pte = PV_CALLEE_SAVE(xen_make_pte_init), + .make_pgd = PV_CALLEE_SAVE(xen_make_pgd), + + .set_pud = xen_set_pud_hyper, + + .make_pmd = PV_CALLEE_SAVE(xen_make_pmd), + .pmd_val = PV_CALLEE_SAVE(xen_pmd_val), + + .pud_val = PV_CALLEE_SAVE(xen_pud_val), + .make_pud = PV_CALLEE_SAVE(xen_make_pud), + .set_p4d = xen_set_p4d_hyper, + + .alloc_pud = xen_alloc_pmd_init, + .release_pud = xen_release_pmd_init, + +#if 
CONFIG_PGTABLE_LEVELS >= 5 + .p4d_val = PV_CALLEE_SAVE(xen_p4d_val), + .make_p4d = PV_CALLEE_SAVE(xen_make_p4d), +#endif + + .enter_mmap = xen_enter_mmap, + .exit_mmap = xen_exit_mmap, + + .lazy_mode = { + .enter = xen_enter_lazy_mmu, + .leave = xen_leave_lazy_mmu, + .flush = xen_flush_lazy_mmu, + }, + + .set_fixmap = xen_set_fixmap, + }, +}; + +void __init xen_init_mmu_ops(void) +{ + x86_init.paging.pagetable_init = xen_pagetable_init; + x86_init.hyper.init_after_bootmem = xen_after_bootmem; + + pv_ops.mmu = xen_mmu_ops.mmu; + + memset(dummy_mapping, 0xff, PAGE_SIZE); +} + +/* Protected by xen_reservation_lock. */ +#define MAX_CONTIG_ORDER 9 /* 2MB */ +static unsigned long discontig_frames[1<<MAX_CONTIG_ORDER]; + +#define VOID_PTE (mfn_pte(0, __pgprot(0))) +static void xen_zap_pfn_range(unsigned long vaddr, unsigned int order, + unsigned long *in_frames, + unsigned long *out_frames) +{ + int i; + struct multicall_space mcs; + + xen_mc_batch(); + for (i = 0; i < (1UL<<order); i++, vaddr += PAGE_SIZE) { + mcs = __xen_mc_entry(0); + + if (in_frames) + in_frames[i] = virt_to_mfn((void *)vaddr); + + MULTI_update_va_mapping(mcs.mc, vaddr, VOID_PTE, 0); + __set_phys_to_machine(virt_to_pfn((void *)vaddr), INVALID_P2M_ENTRY); + + if (out_frames) + out_frames[i] = virt_to_pfn((void *)vaddr); + } + xen_mc_issue(0); +} + +/* + * Update the pfn-to-mfn mappings for a virtual address range, either to + * point to an array of mfns, or contiguously from a single starting + * mfn. + */ +static void xen_remap_exchanged_ptes(unsigned long vaddr, int order, + unsigned long *mfns, + unsigned long first_mfn) +{ + unsigned i, limit; + unsigned long mfn; + + xen_mc_batch(); + + limit = 1u << order; + for (i = 0; i < limit; i++, vaddr += PAGE_SIZE) { + struct multicall_space mcs; + unsigned flags; + + mcs = __xen_mc_entry(0); + if (mfns) + mfn = mfns[i]; + else + mfn = first_mfn + i; + + if (i < (limit - 1)) + flags = 0; + else { + if (order == 0) + flags = UVMF_INVLPG | UVMF_ALL; + else + flags = UVMF_TLB_FLUSH | UVMF_ALL; + } + + MULTI_update_va_mapping(mcs.mc, vaddr, + mfn_pte(mfn, PAGE_KERNEL), flags); + + set_phys_to_machine(virt_to_pfn((void *)vaddr), mfn); + } + + xen_mc_issue(0); +} + +/* + * Perform the hypercall to exchange a region of our pfns to point to + * memory with the required contiguous alignment. Takes the pfns as + * input, and populates mfns as output. + * + * Returns a success code indicating whether the hypervisor was able to + * satisfy the request or not. 
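+ *
+ * The two callers below use it in opposite directions, roughly:
+ *
+ *   make contiguous: xen_exchange_memory(1UL << order, 0, in_frames,
+ *                                        1, order, &out_frame, address_bits);
+ *   break up again:  xen_exchange_memory(1, order, &in_frame,
+ *                                        1UL << order, 0, out_frames, 0);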
+ */ +static int xen_exchange_memory(unsigned long extents_in, unsigned int order_in, + unsigned long *pfns_in, + unsigned long extents_out, + unsigned int order_out, + unsigned long *mfns_out, + unsigned int address_bits) +{ + long rc; + int success; + + struct xen_memory_exchange exchange = { + .in = { + .nr_extents = extents_in, + .extent_order = order_in, + .extent_start = pfns_in, + .domid = DOMID_SELF + }, + .out = { + .nr_extents = extents_out, + .extent_order = order_out, + .extent_start = mfns_out, + .address_bits = address_bits, + .domid = DOMID_SELF + } + }; + + BUG_ON(extents_in << order_in != extents_out << order_out); + + rc = HYPERVISOR_memory_op(XENMEM_exchange, &exchange); + success = (exchange.nr_exchanged == extents_in); + + BUG_ON(!success && ((exchange.nr_exchanged != 0) || (rc == 0))); + BUG_ON(success && (rc != 0)); + + return success; +} + +int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order, + unsigned int address_bits, + dma_addr_t *dma_handle) +{ + unsigned long *in_frames = discontig_frames, out_frame; + unsigned long flags; + int success; + unsigned long vstart = (unsigned long)phys_to_virt(pstart); + + if (unlikely(order > MAX_CONTIG_ORDER)) + return -ENOMEM; + + memset((void *) vstart, 0, PAGE_SIZE << order); + + spin_lock_irqsave(&xen_reservation_lock, flags); + + /* 1. Zap current PTEs, remembering MFNs. */ + xen_zap_pfn_range(vstart, order, in_frames, NULL); + + /* 2. Get a new contiguous memory extent. */ + out_frame = virt_to_pfn((void *)vstart); + success = xen_exchange_memory(1UL << order, 0, in_frames, + 1, order, &out_frame, + address_bits); + + /* 3. Map the new extent in place of old pages. */ + if (success) + xen_remap_exchanged_ptes(vstart, order, NULL, out_frame); + else + xen_remap_exchanged_ptes(vstart, order, in_frames, 0); + + spin_unlock_irqrestore(&xen_reservation_lock, flags); + + *dma_handle = virt_to_machine(vstart).maddr; + return success ? 0 : -ENOMEM; +} + +void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order) +{ + unsigned long *out_frames = discontig_frames, in_frame; + unsigned long flags; + int success; + unsigned long vstart; + + if (unlikely(order > MAX_CONTIG_ORDER)) + return; + + vstart = (unsigned long)phys_to_virt(pstart); + memset((void *) vstart, 0, PAGE_SIZE << order); + + spin_lock_irqsave(&xen_reservation_lock, flags); + + /* 1. Find start MFN of contiguous extent. */ + in_frame = virt_to_mfn((void *)vstart); + + /* 2. Zap current PTEs. */ + xen_zap_pfn_range(vstart, order, NULL, out_frames); + + /* 3. Do the exchange for non-contiguous MFNs. */ + success = xen_exchange_memory(1, order, &in_frame, 1UL << order, + 0, out_frames, 0); + + /* 4. Map new pages in place of old pages. 
*/ + if (success) + xen_remap_exchanged_ptes(vstart, order, out_frames, 0); + else + xen_remap_exchanged_ptes(vstart, order, NULL, in_frame); + + spin_unlock_irqrestore(&xen_reservation_lock, flags); +} + +static noinline void xen_flush_tlb_all(void) +{ + struct mmuext_op *op; + struct multicall_space mcs; + + preempt_disable(); + + mcs = xen_mc_entry(sizeof(*op)); + + op = mcs.args; + op->cmd = MMUEXT_TLB_FLUSH_ALL; + MULTI_mmuext_op(mcs.mc, op, 1, NULL, DOMID_SELF); + + xen_mc_issue(XEN_LAZY_MMU); + + preempt_enable(); +} + +#define REMAP_BATCH_SIZE 16 + +struct remap_data { + xen_pfn_t *pfn; + bool contiguous; + bool no_translate; + pgprot_t prot; + struct mmu_update *mmu_update; +}; + +static int remap_area_pfn_pte_fn(pte_t *ptep, unsigned long addr, void *data) +{ + struct remap_data *rmd = data; + pte_t pte = pte_mkspecial(mfn_pte(*rmd->pfn, rmd->prot)); + + /* + * If we have a contiguous range, just update the pfn itself, + * else update pointer to be "next pfn". + */ + if (rmd->contiguous) + (*rmd->pfn)++; + else + rmd->pfn++; + + rmd->mmu_update->ptr = virt_to_machine(ptep).maddr; + rmd->mmu_update->ptr |= rmd->no_translate ? + MMU_PT_UPDATE_NO_TRANSLATE : + MMU_NORMAL_PT_UPDATE; + rmd->mmu_update->val = pte_val_ma(pte); + rmd->mmu_update++; + + return 0; +} + +int xen_remap_pfn(struct vm_area_struct *vma, unsigned long addr, + xen_pfn_t *pfn, int nr, int *err_ptr, pgprot_t prot, + unsigned int domid, bool no_translate) +{ + int err = 0; + struct remap_data rmd; + struct mmu_update mmu_update[REMAP_BATCH_SIZE]; + unsigned long range; + int mapped = 0; + + BUG_ON(!((vma->vm_flags & (VM_PFNMAP | VM_IO)) == (VM_PFNMAP | VM_IO))); + + rmd.pfn = pfn; + rmd.prot = prot; + /* + * We use the err_ptr to indicate if there we are doing a contiguous + * mapping or a discontiguous mapping. + */ + rmd.contiguous = !err_ptr; + rmd.no_translate = no_translate; + + while (nr) { + int index = 0; + int done = 0; + int batch = min(REMAP_BATCH_SIZE, nr); + int batch_left = batch; + + range = (unsigned long)batch << PAGE_SHIFT; + + rmd.mmu_update = mmu_update; + err = apply_to_page_range(vma->vm_mm, addr, range, + remap_area_pfn_pte_fn, &rmd); + if (err) + goto out; + + /* + * We record the error for each page that gives an error, but + * continue mapping until the whole set is done + */ + do { + int i; + + err = HYPERVISOR_mmu_update(&mmu_update[index], + batch_left, &done, domid); + + /* + * @err_ptr may be the same buffer as @gfn, so + * only clear it after each chunk of @gfn is + * used. + */ + if (err_ptr) { + for (i = index; i < index + done; i++) + err_ptr[i] = 0; + } + if (err < 0) { + if (!err_ptr) + goto out; + err_ptr[i] = err; + done++; /* Skip failed frame. */ + } else + mapped += done; + batch_left -= done; + index += done; + } while (batch_left); + + nr -= batch; + addr += range; + if (err_ptr) + err_ptr += batch; + cond_resched(); + } +out: + + xen_flush_tlb_all(); + + return err < 0 ? err : mapped; +} +EXPORT_SYMBOL_GPL(xen_remap_pfn); + +#ifdef CONFIG_KEXEC_CORE +phys_addr_t paddr_vmcoreinfo_note(void) +{ + if (xen_pv_domain()) + return virt_to_machine(vmcoreinfo_note).maddr; + else + return __pa(vmcoreinfo_note); +} +#endif /* CONFIG_KEXEC_CORE */ diff --git a/arch/x86/xen/multicalls.c b/arch/x86/xen/multicalls.c new file mode 100644 index 0000000000..0705457229 --- /dev/null +++ b/arch/x86/xen/multicalls.c @@ -0,0 +1,214 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xen hypercall batching. 
+ * + * Xen allows multiple hypercalls to be issued at once, using the + * multicall interface. This allows the cost of trapping into the + * hypervisor to be amortized over several calls. + * + * This file implements a simple interface for multicalls. There's a + * per-cpu buffer of outstanding multicalls. When you want to queue a + * multicall for issuing, you can allocate a multicall slot for the + * call and its arguments, along with storage for space which is + * pointed to by the arguments (for passing pointers to structures, + * etc). When the multicall is actually issued, all the space for the + * commands and allocated memory is freed for reuse. + * + * Multicalls are flushed whenever any of the buffers get full, or + * when explicitly requested. There's no way to get per-multicall + * return results back. It will BUG if any of the multicalls fail. + * + * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 + */ +#include <linux/percpu.h> +#include <linux/hardirq.h> +#include <linux/debugfs.h> + +#include <asm/xen/hypercall.h> + +#include "multicalls.h" +#include "debugfs.h" + +#define MC_BATCH 32 + +#define MC_DEBUG 0 + +#define MC_ARGS (MC_BATCH * 16) + + +struct mc_buffer { + unsigned mcidx, argidx, cbidx; + struct multicall_entry entries[MC_BATCH]; +#if MC_DEBUG + struct multicall_entry debug[MC_BATCH]; + void *caller[MC_BATCH]; +#endif + unsigned char args[MC_ARGS]; + struct callback { + void (*fn)(void *); + void *data; + } callbacks[MC_BATCH]; +}; + +static DEFINE_PER_CPU(struct mc_buffer, mc_buffer); +DEFINE_PER_CPU(unsigned long, xen_mc_irq_flags); + +void xen_mc_flush(void) +{ + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); + struct multicall_entry *mc; + int ret = 0; + unsigned long flags; + int i; + + BUG_ON(preemptible()); + + /* Disable interrupts in case someone comes in and queues + something in the middle */ + local_irq_save(flags); + + trace_xen_mc_flush(b->mcidx, b->argidx, b->cbidx); + +#if MC_DEBUG + memcpy(b->debug, b->entries, + b->mcidx * sizeof(struct multicall_entry)); +#endif + + switch (b->mcidx) { + case 0: + /* no-op */ + BUG_ON(b->argidx != 0); + break; + + case 1: + /* Singleton multicall - bypass multicall machinery + and just do the call directly. 
*/ + mc = &b->entries[0]; + + mc->result = xen_single_call(mc->op, mc->args[0], mc->args[1], + mc->args[2], mc->args[3], + mc->args[4]); + ret = mc->result < 0; + break; + + default: + if (HYPERVISOR_multicall(b->entries, b->mcidx) != 0) + BUG(); + for (i = 0; i < b->mcidx; i++) + if (b->entries[i].result < 0) + ret++; + } + + if (WARN_ON(ret)) { + pr_err("%d of %d multicall(s) failed: cpu %d\n", + ret, b->mcidx, smp_processor_id()); + for (i = 0; i < b->mcidx; i++) { + if (b->entries[i].result < 0) { +#if MC_DEBUG + pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\t%pS\n", + i + 1, + b->debug[i].op, + b->debug[i].args[0], + b->entries[i].result, + b->caller[i]); +#else + pr_err(" call %2d: op=%lu arg=[%lx] result=%ld\n", + i + 1, + b->entries[i].op, + b->entries[i].args[0], + b->entries[i].result); +#endif + } + } + } + + b->mcidx = 0; + b->argidx = 0; + + for (i = 0; i < b->cbidx; i++) { + struct callback *cb = &b->callbacks[i]; + + (*cb->fn)(cb->data); + } + b->cbidx = 0; + + local_irq_restore(flags); +} + +struct multicall_space __xen_mc_entry(size_t args) +{ + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); + struct multicall_space ret; + unsigned argidx = roundup(b->argidx, sizeof(u64)); + + trace_xen_mc_entry_alloc(args); + + BUG_ON(preemptible()); + BUG_ON(b->argidx >= MC_ARGS); + + if (unlikely(b->mcidx == MC_BATCH || + (argidx + args) >= MC_ARGS)) { + trace_xen_mc_flush_reason((b->mcidx == MC_BATCH) ? + XEN_MC_FL_BATCH : XEN_MC_FL_ARGS); + xen_mc_flush(); + argidx = roundup(b->argidx, sizeof(u64)); + } + + ret.mc = &b->entries[b->mcidx]; +#if MC_DEBUG + b->caller[b->mcidx] = __builtin_return_address(0); +#endif + b->mcidx++; + ret.args = &b->args[argidx]; + b->argidx = argidx + args; + + BUG_ON(b->argidx >= MC_ARGS); + return ret; +} + +struct multicall_space xen_mc_extend_args(unsigned long op, size_t size) +{ + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); + struct multicall_space ret = { NULL, NULL }; + + BUG_ON(preemptible()); + BUG_ON(b->argidx >= MC_ARGS); + + if (unlikely(b->mcidx == 0 || + b->entries[b->mcidx - 1].op != op)) { + trace_xen_mc_extend_args(op, size, XEN_MC_XE_BAD_OP); + goto out; + } + + if (unlikely((b->argidx + size) >= MC_ARGS)) { + trace_xen_mc_extend_args(op, size, XEN_MC_XE_NO_SPACE); + goto out; + } + + ret.mc = &b->entries[b->mcidx - 1]; + ret.args = &b->args[b->argidx]; + b->argidx += size; + + BUG_ON(b->argidx >= MC_ARGS); + + trace_xen_mc_extend_args(op, size, XEN_MC_XE_OK); +out: + return ret; +} + +void xen_mc_callback(void (*fn)(void *), void *data) +{ + struct mc_buffer *b = this_cpu_ptr(&mc_buffer); + struct callback *cb; + + if (b->cbidx == MC_BATCH) { + trace_xen_mc_flush_reason(XEN_MC_FL_CALLBACK); + xen_mc_flush(); + } + + trace_xen_mc_callback(fn, data); + + cb = &b->callbacks[b->cbidx++]; + cb->fn = fn; + cb->data = data; +} diff --git a/arch/x86/xen/multicalls.h b/arch/x86/xen/multicalls.h new file mode 100644 index 0000000000..c3867b585e --- /dev/null +++ b/arch/x86/xen/multicalls.h @@ -0,0 +1,69 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XEN_MULTICALLS_H +#define _XEN_MULTICALLS_H + +#include <trace/events/xen.h> + +#include "xen-ops.h" + +/* Multicalls */ +struct multicall_space +{ + struct multicall_entry *mc; + void *args; +}; + +/* Allocate room for a multicall and its args */ +struct multicall_space __xen_mc_entry(size_t args); + +DECLARE_PER_CPU(unsigned long, xen_mc_irq_flags); + +/* Call to start a batch of multiple __xen_mc_entry()s. 
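+   A typical sequence, as used throughout mmu_pv.c, looks roughly like:
+
+     xen_mc_batch();
+     mcs = __xen_mc_entry(sizeof(*op));
+     ... fill in mcs.args and queue the call via a MULTI_*() helper ...
+     xen_mc_issue(XEN_LAZY_MMU);
+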
Must be + paired with xen_mc_issue() */ +static inline void xen_mc_batch(void) +{ + unsigned long flags; + + /* need to disable interrupts until this entry is complete */ + local_irq_save(flags); + trace_xen_mc_batch(xen_get_lazy_mode()); + __this_cpu_write(xen_mc_irq_flags, flags); +} + +static inline struct multicall_space xen_mc_entry(size_t args) +{ + xen_mc_batch(); + return __xen_mc_entry(args); +} + +/* Flush all pending multicalls */ +void xen_mc_flush(void); + +/* Issue a multicall if we're not in a lazy mode */ +static inline void xen_mc_issue(unsigned mode) +{ + trace_xen_mc_issue(mode); + + if ((xen_get_lazy_mode() & mode) == 0) + xen_mc_flush(); + + /* restore flags saved in xen_mc_batch */ + local_irq_restore(this_cpu_read(xen_mc_irq_flags)); +} + +/* Set up a callback to be called when the current batch is flushed */ +void xen_mc_callback(void (*fn)(void *), void *data); + +/* + * Try to extend the arguments of the previous multicall command. The + * previous command's op must match. If it does, then it attempts to + * extend the argument space allocated to the multicall entry by + * arg_size bytes. + * + * The returned multicall_space will return with mc pointing to the + * command on success, or NULL on failure, and args pointing to the + * newly allocated space. + */ +struct multicall_space xen_mc_extend_args(unsigned long op, size_t arg_size); + +#endif /* _XEN_MULTICALLS_H */ diff --git a/arch/x86/xen/p2m.c b/arch/x86/xen/p2m.c new file mode 100644 index 0000000000..9bdc3b656b --- /dev/null +++ b/arch/x86/xen/p2m.c @@ -0,0 +1,841 @@ +// SPDX-License-Identifier: GPL-2.0 + +/* + * Xen leaves the responsibility for maintaining p2m mappings to the + * guests themselves, but it must also access and update the p2m array + * during suspend/resume when all the pages are reallocated. + * + * The logical flat p2m table is mapped to a linear kernel memory area. + * For accesses by Xen a three-level tree linked via mfns only is set up to + * allow the address space to be sparse. + * + * Xen + * | + * p2m_top_mfn + * / \ + * p2m_mid_mfn p2m_mid_mfn + * / / + * p2m p2m p2m ... + * + * The p2m_mid_mfn pages are mapped by p2m_top_mfn_p. + * + * The p2m_top_mfn level is limited to 1 page, so the maximum representable + * pseudo-physical address space is: + * P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE pages + * + * P2M_PER_PAGE depends on the architecture, as a mfn is always + * unsigned long (8 bytes on 64-bit, 4 bytes on 32), leading to + * 512 and 1024 entries respectively. + * + * In short, these structures contain the Machine Frame Number (MFN) of the PFN. + * + * However not all entries are filled with MFNs. Specifically for all other + * leaf entries, or for the top root, or middle one, for which there is a void + * entry, we assume it is "missing". So (for example) + * pfn_to_mfn(0x90909090)=INVALID_P2M_ENTRY. + * We have a dedicated page p2m_missing with all entries being + * INVALID_P2M_ENTRY. This page may be referenced multiple times in the p2m + * list/tree in case there are multiple areas with P2M_PER_PAGE invalid pfns. + * + * We also have the possibility of setting 1-1 mappings on certain regions, so + * that: + * pfn_to_mfn(0xc0000)=0xc0000 + * + * The benefit of this is, that we can assume for non-RAM regions (think + * PCI BARs, or ACPI spaces), we can create mappings easily because we + * get the PFN value to match the MFN. + * + * For this to work efficiently we have one new page p2m_identity. 
All entries + * in p2m_identity are set to INVALID_P2M_ENTRY type (Xen toolstack only + * recognizes that and MFNs, no other fancy value). + * + * On lookup we spot that the entry points to p2m_identity and return the + * identity value instead of dereferencing and returning INVALID_P2M_ENTRY. + * If the entry points to an allocated page, we just proceed as before and + * return the PFN. If the PFN has IDENTITY_FRAME_BIT set we unmask that in + * appropriate functions (pfn_to_mfn). + * + * The reason for having the IDENTITY_FRAME_BIT instead of just returning the + * PFN is that we could find ourselves where pfn_to_mfn(pfn)==pfn for a + * non-identity pfn. To protect ourselves against we elect to set (and get) the + * IDENTITY_FRAME_BIT on all identity mapped PFNs. + */ + +#include <linux/init.h> +#include <linux/export.h> +#include <linux/list.h> +#include <linux/hash.h> +#include <linux/sched.h> +#include <linux/seq_file.h> +#include <linux/memblock.h> +#include <linux/slab.h> +#include <linux/vmalloc.h> + +#include <asm/cache.h> +#include <asm/setup.h> +#include <linux/uaccess.h> + +#include <asm/xen/page.h> +#include <asm/xen/hypercall.h> +#include <asm/xen/hypervisor.h> +#include <xen/balloon.h> +#include <xen/grant_table.h> + +#include "multicalls.h" +#include "xen-ops.h" + +#define P2M_MID_PER_PAGE (PAGE_SIZE / sizeof(unsigned long *)) +#define P2M_TOP_PER_PAGE (PAGE_SIZE / sizeof(unsigned long **)) + +#define MAX_P2M_PFN (P2M_TOP_PER_PAGE * P2M_MID_PER_PAGE * P2M_PER_PAGE) + +#define PMDS_PER_MID_PAGE (P2M_MID_PER_PAGE / PTRS_PER_PTE) + +unsigned long *xen_p2m_addr __read_mostly; +EXPORT_SYMBOL_GPL(xen_p2m_addr); +unsigned long xen_p2m_size __read_mostly; +EXPORT_SYMBOL_GPL(xen_p2m_size); +unsigned long xen_max_p2m_pfn __read_mostly; +EXPORT_SYMBOL_GPL(xen_max_p2m_pfn); + +#ifdef CONFIG_XEN_MEMORY_HOTPLUG_LIMIT +#define P2M_LIMIT CONFIG_XEN_MEMORY_HOTPLUG_LIMIT +#else +#define P2M_LIMIT 0 +#endif + +static DEFINE_SPINLOCK(p2m_update_lock); + +static unsigned long *p2m_mid_missing_mfn; +static unsigned long *p2m_top_mfn; +static unsigned long **p2m_top_mfn_p; +static unsigned long *p2m_missing; +static unsigned long *p2m_identity; +static pte_t *p2m_missing_pte; +static pte_t *p2m_identity_pte; + +/* + * Hint at last populated PFN. + * + * Used to set HYPERVISOR_shared_info->arch.max_pfn so the toolstack + * can avoid scanning the whole P2M (which may be sized to account for + * hotplugged memory). 
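+ *
+ * The hint is advanced in xen_alloc_p2m_entry() whenever a pfn beyond
+ * the current value is populated:
+ *
+ *   xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE);
+ *   HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;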
+ */ +static unsigned long xen_p2m_last_pfn; + +static inline unsigned p2m_top_index(unsigned long pfn) +{ + BUG_ON(pfn >= MAX_P2M_PFN); + return pfn / (P2M_MID_PER_PAGE * P2M_PER_PAGE); +} + +static inline unsigned p2m_mid_index(unsigned long pfn) +{ + return (pfn / P2M_PER_PAGE) % P2M_MID_PER_PAGE; +} + +static void p2m_top_mfn_init(unsigned long *top) +{ + unsigned i; + + for (i = 0; i < P2M_TOP_PER_PAGE; i++) + top[i] = virt_to_mfn(p2m_mid_missing_mfn); +} + +static void p2m_top_mfn_p_init(unsigned long **top) +{ + unsigned i; + + for (i = 0; i < P2M_TOP_PER_PAGE; i++) + top[i] = p2m_mid_missing_mfn; +} + +static void p2m_mid_mfn_init(unsigned long *mid, unsigned long *leaf) +{ + unsigned i; + + for (i = 0; i < P2M_MID_PER_PAGE; i++) + mid[i] = virt_to_mfn(leaf); +} + +static void p2m_init(unsigned long *p2m) +{ + unsigned i; + + for (i = 0; i < P2M_PER_PAGE; i++) + p2m[i] = INVALID_P2M_ENTRY; +} + +static void p2m_init_identity(unsigned long *p2m, unsigned long pfn) +{ + unsigned i; + + for (i = 0; i < P2M_PER_PAGE; i++) + p2m[i] = IDENTITY_FRAME(pfn + i); +} + +static void * __ref alloc_p2m_page(void) +{ + if (unlikely(!slab_is_available())) { + void *ptr = memblock_alloc(PAGE_SIZE, PAGE_SIZE); + + if (!ptr) + panic("%s: Failed to allocate %lu bytes align=0x%lx\n", + __func__, PAGE_SIZE, PAGE_SIZE); + + return ptr; + } + + return (void *)__get_free_page(GFP_KERNEL); +} + +static void __ref free_p2m_page(void *p) +{ + if (unlikely(!slab_is_available())) { + memblock_free(p, PAGE_SIZE); + return; + } + + free_page((unsigned long)p); +} + +/* + * Build the parallel p2m_top_mfn and p2m_mid_mfn structures + * + * This is called both at boot time, and after resuming from suspend: + * - At boot time we're called rather early, and must use alloc_bootmem*() + * to allocate memory. + * + * - After resume we're called from within stop_machine, but the mfn + * tree should already be completely allocated. + */ +void __ref xen_build_mfn_list_list(void) +{ + unsigned long pfn, mfn; + pte_t *ptep; + unsigned int level, topidx, mididx; + unsigned long *mid_mfn_p; + + if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) + return; + + /* Pre-initialize p2m_top_mfn to be completely missing */ + if (p2m_top_mfn == NULL) { + p2m_mid_missing_mfn = alloc_p2m_page(); + p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); + + p2m_top_mfn_p = alloc_p2m_page(); + p2m_top_mfn_p_init(p2m_top_mfn_p); + + p2m_top_mfn = alloc_p2m_page(); + p2m_top_mfn_init(p2m_top_mfn); + } else { + /* Reinitialise, mfn's all change after migration */ + p2m_mid_mfn_init(p2m_mid_missing_mfn, p2m_missing); + } + + for (pfn = 0; pfn < xen_max_p2m_pfn && pfn < MAX_P2M_PFN; + pfn += P2M_PER_PAGE) { + topidx = p2m_top_index(pfn); + mididx = p2m_mid_index(pfn); + + mid_mfn_p = p2m_top_mfn_p[topidx]; + ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), + &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); + mfn = pte_mfn(*ptep); + ptep = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); + + /* Don't bother allocating any mfn mid levels if + * they're just missing, just update the stored mfn, + * since all could have changed over a migrate. 
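+		 *
+		 * In that case the code just below skips ahead a whole mid
+		 * page worth of pfns in a single step:
+		 *
+		 *   pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE;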
+ */ + if (ptep == p2m_missing_pte || ptep == p2m_identity_pte) { + BUG_ON(mididx); + BUG_ON(mid_mfn_p != p2m_mid_missing_mfn); + p2m_top_mfn[topidx] = virt_to_mfn(p2m_mid_missing_mfn); + pfn += (P2M_MID_PER_PAGE - 1) * P2M_PER_PAGE; + continue; + } + + if (mid_mfn_p == p2m_mid_missing_mfn) { + mid_mfn_p = alloc_p2m_page(); + p2m_mid_mfn_init(mid_mfn_p, p2m_missing); + + p2m_top_mfn_p[topidx] = mid_mfn_p; + } + + p2m_top_mfn[topidx] = virt_to_mfn(mid_mfn_p); + mid_mfn_p[mididx] = mfn; + } +} + +void xen_setup_mfn_list_list(void) +{ + BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info); + + if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS) + HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL; + else + HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = + virt_to_mfn(p2m_top_mfn); + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; + HYPERVISOR_shared_info->arch.p2m_generation = 0; + HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr; + HYPERVISOR_shared_info->arch.p2m_cr3 = + xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir)); +} + +/* Set up p2m_top to point to the domain-builder provided p2m pages */ +void __init xen_build_dynamic_phys_to_machine(void) +{ + unsigned long pfn; + + xen_p2m_addr = (unsigned long *)xen_start_info->mfn_list; + xen_p2m_size = ALIGN(xen_start_info->nr_pages, P2M_PER_PAGE); + + for (pfn = xen_start_info->nr_pages; pfn < xen_p2m_size; pfn++) + xen_p2m_addr[pfn] = INVALID_P2M_ENTRY; + + xen_max_p2m_pfn = xen_p2m_size; +} + +#define P2M_TYPE_IDENTITY 0 +#define P2M_TYPE_MISSING 1 +#define P2M_TYPE_PFN 2 +#define P2M_TYPE_UNKNOWN 3 + +static int xen_p2m_elem_type(unsigned long pfn) +{ + unsigned long mfn; + + if (pfn >= xen_p2m_size) + return P2M_TYPE_IDENTITY; + + mfn = xen_p2m_addr[pfn]; + + if (mfn == INVALID_P2M_ENTRY) + return P2M_TYPE_MISSING; + + if (mfn & IDENTITY_FRAME_BIT) + return P2M_TYPE_IDENTITY; + + return P2M_TYPE_PFN; +} + +static void __init xen_rebuild_p2m_list(unsigned long *p2m) +{ + unsigned int i, chunk; + unsigned long pfn; + unsigned long *mfns; + pte_t *ptep; + pmd_t *pmdp; + int type; + + p2m_missing = alloc_p2m_page(); + p2m_init(p2m_missing); + p2m_identity = alloc_p2m_page(); + p2m_init(p2m_identity); + + p2m_missing_pte = alloc_p2m_page(); + paravirt_alloc_pte(&init_mm, __pa(p2m_missing_pte) >> PAGE_SHIFT); + p2m_identity_pte = alloc_p2m_page(); + paravirt_alloc_pte(&init_mm, __pa(p2m_identity_pte) >> PAGE_SHIFT); + for (i = 0; i < PTRS_PER_PTE; i++) { + set_pte(p2m_missing_pte + i, + pfn_pte(PFN_DOWN(__pa(p2m_missing)), PAGE_KERNEL_RO)); + set_pte(p2m_identity_pte + i, + pfn_pte(PFN_DOWN(__pa(p2m_identity)), PAGE_KERNEL_RO)); + } + + for (pfn = 0; pfn < xen_max_p2m_pfn; pfn += chunk) { + /* + * Try to map missing/identity PMDs or p2m-pages if possible. + * We have to respect the structure of the mfn_list_list + * which will be built just afterwards. + * Chunk size to test is one p2m page if we are in the middle + * of a mfn_list_list mid page and the complete mid page area + * if we are at index 0 of the mid page. Please note that a + * mid page might cover more than one PMD, e.g. on 32 bit PAE + * kernels. + */ + chunk = (pfn & (P2M_PER_PAGE * P2M_MID_PER_PAGE - 1)) ? + P2M_PER_PAGE : P2M_PER_PAGE * P2M_MID_PER_PAGE; + + type = xen_p2m_elem_type(pfn); + i = 0; + if (type != P2M_TYPE_PFN) + for (i = 1; i < chunk; i++) + if (xen_p2m_elem_type(pfn + i) != type) + break; + if (i < chunk) + /* Reset to minimal chunk size. 
*/ + chunk = P2M_PER_PAGE; + + if (type == P2M_TYPE_PFN || i < chunk) { + /* Use initial p2m page contents. */ + mfns = alloc_p2m_page(); + copy_page(mfns, xen_p2m_addr + pfn); + ptep = populate_extra_pte((unsigned long)(p2m + pfn)); + set_pte(ptep, + pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL)); + continue; + } + + if (chunk == P2M_PER_PAGE) { + /* Map complete missing or identity p2m-page. */ + mfns = (type == P2M_TYPE_MISSING) ? + p2m_missing : p2m_identity; + ptep = populate_extra_pte((unsigned long)(p2m + pfn)); + set_pte(ptep, + pfn_pte(PFN_DOWN(__pa(mfns)), PAGE_KERNEL_RO)); + continue; + } + + /* Complete missing or identity PMD(s) can be mapped. */ + ptep = (type == P2M_TYPE_MISSING) ? + p2m_missing_pte : p2m_identity_pte; + for (i = 0; i < PMDS_PER_MID_PAGE; i++) { + pmdp = populate_extra_pmd( + (unsigned long)(p2m + pfn) + i * PMD_SIZE); + set_pmd(pmdp, __pmd(__pa(ptep) | _KERNPG_TABLE)); + } + } +} + +void __init xen_vmalloc_p2m_tree(void) +{ + static struct vm_struct vm; + unsigned long p2m_limit; + + xen_p2m_last_pfn = xen_max_p2m_pfn; + + p2m_limit = (phys_addr_t)P2M_LIMIT * 1024 * 1024 * 1024 / PAGE_SIZE; + vm.flags = VM_ALLOC; + vm.size = ALIGN(sizeof(unsigned long) * max(xen_max_p2m_pfn, p2m_limit), + PMD_SIZE * PMDS_PER_MID_PAGE); + vm_area_register_early(&vm, PMD_SIZE * PMDS_PER_MID_PAGE); + pr_notice("p2m virtual area at %p, size is %lx\n", vm.addr, vm.size); + + xen_max_p2m_pfn = vm.size / sizeof(unsigned long); + + xen_rebuild_p2m_list(vm.addr); + + xen_p2m_addr = vm.addr; + xen_p2m_size = xen_max_p2m_pfn; + + xen_inv_extra_mem(); +} + +unsigned long get_phys_to_machine(unsigned long pfn) +{ + pte_t *ptep; + unsigned int level; + + if (unlikely(pfn >= xen_p2m_size)) { + if (pfn < xen_max_p2m_pfn) + return xen_chk_extra_mem(pfn); + + return IDENTITY_FRAME(pfn); + } + + ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); + + /* + * The INVALID_P2M_ENTRY is filled in both p2m_*identity + * and in p2m_*missing, so returning the INVALID_P2M_ENTRY + * would be wrong. + */ + if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity))) + return IDENTITY_FRAME(pfn); + + return xen_p2m_addr[pfn]; +} +EXPORT_SYMBOL_GPL(get_phys_to_machine); + +/* + * Allocate new pmd(s). It is checked whether the old pmd is still in place. + * If not, nothing is changed. This is okay as the only reason for allocating + * a new pmd is to replace p2m_missing_pte or p2m_identity_pte by a individual + * pmd. + */ +static pte_t *alloc_p2m_pmd(unsigned long addr, pte_t *pte_pg) +{ + pte_t *ptechk; + pte_t *pte_newpg[PMDS_PER_MID_PAGE]; + pmd_t *pmdp; + unsigned int level; + unsigned long flags; + unsigned long vaddr; + int i; + + /* Do all allocations first to bail out in error case. */ + for (i = 0; i < PMDS_PER_MID_PAGE; i++) { + pte_newpg[i] = alloc_p2m_page(); + if (!pte_newpg[i]) { + for (i--; i >= 0; i--) + free_p2m_page(pte_newpg[i]); + + return NULL; + } + } + + vaddr = addr & ~(PMD_SIZE * PMDS_PER_MID_PAGE - 1); + + for (i = 0; i < PMDS_PER_MID_PAGE; i++) { + copy_page(pte_newpg[i], pte_pg); + paravirt_alloc_pte(&init_mm, __pa(pte_newpg[i]) >> PAGE_SHIFT); + + pmdp = lookup_pmd_address(vaddr); + BUG_ON(!pmdp); + + spin_lock_irqsave(&p2m_update_lock, flags); + + ptechk = lookup_address(vaddr, &level); + if (ptechk == pte_pg) { + HYPERVISOR_shared_info->arch.p2m_generation++; + wmb(); /* Tools are synchronizing via p2m_generation. 
*/ + set_pmd(pmdp, + __pmd(__pa(pte_newpg[i]) | _KERNPG_TABLE)); + wmb(); /* Tools are synchronizing via p2m_generation. */ + HYPERVISOR_shared_info->arch.p2m_generation++; + pte_newpg[i] = NULL; + } + + spin_unlock_irqrestore(&p2m_update_lock, flags); + + if (pte_newpg[i]) { + paravirt_release_pte(__pa(pte_newpg[i]) >> PAGE_SHIFT); + free_p2m_page(pte_newpg[i]); + } + + vaddr += PMD_SIZE; + } + + return lookup_address(addr, &level); +} + +/* + * Fully allocate the p2m structure for a given pfn. We need to check + * that both the top and mid levels are allocated, and make sure the + * parallel mfn tree is kept in sync. We may race with other cpus, so + * the new pages are installed with cmpxchg; if we lose the race then + * simply free the page we allocated and use the one that's there. + */ +int xen_alloc_p2m_entry(unsigned long pfn) +{ + unsigned topidx; + unsigned long *top_mfn_p, *mid_mfn; + pte_t *ptep, *pte_pg; + unsigned int level; + unsigned long flags; + unsigned long addr = (unsigned long)(xen_p2m_addr + pfn); + unsigned long p2m_pfn; + + ptep = lookup_address(addr, &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); + pte_pg = (pte_t *)((unsigned long)ptep & ~(PAGE_SIZE - 1)); + + if (pte_pg == p2m_missing_pte || pte_pg == p2m_identity_pte) { + /* PMD level is missing, allocate a new one */ + ptep = alloc_p2m_pmd(addr, pte_pg); + if (!ptep) + return -ENOMEM; + } + + if (p2m_top_mfn && pfn < MAX_P2M_PFN) { + topidx = p2m_top_index(pfn); + top_mfn_p = &p2m_top_mfn[topidx]; + mid_mfn = READ_ONCE(p2m_top_mfn_p[topidx]); + + BUG_ON(virt_to_mfn(mid_mfn) != *top_mfn_p); + + if (mid_mfn == p2m_mid_missing_mfn) { + /* Separately check the mid mfn level */ + unsigned long missing_mfn; + unsigned long mid_mfn_mfn; + unsigned long old_mfn; + + mid_mfn = alloc_p2m_page(); + if (!mid_mfn) + return -ENOMEM; + + p2m_mid_mfn_init(mid_mfn, p2m_missing); + + missing_mfn = virt_to_mfn(p2m_mid_missing_mfn); + mid_mfn_mfn = virt_to_mfn(mid_mfn); + old_mfn = cmpxchg(top_mfn_p, missing_mfn, mid_mfn_mfn); + if (old_mfn != missing_mfn) { + free_p2m_page(mid_mfn); + mid_mfn = mfn_to_virt(old_mfn); + } else { + p2m_top_mfn_p[topidx] = mid_mfn; + } + } + } else { + mid_mfn = NULL; + } + + p2m_pfn = pte_pfn(READ_ONCE(*ptep)); + if (p2m_pfn == PFN_DOWN(__pa(p2m_identity)) || + p2m_pfn == PFN_DOWN(__pa(p2m_missing))) { + /* p2m leaf page is missing */ + unsigned long *p2m; + + p2m = alloc_p2m_page(); + if (!p2m) + return -ENOMEM; + + if (p2m_pfn == PFN_DOWN(__pa(p2m_missing))) + p2m_init(p2m); + else + p2m_init_identity(p2m, pfn & ~(P2M_PER_PAGE - 1)); + + spin_lock_irqsave(&p2m_update_lock, flags); + + if (pte_pfn(*ptep) == p2m_pfn) { + HYPERVISOR_shared_info->arch.p2m_generation++; + wmb(); /* Tools are synchronizing via p2m_generation. */ + set_pte(ptep, + pfn_pte(PFN_DOWN(__pa(p2m)), PAGE_KERNEL)); + wmb(); /* Tools are synchronizing via p2m_generation. */ + HYPERVISOR_shared_info->arch.p2m_generation++; + if (mid_mfn) + mid_mfn[p2m_mid_index(pfn)] = virt_to_mfn(p2m); + p2m = NULL; + } + + spin_unlock_irqrestore(&p2m_update_lock, flags); + + if (p2m) + free_p2m_page(p2m); + } + + /* Expanded the p2m? 
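+	   If so, advance the last-populated hint and publish it to the
+	   hypervisor via HYPERVISOR_shared_info->arch.max_pfn.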
*/ + if (pfn >= xen_p2m_last_pfn) { + xen_p2m_last_pfn = ALIGN(pfn + 1, P2M_PER_PAGE); + HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn; + } + + return 0; +} +EXPORT_SYMBOL(xen_alloc_p2m_entry); + +unsigned long __init set_phys_range_identity(unsigned long pfn_s, + unsigned long pfn_e) +{ + unsigned long pfn; + + if (unlikely(pfn_s >= xen_p2m_size)) + return 0; + + if (pfn_s > pfn_e) + return 0; + + if (pfn_e > xen_p2m_size) + pfn_e = xen_p2m_size; + + for (pfn = pfn_s; pfn < pfn_e; pfn++) + xen_p2m_addr[pfn] = IDENTITY_FRAME(pfn); + + return pfn - pfn_s; +} + +bool __set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + pte_t *ptep; + unsigned int level; + + /* Only invalid entries allowed above the highest p2m covered frame. */ + if (unlikely(pfn >= xen_p2m_size)) + return mfn == INVALID_P2M_ENTRY; + + /* + * The interface requires atomic updates on p2m elements. + * xen_safe_write_ulong() is using an atomic store via asm(). + */ + if (likely(!xen_safe_write_ulong(xen_p2m_addr + pfn, mfn))) + return true; + + ptep = lookup_address((unsigned long)(xen_p2m_addr + pfn), &level); + BUG_ON(!ptep || level != PG_LEVEL_4K); + + if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_missing))) + return mfn == INVALID_P2M_ENTRY; + + if (pte_pfn(*ptep) == PFN_DOWN(__pa(p2m_identity))) + return mfn == IDENTITY_FRAME(pfn); + + return false; +} + +bool set_phys_to_machine(unsigned long pfn, unsigned long mfn) +{ + if (unlikely(!__set_phys_to_machine(pfn, mfn))) { + int ret; + + ret = xen_alloc_p2m_entry(pfn); + if (ret < 0) + return false; + + return __set_phys_to_machine(pfn, mfn); + } + + return true; +} + +int set_foreign_p2m_mapping(struct gnttab_map_grant_ref *map_ops, + struct gnttab_map_grant_ref *kmap_ops, + struct page **pages, unsigned int count) +{ + int i, ret = 0; + pte_t *pte; + + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + + if (kmap_ops) { + ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, + kmap_ops, count); + if (ret) + goto out; + } + + for (i = 0; i < count; i++) { + unsigned long mfn, pfn; + struct gnttab_unmap_grant_ref unmap[2]; + int rc; + + /* Do not add to override if the map failed. */ + if (map_ops[i].status != GNTST_okay || + (kmap_ops && kmap_ops[i].status != GNTST_okay)) + continue; + + if (map_ops[i].flags & GNTMAP_contains_pte) { + pte = (pte_t *)(mfn_to_virt(PFN_DOWN(map_ops[i].host_addr)) + + (map_ops[i].host_addr & ~PAGE_MASK)); + mfn = pte_mfn(*pte); + } else { + mfn = PFN_DOWN(map_ops[i].dev_bus_addr); + } + pfn = page_to_pfn(pages[i]); + + WARN(pfn_to_mfn(pfn) != INVALID_P2M_ENTRY, "page must be ballooned"); + + if (likely(set_phys_to_machine(pfn, FOREIGN_FRAME(mfn)))) + continue; + + /* + * Signal an error for this slot. This in turn requires + * immediate unmapping. + */ + map_ops[i].status = GNTST_general_error; + unmap[0].host_addr = map_ops[i].host_addr, + unmap[0].handle = map_ops[i].handle; + map_ops[i].handle = INVALID_GRANT_HANDLE; + if (map_ops[i].flags & GNTMAP_device_map) + unmap[0].dev_bus_addr = map_ops[i].dev_bus_addr; + else + unmap[0].dev_bus_addr = 0; + + if (kmap_ops) { + kmap_ops[i].status = GNTST_general_error; + unmap[1].host_addr = kmap_ops[i].host_addr, + unmap[1].handle = kmap_ops[i].handle; + kmap_ops[i].handle = INVALID_GRANT_HANDLE; + if (kmap_ops[i].flags & GNTMAP_device_map) + unmap[1].dev_bus_addr = kmap_ops[i].dev_bus_addr; + else + unmap[1].dev_bus_addr = 0; + } + + /* + * Pre-populate both status fields, to be recognizable in + * the log message below. 
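+ * GNTST_* status values are zero or negative, so a value of 1 in
+ * the message marks a slot the hypercall did not fill in (e.g.
+ * unmap[1] when there are no kmap_ops).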
+ */ + unmap[0].status = 1; + unmap[1].status = 1; + + rc = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + unmap, 1 + !!kmap_ops); + if (rc || unmap[0].status != GNTST_okay || + unmap[1].status != GNTST_okay) + pr_err_once("gnttab unmap failed: rc=%d st0=%d st1=%d\n", + rc, unmap[0].status, unmap[1].status); + } + +out: + return ret; +} + +int clear_foreign_p2m_mapping(struct gnttab_unmap_grant_ref *unmap_ops, + struct gnttab_unmap_grant_ref *kunmap_ops, + struct page **pages, unsigned int count) +{ + int i, ret = 0; + + if (xen_feature(XENFEAT_auto_translated_physmap)) + return 0; + + for (i = 0; i < count; i++) { + unsigned long mfn = __pfn_to_mfn(page_to_pfn(pages[i])); + unsigned long pfn = page_to_pfn(pages[i]); + + if (mfn != INVALID_P2M_ENTRY && (mfn & FOREIGN_FRAME_BIT)) + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + else + ret = -EINVAL; + } + if (kunmap_ops) + ret = HYPERVISOR_grant_table_op(GNTTABOP_unmap_grant_ref, + kunmap_ops, count) ?: ret; + + return ret; +} + +#ifdef CONFIG_XEN_DEBUG_FS +#include <linux/debugfs.h> +#include "debugfs.h" +static int p2m_dump_show(struct seq_file *m, void *v) +{ + static const char * const type_name[] = { + [P2M_TYPE_IDENTITY] = "identity", + [P2M_TYPE_MISSING] = "missing", + [P2M_TYPE_PFN] = "pfn", + [P2M_TYPE_UNKNOWN] = "abnormal"}; + unsigned long pfn, first_pfn; + int type, prev_type; + + prev_type = xen_p2m_elem_type(0); + first_pfn = 0; + + for (pfn = 0; pfn < xen_p2m_size; pfn++) { + type = xen_p2m_elem_type(pfn); + if (type != prev_type) { + seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn, + type_name[prev_type]); + prev_type = type; + first_pfn = pfn; + } + } + seq_printf(m, " [0x%lx->0x%lx] %s\n", first_pfn, pfn, + type_name[prev_type]); + return 0; +} + +DEFINE_SHOW_ATTRIBUTE(p2m_dump); + +static struct dentry *d_mmu_debug; + +static int __init xen_p2m_debugfs(void) +{ + struct dentry *d_xen = xen_init_debugfs(); + + d_mmu_debug = debugfs_create_dir("mmu", d_xen); + + debugfs_create_file("p2m", 0600, d_mmu_debug, NULL, &p2m_dump_fops); + return 0; +} +fs_initcall(xen_p2m_debugfs); +#endif /* CONFIG_XEN_DEBUG_FS */ diff --git a/arch/x86/xen/platform-pci-unplug.c b/arch/x86/xen/platform-pci-unplug.c new file mode 100644 index 0000000000..62ac4898df --- /dev/null +++ b/arch/x86/xen/platform-pci-unplug.c @@ -0,0 +1,210 @@ +// SPDX-License-Identifier: GPL-2.0 + +/****************************************************************************** + * platform-pci-unplug.c + * + * Xen platform PCI device driver + * Copyright (c) 2010, Citrix + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/init.h> +#include <linux/io.h> +#include <linux/export.h> + +#include <xen/xen.h> +#include <xen/platform_pci.h> +#include "xen-ops.h" + +#define XEN_PLATFORM_ERR_MAGIC -1 +#define XEN_PLATFORM_ERR_PROTOCOL -2 +#define XEN_PLATFORM_ERR_BLACKLIST -3 + +/* store the value of xen_emul_unplug after the unplug is done */ +static int xen_platform_pci_unplug; +static int xen_emul_unplug; + +static int check_platform_magic(void) +{ + short magic; + char protocol; + + magic = inw(XEN_IOPORT_MAGIC); + if (magic != XEN_IOPORT_MAGIC_VAL) { + pr_err("Xen Platform PCI: unrecognised magic value\n"); + return XEN_PLATFORM_ERR_MAGIC; + } + + protocol = inb(XEN_IOPORT_PROTOVER); + + pr_debug("Xen Platform PCI: I/O protocol version %d\n", + protocol); + + switch (protocol) { + case 1: + outw(XEN_IOPORT_LINUX_PRODNUM, XEN_IOPORT_PRODNUM); + outl(XEN_IOPORT_LINUX_DRVVER, XEN_IOPORT_DRVVER); + if (inw(XEN_IOPORT_MAGIC) != XEN_IOPORT_MAGIC_VAL) 
{ + pr_err("Xen Platform: blacklisted by host\n"); + return XEN_PLATFORM_ERR_BLACKLIST; + } + break; + default: + pr_warn("Xen Platform PCI: unknown I/O protocol version\n"); + return XEN_PLATFORM_ERR_PROTOCOL; + } + + return 0; +} + +bool xen_has_pv_devices(void) +{ + if (!xen_domain()) + return false; + + /* PV and PVH domains always have them. */ + if (xen_pv_domain() || xen_pvh_domain()) + return true; + + /* And user has xen_platform_pci=0 set in guest config as + * driver did not modify the value. */ + if (xen_platform_pci_unplug == 0) + return false; + + if (xen_platform_pci_unplug & XEN_UNPLUG_NEVER) + return false; + + if (xen_platform_pci_unplug & XEN_UNPLUG_ALL) + return true; + + /* This is an odd one - we are going to run legacy + * and PV drivers at the same time. */ + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) + return true; + + /* And the caller has to follow with xen_pv_{disk,nic}_devices + * to be certain which driver can load. */ + return false; +} +EXPORT_SYMBOL_GPL(xen_has_pv_devices); + +static bool __xen_has_pv_device(int state) +{ + /* HVM domains might or might not */ + if (xen_hvm_domain() && (xen_platform_pci_unplug & state)) + return true; + + return xen_has_pv_devices(); +} + +bool xen_has_pv_nic_devices(void) +{ + return __xen_has_pv_device(XEN_UNPLUG_ALL_NICS | XEN_UNPLUG_ALL); +} +EXPORT_SYMBOL_GPL(xen_has_pv_nic_devices); + +bool xen_has_pv_disk_devices(void) +{ + return __xen_has_pv_device(XEN_UNPLUG_ALL_IDE_DISKS | + XEN_UNPLUG_AUX_IDE_DISKS | XEN_UNPLUG_ALL); +} +EXPORT_SYMBOL_GPL(xen_has_pv_disk_devices); + +/* + * This one is odd - it determines whether you want to run PV _and_ + * legacy (IDE) drivers together. This combination is only possible + * under HVM. + */ +bool xen_has_pv_and_legacy_disk_devices(void) +{ + if (!xen_domain()) + return false; + + /* N.B. This is only ever used in HVM mode */ + if (xen_pv_domain()) + return false; + + if (xen_platform_pci_unplug & XEN_UNPLUG_UNNECESSARY) + return true; + + return false; +} +EXPORT_SYMBOL_GPL(xen_has_pv_and_legacy_disk_devices); + +void xen_unplug_emulated_devices(void) +{ + int r; + + /* PVH guests don't have emulated devices. */ + if (xen_pvh_domain()) + return; + + /* user explicitly requested no unplug */ + if (xen_emul_unplug & XEN_UNPLUG_NEVER) + return; + /* check the version of the xen platform PCI device */ + r = check_platform_magic(); + /* If the version matches enable the Xen platform PCI driver. + * Also enable the Xen platform PCI driver if the host does + * not support the unplug protocol (XEN_PLATFORM_ERR_MAGIC) + * but the user told us that unplugging is unnecessary. */ + if (r && !(r == XEN_PLATFORM_ERR_MAGIC && + (xen_emul_unplug & XEN_UNPLUG_UNNECESSARY))) + return; + /* Set the default value of xen_emul_unplug depending on whether or + * not the Xen PV frontends and the Xen platform PCI driver have + * been compiled for this kernel (modules or built-in are both OK). 
*/ + if (!xen_emul_unplug) { + if (xen_must_unplug_nics()) { + pr_info("Netfront and the Xen platform PCI driver have " + "been compiled for this kernel: unplug emulated NICs.\n"); + xen_emul_unplug |= XEN_UNPLUG_ALL_NICS; + } + if (xen_must_unplug_disks()) { + pr_info("Blkfront and the Xen platform PCI driver have " + "been compiled for this kernel: unplug emulated disks.\n" + "You might have to change the root device\n" + "from /dev/hd[a-d] to /dev/xvd[a-d]\n" + "in your root= kernel command line option\n"); + xen_emul_unplug |= XEN_UNPLUG_ALL_IDE_DISKS; + } + } + /* Now unplug the emulated devices */ + if (!(xen_emul_unplug & XEN_UNPLUG_UNNECESSARY)) + outw(xen_emul_unplug, XEN_IOPORT_UNPLUG); + xen_platform_pci_unplug = xen_emul_unplug; +} + +static int __init parse_xen_emul_unplug(char *arg) +{ + char *p, *q; + int l; + + for (p = arg; p; p = q) { + q = strchr(p, ','); + if (q) { + l = q - p; + q++; + } else { + l = strlen(p); + } + if (!strncmp(p, "all", l)) + xen_emul_unplug |= XEN_UNPLUG_ALL; + else if (!strncmp(p, "ide-disks", l)) + xen_emul_unplug |= XEN_UNPLUG_ALL_IDE_DISKS; + else if (!strncmp(p, "aux-ide-disks", l)) + xen_emul_unplug |= XEN_UNPLUG_AUX_IDE_DISKS; + else if (!strncmp(p, "nics", l)) + xen_emul_unplug |= XEN_UNPLUG_ALL_NICS; + else if (!strncmp(p, "unnecessary", l)) + xen_emul_unplug |= XEN_UNPLUG_UNNECESSARY; + else if (!strncmp(p, "never", l)) + xen_emul_unplug |= XEN_UNPLUG_NEVER; + else + pr_warn("unrecognised option '%s' " + "in parameter 'xen_emul_unplug'\n", p); + } + return 0; +} +early_param("xen_emul_unplug", parse_xen_emul_unplug); diff --git a/arch/x86/xen/pmu.c b/arch/x86/xen/pmu.c new file mode 100644 index 0000000000..246d67dab5 --- /dev/null +++ b/arch/x86/xen/pmu.c @@ -0,0 +1,586 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/interrupt.h> + +#include <asm/xen/hypercall.h> +#include <xen/xen.h> +#include <xen/page.h> +#include <xen/interface/xen.h> +#include <xen/interface/vcpu.h> +#include <xen/interface/xenpmu.h> + +#include "xen-ops.h" +#include "pmu.h" + +/* x86_pmu.handle_irq definition */ +#include "../events/perf_event.h" + +#define XENPMU_IRQ_PROCESSING 1 +struct xenpmu { + /* Shared page between hypervisor and domain */ + struct xen_pmu_data *xenpmu_data; + + uint8_t flags; +}; +static DEFINE_PER_CPU(struct xenpmu, xenpmu_shared); +#define get_xenpmu_data() (this_cpu_ptr(&xenpmu_shared)->xenpmu_data) +#define get_xenpmu_flags() (this_cpu_ptr(&xenpmu_shared)->flags) + +/* Macro for computing address of a PMU MSR bank */ +#define field_offset(ctxt, field) ((void *)((uintptr_t)ctxt + \ + (uintptr_t)ctxt->field)) + +/* AMD PMU */ +#define F15H_NUM_COUNTERS 6 +#define F10H_NUM_COUNTERS 4 + +static __read_mostly uint32_t amd_counters_base; +static __read_mostly uint32_t amd_ctrls_base; +static __read_mostly int amd_msr_step; +static __read_mostly int k7_counters_mirrored; +static __read_mostly int amd_num_counters; + +/* Intel PMU */ +#define MSR_TYPE_COUNTER 0 +#define MSR_TYPE_CTRL 1 +#define MSR_TYPE_GLOBAL 2 +#define MSR_TYPE_ARCH_COUNTER 3 +#define MSR_TYPE_ARCH_CTRL 4 + +/* Number of general pmu registers (CPUID.EAX[0xa].EAX[8..15]) */ +#define PMU_GENERAL_NR_SHIFT 8 +#define PMU_GENERAL_NR_BITS 8 +#define PMU_GENERAL_NR_MASK (((1 << PMU_GENERAL_NR_BITS) - 1) \ + << PMU_GENERAL_NR_SHIFT) + +/* Number of fixed pmu registers (CPUID.EDX[0xa].EDX[0..4]) */ +#define PMU_FIXED_NR_SHIFT 0 +#define PMU_FIXED_NR_BITS 5 +#define PMU_FIXED_NR_MASK (((1 << PMU_FIXED_NR_BITS) - 1) \ + << PMU_FIXED_NR_SHIFT) + +/* 
Alias registers (0x4c1) for full-width writes to PMCs */ +#define MSR_PMC_ALIAS_MASK (~(MSR_IA32_PERFCTR0 ^ MSR_IA32_PMC0)) + +#define INTEL_PMC_TYPE_SHIFT 30 + +static __read_mostly int intel_num_arch_counters, intel_num_fixed_counters; + + +static void xen_pmu_arch_init(void) +{ + if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) { + + switch (boot_cpu_data.x86) { + case 0x15: + amd_num_counters = F15H_NUM_COUNTERS; + amd_counters_base = MSR_F15H_PERF_CTR; + amd_ctrls_base = MSR_F15H_PERF_CTL; + amd_msr_step = 2; + k7_counters_mirrored = 1; + break; + case 0x10: + case 0x12: + case 0x14: + case 0x16: + default: + amd_num_counters = F10H_NUM_COUNTERS; + amd_counters_base = MSR_K7_PERFCTR0; + amd_ctrls_base = MSR_K7_EVNTSEL0; + amd_msr_step = 1; + k7_counters_mirrored = 0; + break; + } + } else if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) { + amd_num_counters = F10H_NUM_COUNTERS; + amd_counters_base = MSR_K7_PERFCTR0; + amd_ctrls_base = MSR_K7_EVNTSEL0; + amd_msr_step = 1; + k7_counters_mirrored = 0; + } else { + uint32_t eax, ebx, ecx, edx; + + cpuid(0xa, &eax, &ebx, &ecx, &edx); + + intel_num_arch_counters = (eax & PMU_GENERAL_NR_MASK) >> + PMU_GENERAL_NR_SHIFT; + intel_num_fixed_counters = (edx & PMU_FIXED_NR_MASK) >> + PMU_FIXED_NR_SHIFT; + } +} + +static inline uint32_t get_fam15h_addr(u32 addr) +{ + switch (addr) { + case MSR_K7_PERFCTR0: + case MSR_K7_PERFCTR1: + case MSR_K7_PERFCTR2: + case MSR_K7_PERFCTR3: + return MSR_F15H_PERF_CTR + (addr - MSR_K7_PERFCTR0); + case MSR_K7_EVNTSEL0: + case MSR_K7_EVNTSEL1: + case MSR_K7_EVNTSEL2: + case MSR_K7_EVNTSEL3: + return MSR_F15H_PERF_CTL + (addr - MSR_K7_EVNTSEL0); + default: + break; + } + + return addr; +} + +static inline bool is_amd_pmu_msr(unsigned int msr) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD && + boot_cpu_data.x86_vendor != X86_VENDOR_HYGON) + return false; + + if ((msr >= MSR_F15H_PERF_CTL && + msr < MSR_F15H_PERF_CTR + (amd_num_counters * 2)) || + (msr >= MSR_K7_EVNTSEL0 && + msr < MSR_K7_PERFCTR0 + amd_num_counters)) + return true; + + return false; +} + +static bool is_intel_pmu_msr(u32 msr_index, int *type, int *index) +{ + u32 msr_index_pmc; + + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL && + boot_cpu_data.x86_vendor != X86_VENDOR_CENTAUR && + boot_cpu_data.x86_vendor != X86_VENDOR_ZHAOXIN) + return false; + + switch (msr_index) { + case MSR_CORE_PERF_FIXED_CTR_CTRL: + case MSR_IA32_DS_AREA: + case MSR_IA32_PEBS_ENABLE: + *type = MSR_TYPE_CTRL; + return true; + + case MSR_CORE_PERF_GLOBAL_CTRL: + case MSR_CORE_PERF_GLOBAL_STATUS: + case MSR_CORE_PERF_GLOBAL_OVF_CTRL: + *type = MSR_TYPE_GLOBAL; + return true; + + default: + + if ((msr_index >= MSR_CORE_PERF_FIXED_CTR0) && + (msr_index < MSR_CORE_PERF_FIXED_CTR0 + + intel_num_fixed_counters)) { + *index = msr_index - MSR_CORE_PERF_FIXED_CTR0; + *type = MSR_TYPE_COUNTER; + return true; + } + + if ((msr_index >= MSR_P6_EVNTSEL0) && + (msr_index < MSR_P6_EVNTSEL0 + intel_num_arch_counters)) { + *index = msr_index - MSR_P6_EVNTSEL0; + *type = MSR_TYPE_ARCH_CTRL; + return true; + } + + msr_index_pmc = msr_index & MSR_PMC_ALIAS_MASK; + if ((msr_index_pmc >= MSR_IA32_PERFCTR0) && + (msr_index_pmc < MSR_IA32_PERFCTR0 + + intel_num_arch_counters)) { + *type = MSR_TYPE_ARCH_COUNTER; + *index = msr_index_pmc - MSR_IA32_PERFCTR0; + return true; + } + return false; + } +} + +static bool xen_intel_pmu_emulate(unsigned int msr, u64 *val, int type, + int index, bool is_read) +{ + uint64_t *reg = NULL; + struct xen_pmu_intel_ctxt *ctxt; + uint64_t *fix_counters; + 
struct xen_pmu_cntr_pair *arch_cntr_pair; + struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + uint8_t xenpmu_flags = get_xenpmu_flags(); + + + if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) + return false; + + ctxt = &xenpmu_data->pmu.c.intel; + + switch (msr) { + case MSR_CORE_PERF_GLOBAL_OVF_CTRL: + reg = &ctxt->global_ovf_ctrl; + break; + case MSR_CORE_PERF_GLOBAL_STATUS: + reg = &ctxt->global_status; + break; + case MSR_CORE_PERF_GLOBAL_CTRL: + reg = &ctxt->global_ctrl; + break; + case MSR_CORE_PERF_FIXED_CTR_CTRL: + reg = &ctxt->fixed_ctrl; + break; + default: + switch (type) { + case MSR_TYPE_COUNTER: + fix_counters = field_offset(ctxt, fixed_counters); + reg = &fix_counters[index]; + break; + case MSR_TYPE_ARCH_COUNTER: + arch_cntr_pair = field_offset(ctxt, arch_counters); + reg = &arch_cntr_pair[index].counter; + break; + case MSR_TYPE_ARCH_CTRL: + arch_cntr_pair = field_offset(ctxt, arch_counters); + reg = &arch_cntr_pair[index].control; + break; + default: + return false; + } + } + + if (reg) { + if (is_read) + *val = *reg; + else { + *reg = *val; + + if (msr == MSR_CORE_PERF_GLOBAL_OVF_CTRL) + ctxt->global_status &= (~(*val)); + } + return true; + } + + return false; +} + +static bool xen_amd_pmu_emulate(unsigned int msr, u64 *val, bool is_read) +{ + uint64_t *reg = NULL; + int i, off = 0; + struct xen_pmu_amd_ctxt *ctxt; + uint64_t *counter_regs, *ctrl_regs; + struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + uint8_t xenpmu_flags = get_xenpmu_flags(); + + if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) + return false; + + if (k7_counters_mirrored && + ((msr >= MSR_K7_EVNTSEL0) && (msr <= MSR_K7_PERFCTR3))) + msr = get_fam15h_addr(msr); + + ctxt = &xenpmu_data->pmu.c.amd; + for (i = 0; i < amd_num_counters; i++) { + if (msr == amd_ctrls_base + off) { + ctrl_regs = field_offset(ctxt, ctrls); + reg = &ctrl_regs[i]; + break; + } else if (msr == amd_counters_base + off) { + counter_regs = field_offset(ctxt, counters); + reg = &counter_regs[i]; + break; + } + off += amd_msr_step; + } + + if (reg) { + if (is_read) + *val = *reg; + else + *reg = *val; + + return true; + } + return false; +} + +static bool pmu_msr_chk_emulated(unsigned int msr, uint64_t *val, bool is_read, + bool *emul) +{ + int type, index = 0; + + if (is_amd_pmu_msr(msr)) + *emul = xen_amd_pmu_emulate(msr, val, is_read); + else if (is_intel_pmu_msr(msr, &type, &index)) + *emul = xen_intel_pmu_emulate(msr, val, type, index, is_read); + else + return false; + + return true; +} + +bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err) +{ + bool emulated; + + if (!pmu_msr_chk_emulated(msr, val, true, &emulated)) + return false; + + if (!emulated) { + *val = err ? 
native_read_msr_safe(msr, err) + : native_read_msr(msr); + } + + return true; +} + +bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err) +{ + uint64_t val = ((uint64_t)high << 32) | low; + bool emulated; + + if (!pmu_msr_chk_emulated(msr, &val, false, &emulated)) + return false; + + if (!emulated) { + if (err) + *err = native_write_msr_safe(msr, low, high); + else + native_write_msr(msr, low, high); + } + + return true; +} + +static unsigned long long xen_amd_read_pmc(int counter) +{ + struct xen_pmu_amd_ctxt *ctxt; + uint64_t *counter_regs; + struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + uint8_t xenpmu_flags = get_xenpmu_flags(); + + if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) { + uint32_t msr; + int err; + + msr = amd_counters_base + (counter * amd_msr_step); + return native_read_msr_safe(msr, &err); + } + + ctxt = &xenpmu_data->pmu.c.amd; + counter_regs = field_offset(ctxt, counters); + return counter_regs[counter]; +} + +static unsigned long long xen_intel_read_pmc(int counter) +{ + struct xen_pmu_intel_ctxt *ctxt; + uint64_t *fixed_counters; + struct xen_pmu_cntr_pair *arch_cntr_pair; + struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + uint8_t xenpmu_flags = get_xenpmu_flags(); + + if (!xenpmu_data || !(xenpmu_flags & XENPMU_IRQ_PROCESSING)) { + uint32_t msr; + int err; + + if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) + msr = MSR_CORE_PERF_FIXED_CTR0 + (counter & 0xffff); + else + msr = MSR_IA32_PERFCTR0 + counter; + + return native_read_msr_safe(msr, &err); + } + + ctxt = &xenpmu_data->pmu.c.intel; + if (counter & (1 << INTEL_PMC_TYPE_SHIFT)) { + fixed_counters = field_offset(ctxt, fixed_counters); + return fixed_counters[counter & 0xffff]; + } + + arch_cntr_pair = field_offset(ctxt, arch_counters); + return arch_cntr_pair[counter].counter; +} + +unsigned long long xen_read_pmc(int counter) +{ + if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL) + return xen_amd_read_pmc(counter); + else + return xen_intel_read_pmc(counter); +} + +int pmu_apic_update(uint32_t val) +{ + int ret; + struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + + if (!xenpmu_data) { + pr_warn_once("%s: pmudata not initialized\n", __func__); + return -EINVAL; + } + + xenpmu_data->pmu.l.lapic_lvtpc = val; + + if (get_xenpmu_flags() & XENPMU_IRQ_PROCESSING) + return 0; + + ret = HYPERVISOR_xenpmu_op(XENPMU_lvtpc_set, NULL); + + return ret; +} + +/* perf callbacks */ +static unsigned int xen_guest_state(void) +{ + const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + unsigned int state = 0; + + if (!xenpmu_data) { + pr_warn_once("%s: pmudata not initialized\n", __func__); + return state; + } + + if (!xen_initial_domain() || (xenpmu_data->domain_id >= DOMID_SELF)) + return state; + + state |= PERF_GUEST_ACTIVE; + + if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_PV) { + if (xenpmu_data->pmu.pmu_flags & PMU_SAMPLE_USER) + state |= PERF_GUEST_USER; + } else if (xenpmu_data->pmu.r.regs.cpl & 3) { + state |= PERF_GUEST_USER; + } + + return state; +} + +static unsigned long xen_get_guest_ip(void) +{ + const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + + if (!xenpmu_data) { + pr_warn_once("%s: pmudata not initialized\n", __func__); + return 0; + } + + return xenpmu_data->pmu.r.regs.ip; +} + +static struct perf_guest_info_callbacks xen_guest_cbs = { + .state = xen_guest_state, + .get_ip = xen_get_guest_ip, +}; + +/* Convert registers from Xen's format to Linux' */ +static void xen_convert_regs(const struct xen_pmu_regs *xen_regs, + struct pt_regs *regs, 
uint64_t pmu_flags) +{ + regs->ip = xen_regs->ip; + regs->cs = xen_regs->cs; + regs->sp = xen_regs->sp; + + if (pmu_flags & PMU_SAMPLE_PV) { + if (pmu_flags & PMU_SAMPLE_USER) + regs->cs |= 3; + else + regs->cs &= ~3; + } else { + if (xen_regs->cpl) + regs->cs |= 3; + else + regs->cs &= ~3; + } +} + +irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id) +{ + int err, ret = IRQ_NONE; + struct pt_regs regs = {0}; + const struct xen_pmu_data *xenpmu_data = get_xenpmu_data(); + uint8_t xenpmu_flags = get_xenpmu_flags(); + + if (!xenpmu_data) { + pr_warn_once("%s: pmudata not initialized\n", __func__); + return ret; + } + + this_cpu_ptr(&xenpmu_shared)->flags = + xenpmu_flags | XENPMU_IRQ_PROCESSING; + xen_convert_regs(&xenpmu_data->pmu.r.regs, ®s, + xenpmu_data->pmu.pmu_flags); + if (x86_pmu.handle_irq(®s)) + ret = IRQ_HANDLED; + + /* Write out cached context to HW */ + err = HYPERVISOR_xenpmu_op(XENPMU_flush, NULL); + this_cpu_ptr(&xenpmu_shared)->flags = xenpmu_flags; + if (err) { + pr_warn_once("%s: failed hypercall, err: %d\n", __func__, err); + return IRQ_NONE; + } + + return ret; +} + +bool is_xen_pmu; + +void xen_pmu_init(int cpu) +{ + int err; + struct xen_pmu_params xp; + unsigned long pfn; + struct xen_pmu_data *xenpmu_data; + + BUILD_BUG_ON(sizeof(struct xen_pmu_data) > PAGE_SIZE); + + if (xen_hvm_domain() || (cpu != 0 && !is_xen_pmu)) + return; + + xenpmu_data = (struct xen_pmu_data *)get_zeroed_page(GFP_KERNEL); + if (!xenpmu_data) { + pr_err("VPMU init: No memory\n"); + return; + } + pfn = virt_to_pfn(xenpmu_data); + + xp.val = pfn_to_mfn(pfn); + xp.vcpu = cpu; + xp.version.maj = XENPMU_VER_MAJ; + xp.version.min = XENPMU_VER_MIN; + err = HYPERVISOR_xenpmu_op(XENPMU_init, &xp); + if (err) + goto fail; + + per_cpu(xenpmu_shared, cpu).xenpmu_data = xenpmu_data; + per_cpu(xenpmu_shared, cpu).flags = 0; + + if (!is_xen_pmu) { + is_xen_pmu = true; + perf_register_guest_info_callbacks(&xen_guest_cbs); + xen_pmu_arch_init(); + } + + return; + +fail: + if (err == -EOPNOTSUPP || err == -ENOSYS) + pr_info_once("VPMU disabled by hypervisor.\n"); + else + pr_info_once("Could not initialize VPMU for cpu %d, error %d\n", + cpu, err); + free_pages((unsigned long)xenpmu_data, 0); +} + +void xen_pmu_finish(int cpu) +{ + struct xen_pmu_params xp; + + if (xen_hvm_domain()) + return; + + xp.vcpu = cpu; + xp.version.maj = XENPMU_VER_MAJ; + xp.version.min = XENPMU_VER_MIN; + + (void)HYPERVISOR_xenpmu_op(XENPMU_finish, &xp); + + free_pages((unsigned long)per_cpu(xenpmu_shared, cpu).xenpmu_data, 0); + per_cpu(xenpmu_shared, cpu).xenpmu_data = NULL; +} diff --git a/arch/x86/xen/pmu.h b/arch/x86/xen/pmu.h new file mode 100644 index 0000000000..65c58894fc --- /dev/null +++ b/arch/x86/xen/pmu.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __XEN_PMU_H +#define __XEN_PMU_H + +#include <xen/interface/xenpmu.h> + +extern bool is_xen_pmu; + +irqreturn_t xen_pmu_irq_handler(int irq, void *dev_id); +#ifdef CONFIG_XEN_HAVE_VPMU +void xen_pmu_init(int cpu); +void xen_pmu_finish(int cpu); +#else +static inline void xen_pmu_init(int cpu) {} +static inline void xen_pmu_finish(int cpu) {} +#endif +bool pmu_msr_read(unsigned int msr, uint64_t *val, int *err); +bool pmu_msr_write(unsigned int msr, uint32_t low, uint32_t high, int *err); +int pmu_apic_update(uint32_t reg); +unsigned long long xen_read_pmc(int counter); + +#endif /* __XEN_PMU_H */ diff --git a/arch/x86/xen/setup.c b/arch/x86/xen/setup.c new file mode 100644 index 0000000000..b3e3796106 --- /dev/null +++ b/arch/x86/xen/setup.c @@ -0,0 
+1,988 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Machine specific setup for xen + * + * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 + */ + +#include <linux/init.h> +#include <linux/iscsi_ibft.h> +#include <linux/sched.h> +#include <linux/kstrtox.h> +#include <linux/mm.h> +#include <linux/pm.h> +#include <linux/memblock.h> +#include <linux/cpuidle.h> +#include <linux/cpufreq.h> +#include <linux/memory_hotplug.h> + +#include <asm/elf.h> +#include <asm/vdso.h> +#include <asm/e820/api.h> +#include <asm/setup.h> +#include <asm/acpi.h> +#include <asm/numa.h> +#include <asm/idtentry.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/hypercall.h> + +#include <xen/xen.h> +#include <xen/page.h> +#include <xen/interface/callback.h> +#include <xen/interface/memory.h> +#include <xen/interface/physdev.h> +#include <xen/features.h> +#include <xen/hvc-console.h> +#include "xen-ops.h" +#include "mmu.h" + +#define GB(x) ((uint64_t)(x) * 1024 * 1024 * 1024) + +/* Amount of extra memory space we add to the e820 ranges */ +struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS] __initdata; + +/* Number of pages released from the initial allocation. */ +unsigned long xen_released_pages; + +/* Memory map would allow PCI passthrough. */ +bool xen_pv_pci_possible; + +/* E820 map used during setting up memory. */ +static struct e820_table xen_e820_table __initdata; + +/* + * Buffer used to remap identity mapped pages. We only need the virtual space. + * The physical page behind this address is remapped as needed to different + * buffer pages. + */ +#define REMAP_SIZE (P2M_PER_PAGE - 3) +static struct { + unsigned long next_area_mfn; + unsigned long target_pfn; + unsigned long size; + unsigned long mfns[REMAP_SIZE]; +} xen_remap_buf __initdata __aligned(PAGE_SIZE); +static unsigned long xen_remap_mfn __initdata = INVALID_P2M_ENTRY; + +/* + * The maximum amount of extra memory compared to the base size. The + * main scaling factor is the size of struct page. At extreme ratios + * of base:extra, all the base memory can be filled with page + * structures for the extra memory, leaving no space for anything + * else. + * + * 10x seems like a reasonable balance between scaling flexibility and + * leaving a practically usable system. + */ +#define EXTRA_MEM_RATIO (10) + +static bool xen_512gb_limit __initdata = IS_ENABLED(CONFIG_XEN_512GB); + +static void __init xen_parse_512gb(void) +{ + bool val = false; + char *arg; + + arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit"); + if (!arg) + return; + + arg = strstr(xen_start_info->cmd_line, "xen_512gb_limit="); + if (!arg) + val = true; + else if (kstrtobool(arg + strlen("xen_512gb_limit="), &val)) + return; + + xen_512gb_limit = val; +} + +static void __init xen_add_extra_mem(unsigned long start_pfn, + unsigned long n_pfns) +{ + int i; + + /* + * No need to check for zero size, should happen rarely and will only + * write a new entry regarded to be unused due to zero size. + */ + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + /* Add new region. */ + if (xen_extra_mem[i].n_pfns == 0) { + xen_extra_mem[i].start_pfn = start_pfn; + xen_extra_mem[i].n_pfns = n_pfns; + break; + } + /* Append to existing region. 
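+ * (the new range starts exactly where a recorded region ends,
+ * so grow that region instead of using up another slot)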
*/ + if (xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns == + start_pfn) { + xen_extra_mem[i].n_pfns += n_pfns; + break; + } + } + if (i == XEN_EXTRA_MEM_MAX_REGIONS) + printk(KERN_WARNING "Warning: not enough extra memory regions\n"); + + memblock_reserve(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns)); +} + +static void __init xen_del_extra_mem(unsigned long start_pfn, + unsigned long n_pfns) +{ + int i; + unsigned long start_r, size_r; + + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + start_r = xen_extra_mem[i].start_pfn; + size_r = xen_extra_mem[i].n_pfns; + + /* Start of region. */ + if (start_r == start_pfn) { + BUG_ON(n_pfns > size_r); + xen_extra_mem[i].start_pfn += n_pfns; + xen_extra_mem[i].n_pfns -= n_pfns; + break; + } + /* End of region. */ + if (start_r + size_r == start_pfn + n_pfns) { + BUG_ON(n_pfns > size_r); + xen_extra_mem[i].n_pfns -= n_pfns; + break; + } + /* Mid of region. */ + if (start_pfn > start_r && start_pfn < start_r + size_r) { + BUG_ON(start_pfn + n_pfns > start_r + size_r); + xen_extra_mem[i].n_pfns = start_pfn - start_r; + /* Calling memblock_reserve() again is okay. */ + xen_add_extra_mem(start_pfn + n_pfns, start_r + size_r - + (start_pfn + n_pfns)); + break; + } + } + memblock_phys_free(PFN_PHYS(start_pfn), PFN_PHYS(n_pfns)); +} + +/* + * Called during boot before the p2m list can take entries beyond the + * hypervisor supplied p2m list. Entries in extra mem are to be regarded as + * invalid. + */ +unsigned long __ref xen_chk_extra_mem(unsigned long pfn) +{ + int i; + + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + if (pfn >= xen_extra_mem[i].start_pfn && + pfn < xen_extra_mem[i].start_pfn + xen_extra_mem[i].n_pfns) + return INVALID_P2M_ENTRY; + } + + return IDENTITY_FRAME(pfn); +} + +/* + * Mark all pfns of extra mem as invalid in p2m list. + */ +void __init xen_inv_extra_mem(void) +{ + unsigned long pfn, pfn_s, pfn_e; + int i; + + for (i = 0; i < XEN_EXTRA_MEM_MAX_REGIONS; i++) { + if (!xen_extra_mem[i].n_pfns) + continue; + pfn_s = xen_extra_mem[i].start_pfn; + pfn_e = pfn_s + xen_extra_mem[i].n_pfns; + for (pfn = pfn_s; pfn < pfn_e; pfn++) + set_phys_to_machine(pfn, INVALID_P2M_ENTRY); + } +} + +/* + * Finds the next RAM pfn available in the E820 map after min_pfn. + * This function updates min_pfn with the pfn found and returns + * the size of that range or zero if not found. + */ +static unsigned long __init xen_find_pfn_range(unsigned long *min_pfn) +{ + const struct e820_entry *entry = xen_e820_table.entries; + unsigned int i; + unsigned long done = 0; + + for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) { + unsigned long s_pfn; + unsigned long e_pfn; + + if (entry->type != E820_TYPE_RAM) + continue; + + e_pfn = PFN_DOWN(entry->addr + entry->size); + + /* We only care about E820 after this */ + if (e_pfn <= *min_pfn) + continue; + + s_pfn = PFN_UP(entry->addr); + + /* If min_pfn falls within the E820 entry, we want to start + * at the min_pfn PFN. + */ + if (s_pfn <= *min_pfn) { + done = e_pfn - *min_pfn; + } else { + done = e_pfn - s_pfn; + *min_pfn = s_pfn; + } + break; + } + + return done; +} + +static int __init xen_free_mfn(unsigned long mfn) +{ + struct xen_memory_reservation reservation = { + .address_bits = 0, + .extent_order = 0, + .domid = DOMID_SELF + }; + + set_xen_guest_handle(reservation.extent_start, &mfn); + reservation.nr_extents = 1; + + return HYPERVISOR_memory_op(XENMEM_decrease_reservation, &reservation); +} + +/* + * This releases a chunk of memory and then does the identity map. 
It's used + * as a fallback if the remapping fails. + */ +static void __init xen_set_identity_and_release_chunk(unsigned long start_pfn, + unsigned long end_pfn, unsigned long nr_pages) +{ + unsigned long pfn, end; + int ret; + + WARN_ON(start_pfn > end_pfn); + + /* Release pages first. */ + end = min(end_pfn, nr_pages); + for (pfn = start_pfn; pfn < end; pfn++) { + unsigned long mfn = pfn_to_mfn(pfn); + + /* Make sure pfn exists to start with */ + if (mfn == INVALID_P2M_ENTRY || mfn_to_pfn(mfn) != pfn) + continue; + + ret = xen_free_mfn(mfn); + WARN(ret != 1, "Failed to release pfn %lx err=%d\n", pfn, ret); + + if (ret == 1) { + xen_released_pages++; + if (!__set_phys_to_machine(pfn, INVALID_P2M_ENTRY)) + break; + } else + break; + } + + set_phys_range_identity(start_pfn, end_pfn); +} + +/* + * Helper function to update the p2m and m2p tables and kernel mapping. + */ +static void __init xen_update_mem_tables(unsigned long pfn, unsigned long mfn) +{ + struct mmu_update update = { + .ptr = ((uint64_t)mfn << PAGE_SHIFT) | MMU_MACHPHYS_UPDATE, + .val = pfn + }; + + /* Update p2m */ + if (!set_phys_to_machine(pfn, mfn)) { + WARN(1, "Failed to set p2m mapping for pfn=%ld mfn=%ld\n", + pfn, mfn); + BUG(); + } + + /* Update m2p */ + if (HYPERVISOR_mmu_update(&update, 1, NULL, DOMID_SELF) < 0) { + WARN(1, "Failed to set m2p mapping for mfn=%ld pfn=%ld\n", + mfn, pfn); + BUG(); + } + + if (HYPERVISOR_update_va_mapping((unsigned long)__va(pfn << PAGE_SHIFT), + mfn_pte(mfn, PAGE_KERNEL), 0)) { + WARN(1, "Failed to update kernel mapping for mfn=%ld pfn=%ld\n", + mfn, pfn); + BUG(); + } +} + +/* + * This function updates the p2m and m2p tables with an identity map from + * start_pfn to start_pfn+size and prepares remapping the underlying RAM of the + * original allocation at remap_pfn. The information needed for remapping is + * saved in the memory itself to avoid the need for allocating buffers. The + * complete remap information is contained in a list of MFNs each containing + * up to REMAP_SIZE MFNs and the start target PFN for doing the remap. + * This enables us to preserve the original mfn sequence while doing the + * remapping at a time when the memory management is capable of allocating + * virtual and physical memory in arbitrary amounts, see 'xen_remap_memory' and + * its callers. + */ +static void __init xen_do_set_identity_and_remap_chunk( + unsigned long start_pfn, unsigned long size, unsigned long remap_pfn) +{ + unsigned long buf = (unsigned long)&xen_remap_buf; + unsigned long mfn_save, mfn; + unsigned long ident_pfn_iter, remap_pfn_iter; + unsigned long ident_end_pfn = start_pfn + size; + unsigned long left = size; + unsigned int i, chunk; + + WARN_ON(size == 0); + + mfn_save = virt_to_mfn((void *)buf); + + for (ident_pfn_iter = start_pfn, remap_pfn_iter = remap_pfn; + ident_pfn_iter < ident_end_pfn; + ident_pfn_iter += REMAP_SIZE, remap_pfn_iter += REMAP_SIZE) { + chunk = (left < REMAP_SIZE) ? left : REMAP_SIZE; + + /* Map first pfn to xen_remap_buf */ + mfn = pfn_to_mfn(ident_pfn_iter); + set_pte_mfn(buf, mfn, PAGE_KERNEL); + + /* Save mapping information in page */ + xen_remap_buf.next_area_mfn = xen_remap_mfn; + xen_remap_buf.target_pfn = remap_pfn_iter; + xen_remap_buf.size = chunk; + for (i = 0; i < chunk; i++) + xen_remap_buf.mfns[i] = pfn_to_mfn(ident_pfn_iter + i); + + /* Put remap buf into list. 
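+ * The machine frame backing the first pfn of the chunk now holds
+ * the remap data and becomes the new list head; next_area_mfn,
+ * saved above, links it to the previous head.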
*/ + xen_remap_mfn = mfn; + + /* Set identity map */ + set_phys_range_identity(ident_pfn_iter, ident_pfn_iter + chunk); + + left -= chunk; + } + + /* Restore old xen_remap_buf mapping */ + set_pte_mfn(buf, mfn_save, PAGE_KERNEL); +} + +/* + * This function takes a contiguous pfn range that needs to be identity mapped + * and: + * + * 1) Finds a new range of pfns to use to remap based on E820 and remap_pfn. + * 2) Calls the do_ function to actually do the mapping/remapping work. + * + * The goal is to not allocate additional memory but to remap the existing + * pages. In the case of an error the underlying memory is simply released back + * to Xen and not remapped. + */ +static unsigned long __init xen_set_identity_and_remap_chunk( + unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, + unsigned long remap_pfn) +{ + unsigned long pfn; + unsigned long i = 0; + unsigned long n = end_pfn - start_pfn; + + if (remap_pfn == 0) + remap_pfn = nr_pages; + + while (i < n) { + unsigned long cur_pfn = start_pfn + i; + unsigned long left = n - i; + unsigned long size = left; + unsigned long remap_range_size; + + /* Do not remap pages beyond the current allocation */ + if (cur_pfn >= nr_pages) { + /* Identity map remaining pages */ + set_phys_range_identity(cur_pfn, cur_pfn + size); + break; + } + if (cur_pfn + size > nr_pages) + size = nr_pages - cur_pfn; + + remap_range_size = xen_find_pfn_range(&remap_pfn); + if (!remap_range_size) { + pr_warn("Unable to find available pfn range, not remapping identity pages\n"); + xen_set_identity_and_release_chunk(cur_pfn, + cur_pfn + left, nr_pages); + break; + } + /* Adjust size to fit in current e820 RAM region */ + if (size > remap_range_size) + size = remap_range_size; + + xen_do_set_identity_and_remap_chunk(cur_pfn, size, remap_pfn); + + /* Update variables to reflect new mappings. */ + i += size; + remap_pfn += size; + } + + /* + * If the PFNs are currently mapped, their VA mappings need to be + * zapped. + */ + for (pfn = start_pfn; pfn <= max_pfn_mapped && pfn < end_pfn; pfn++) + (void)HYPERVISOR_update_va_mapping( + (unsigned long)__va(pfn << PAGE_SHIFT), + native_make_pte(0), 0); + + return remap_pfn; +} + +static unsigned long __init xen_count_remap_pages( + unsigned long start_pfn, unsigned long end_pfn, unsigned long nr_pages, + unsigned long remap_pages) +{ + if (start_pfn >= nr_pages) + return remap_pages; + + return remap_pages + min(end_pfn, nr_pages) - start_pfn; +} + +static unsigned long __init xen_foreach_remap_area(unsigned long nr_pages, + unsigned long (*func)(unsigned long start_pfn, unsigned long end_pfn, + unsigned long nr_pages, unsigned long last_val)) +{ + phys_addr_t start = 0; + unsigned long ret_val = 0; + const struct e820_entry *entry = xen_e820_table.entries; + int i; + + /* + * Combine non-RAM regions and gaps until a RAM region (or the + * end of the map) is reached, then call the provided function + * to perform its duty on the non-RAM region. + * + * The combined non-RAM regions are rounded to a whole number + * of pages so any partial pages are accessible via the 1:1 + * mapping. This is needed for some BIOSes that put (for + * example) the DMI tables in a reserved region that begins on + * a non-page boundary. 
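+ * func() is invoked with the start and end pfn of each combined
+ * non-RAM block; its return value is fed back in as last_val so
+ * results can be accumulated across calls.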
+ */ + for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) { + phys_addr_t end = entry->addr + entry->size; + if (entry->type == E820_TYPE_RAM || i == xen_e820_table.nr_entries - 1) { + unsigned long start_pfn = PFN_DOWN(start); + unsigned long end_pfn = PFN_UP(end); + + if (entry->type == E820_TYPE_RAM) + end_pfn = PFN_UP(entry->addr); + + if (start_pfn < end_pfn) + ret_val = func(start_pfn, end_pfn, nr_pages, + ret_val); + start = end; + } + } + + return ret_val; +} + +/* + * Remap the memory prepared in xen_do_set_identity_and_remap_chunk(). + * The remap information (which mfn remap to which pfn) is contained in the + * to be remapped memory itself in a linked list anchored at xen_remap_mfn. + * This scheme allows to remap the different chunks in arbitrary order while + * the resulting mapping will be independent from the order. + */ +void __init xen_remap_memory(void) +{ + unsigned long buf = (unsigned long)&xen_remap_buf; + unsigned long mfn_save, pfn; + unsigned long remapped = 0; + unsigned int i; + unsigned long pfn_s = ~0UL; + unsigned long len = 0; + + mfn_save = virt_to_mfn((void *)buf); + + while (xen_remap_mfn != INVALID_P2M_ENTRY) { + /* Map the remap information */ + set_pte_mfn(buf, xen_remap_mfn, PAGE_KERNEL); + + BUG_ON(xen_remap_mfn != xen_remap_buf.mfns[0]); + + pfn = xen_remap_buf.target_pfn; + for (i = 0; i < xen_remap_buf.size; i++) { + xen_update_mem_tables(pfn, xen_remap_buf.mfns[i]); + remapped++; + pfn++; + } + if (pfn_s == ~0UL || pfn == pfn_s) { + pfn_s = xen_remap_buf.target_pfn; + len += xen_remap_buf.size; + } else if (pfn_s + len == xen_remap_buf.target_pfn) { + len += xen_remap_buf.size; + } else { + xen_del_extra_mem(pfn_s, len); + pfn_s = xen_remap_buf.target_pfn; + len = xen_remap_buf.size; + } + xen_remap_mfn = xen_remap_buf.next_area_mfn; + } + + if (pfn_s != ~0UL && len) + xen_del_extra_mem(pfn_s, len); + + set_pte_mfn(buf, mfn_save, PAGE_KERNEL); + + pr_info("Remapped %ld page(s)\n", remapped); +} + +static unsigned long __init xen_get_pages_limit(void) +{ + unsigned long limit; + + limit = MAXMEM / PAGE_SIZE; + if (!xen_initial_domain() && xen_512gb_limit) + limit = GB(512) / PAGE_SIZE; + + return limit; +} + +static unsigned long __init xen_get_max_pages(void) +{ + unsigned long max_pages, limit; + domid_t domid = DOMID_SELF; + long ret; + + limit = xen_get_pages_limit(); + max_pages = limit; + + /* + * For the initial domain we use the maximum reservation as + * the maximum page. + * + * For guest domains the current maximum reservation reflects + * the current maximum rather than the static maximum. In this + * case the e820 map provided to us will cover the static + * maximum region. + */ + if (xen_initial_domain()) { + ret = HYPERVISOR_memory_op(XENMEM_maximum_reservation, &domid); + if (ret > 0) + max_pages = ret; + } + + return min(max_pages, limit); +} + +static void __init xen_align_and_add_e820_region(phys_addr_t start, + phys_addr_t size, int type) +{ + phys_addr_t end = start + size; + + /* Align RAM regions to page boundaries. */ + if (type == E820_TYPE_RAM) { + start = PAGE_ALIGN(start); + end &= ~((phys_addr_t)PAGE_SIZE - 1); +#ifdef CONFIG_MEMORY_HOTPLUG + /* + * Don't allow adding memory not in E820 map while booting the + * system. Once the balloon driver is up it will remove that + * restriction again. 
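+ * (the memory hotplug core refuses to add ranges above
+ * max_mem_size)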
+ */ + max_mem_size = end; +#endif + } + + e820__range_add(start, end - start, type); +} + +static void __init xen_ignore_unusable(void) +{ + struct e820_entry *entry = xen_e820_table.entries; + unsigned int i; + + for (i = 0; i < xen_e820_table.nr_entries; i++, entry++) { + if (entry->type == E820_TYPE_UNUSABLE) + entry->type = E820_TYPE_RAM; + } +} + +bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size) +{ + struct e820_entry *entry; + unsigned mapcnt; + phys_addr_t end; + + if (!size) + return false; + + end = start + size; + entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++) { + if (entry->type == E820_TYPE_RAM && entry->addr <= start && + (entry->addr + entry->size) >= end) + return false; + + entry++; + } + + return true; +} + +/* + * Find a free area in physical memory not yet reserved and compliant with + * E820 map. + * Used to relocate pre-allocated areas like initrd or p2m list which are in + * conflict with the to be used E820 map. + * In case no area is found, return 0. Otherwise return the physical address + * of the area which is already reserved for convenience. + */ +phys_addr_t __init xen_find_free_area(phys_addr_t size) +{ + unsigned mapcnt; + phys_addr_t addr, start; + struct e820_entry *entry = xen_e820_table.entries; + + for (mapcnt = 0; mapcnt < xen_e820_table.nr_entries; mapcnt++, entry++) { + if (entry->type != E820_TYPE_RAM || entry->size < size) + continue; + start = entry->addr; + for (addr = start; addr < start + size; addr += PAGE_SIZE) { + if (!memblock_is_reserved(addr)) + continue; + start = addr + PAGE_SIZE; + if (start + size > entry->addr + entry->size) + break; + } + if (addr >= start + size) { + memblock_reserve(start, size); + return start; + } + } + + return 0; +} + +/* + * Like memcpy, but with physical addresses for dest and src. + */ +static void __init xen_phys_memcpy(phys_addr_t dest, phys_addr_t src, + phys_addr_t n) +{ + phys_addr_t dest_off, src_off, dest_len, src_len, len; + void *from, *to; + + while (n) { + dest_off = dest & ~PAGE_MASK; + src_off = src & ~PAGE_MASK; + dest_len = n; + if (dest_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off) + dest_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - dest_off; + src_len = n; + if (src_len > (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off) + src_len = (NR_FIX_BTMAPS << PAGE_SHIFT) - src_off; + len = min(dest_len, src_len); + to = early_memremap(dest - dest_off, dest_len + dest_off); + from = early_memremap(src - src_off, src_len + src_off); + memcpy(to, from, len); + early_memunmap(to, dest_len + dest_off); + early_memunmap(from, src_len + src_off); + n -= len; + dest += len; + src += len; + } +} + +/* + * Reserve Xen mfn_list. + */ +static void __init xen_reserve_xen_mfnlist(void) +{ + phys_addr_t start, size; + + if (xen_start_info->mfn_list >= __START_KERNEL_map) { + start = __pa(xen_start_info->mfn_list); + size = PFN_ALIGN(xen_start_info->nr_pages * + sizeof(unsigned long)); + } else { + start = PFN_PHYS(xen_start_info->first_p2m_pfn); + size = PFN_PHYS(xen_start_info->nr_p2m_frames); + } + + memblock_reserve(start, size); + if (!xen_is_e820_reserved(start, size)) + return; + + xen_relocate_p2m(); + memblock_phys_free(start, size); +} + +/** + * xen_memory_setup - Hook for machine specific memory setup. 
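+ *
+ * Return: name of the machine specific memory setup, always "Xen".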
+ **/ +char * __init xen_memory_setup(void) +{ + unsigned long max_pfn, pfn_s, n_pfns; + phys_addr_t mem_end, addr, size, chunk_size; + u32 type; + int rc; + struct xen_memory_map memmap; + unsigned long max_pages; + unsigned long extra_pages = 0; + int i; + int op; + + xen_parse_512gb(); + max_pfn = xen_get_pages_limit(); + max_pfn = min(max_pfn, xen_start_info->nr_pages); + mem_end = PFN_PHYS(max_pfn); + + memmap.nr_entries = ARRAY_SIZE(xen_e820_table.entries); + set_xen_guest_handle(memmap.buffer, xen_e820_table.entries); + +#if defined(CONFIG_MEMORY_HOTPLUG) && defined(CONFIG_XEN_BALLOON) + xen_saved_max_mem_size = max_mem_size; +#endif + + op = xen_initial_domain() ? + XENMEM_machine_memory_map : + XENMEM_memory_map; + rc = HYPERVISOR_memory_op(op, &memmap); + if (rc == -ENOSYS) { + BUG_ON(xen_initial_domain()); + memmap.nr_entries = 1; + xen_e820_table.entries[0].addr = 0ULL; + xen_e820_table.entries[0].size = mem_end; + /* 8MB slack (to balance backend allocations). */ + xen_e820_table.entries[0].size += 8ULL << 20; + xen_e820_table.entries[0].type = E820_TYPE_RAM; + rc = 0; + } + BUG_ON(rc); + BUG_ON(memmap.nr_entries == 0); + xen_e820_table.nr_entries = memmap.nr_entries; + + if (xen_initial_domain()) { + /* + * Xen won't allow a 1:1 mapping to be created to UNUSABLE + * regions, so if we're using the machine memory map leave the + * region as RAM as it is in the pseudo-physical map. + * + * UNUSABLE regions in domUs are not handled and will need + * a patch in the future. + */ + xen_ignore_unusable(); + +#ifdef CONFIG_ISCSI_IBFT_FIND + /* Reserve 0.5 MiB to 1 MiB region so iBFT can be found */ + xen_e820_table.entries[xen_e820_table.nr_entries].addr = IBFT_START; + xen_e820_table.entries[xen_e820_table.nr_entries].size = IBFT_END - IBFT_START; + xen_e820_table.entries[xen_e820_table.nr_entries].type = E820_TYPE_RESERVED; + xen_e820_table.nr_entries++; +#endif + } + + /* Make sure the Xen-supplied memory map is well-ordered. */ + e820__update_table(&xen_e820_table); + + max_pages = xen_get_max_pages(); + + /* How many extra pages do we need due to remapping? */ + max_pages += xen_foreach_remap_area(max_pfn, xen_count_remap_pages); + + if (max_pages > max_pfn) + extra_pages += max_pages - max_pfn; + + /* + * Clamp the amount of extra memory to a EXTRA_MEM_RATIO + * factor the base size. + * + * Make sure we have no memory above max_pages, as this area + * isn't handled by the p2m management. 
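+ * i.e. extra_pages is limited both to EXTRA_MEM_RATIO times the
+ * usable base memory (the smaller of max_pfn and MAXMEM in pages)
+ * and to the room left below max_pages.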
+ */ + extra_pages = min3(EXTRA_MEM_RATIO * min(max_pfn, PFN_DOWN(MAXMEM)), + extra_pages, max_pages - max_pfn); + i = 0; + addr = xen_e820_table.entries[0].addr; + size = xen_e820_table.entries[0].size; + while (i < xen_e820_table.nr_entries) { + bool discard = false; + + chunk_size = size; + type = xen_e820_table.entries[i].type; + + if (type == E820_TYPE_RESERVED) + xen_pv_pci_possible = true; + + if (type == E820_TYPE_RAM) { + if (addr < mem_end) { + chunk_size = min(size, mem_end - addr); + } else if (extra_pages) { + chunk_size = min(size, PFN_PHYS(extra_pages)); + pfn_s = PFN_UP(addr); + n_pfns = PFN_DOWN(addr + chunk_size) - pfn_s; + extra_pages -= n_pfns; + xen_add_extra_mem(pfn_s, n_pfns); + xen_max_p2m_pfn = pfn_s + n_pfns; + } else + discard = true; + } + + if (!discard) + xen_align_and_add_e820_region(addr, chunk_size, type); + + addr += chunk_size; + size -= chunk_size; + if (size == 0) { + i++; + if (i < xen_e820_table.nr_entries) { + addr = xen_e820_table.entries[i].addr; + size = xen_e820_table.entries[i].size; + } + } + } + + /* + * Set the rest as identity mapped, in case PCI BARs are + * located here. + */ + set_phys_range_identity(addr / PAGE_SIZE, ~0ul); + + /* + * In domU, the ISA region is normal, usable memory, but we + * reserve ISA memory anyway because too many things poke + * about in there. + */ + e820__range_add(ISA_START_ADDRESS, ISA_END_ADDRESS - ISA_START_ADDRESS, E820_TYPE_RESERVED); + + e820__update_table(e820_table); + + /* + * Check whether the kernel itself conflicts with the target E820 map. + * Failing now is better than running into weird problems later due + * to relocating (and even reusing) pages with kernel text or data. + */ + if (xen_is_e820_reserved(__pa_symbol(_text), + __pa_symbol(__bss_stop) - __pa_symbol(_text))) { + xen_raw_console_write("Xen hypervisor allocated kernel memory conflicts with E820 map\n"); + BUG(); + } + + /* + * Check for a conflict of the hypervisor supplied page tables with + * the target E820 map. + */ + xen_pt_check_e820(); + + xen_reserve_xen_mfnlist(); + + /* Check for a conflict of the initrd with the target E820 map. */ + if (xen_is_e820_reserved(boot_params.hdr.ramdisk_image, + boot_params.hdr.ramdisk_size)) { + phys_addr_t new_area, start, size; + + new_area = xen_find_free_area(boot_params.hdr.ramdisk_size); + if (!new_area) { + xen_raw_console_write("Can't find new memory area for initrd needed due to E820 map conflict\n"); + BUG(); + } + + start = boot_params.hdr.ramdisk_image; + size = boot_params.hdr.ramdisk_size; + xen_phys_memcpy(new_area, start, size); + pr_info("initrd moved from [mem %#010llx-%#010llx] to [mem %#010llx-%#010llx]\n", + start, start + size, new_area, new_area + size); + memblock_phys_free(start, size); + boot_params.hdr.ramdisk_image = new_area; + boot_params.ext_ramdisk_image = new_area >> 32; + } + + /* + * Set identity map on non-RAM pages and prepare remapping the + * underlying RAM. 
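+ * The remap itself is deferred to xen_remap_memory(), which runs
+ * once the kernel can allocate the needed virtual and physical
+ * memory in arbitrary amounts.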
+ */ + xen_foreach_remap_area(max_pfn, xen_set_identity_and_remap_chunk); + + pr_info("Released %ld page(s)\n", xen_released_pages); + + return "Xen"; +} + +static int register_callback(unsigned type, const void *func) +{ + struct callback_register callback = { + .type = type, + .address = XEN_CALLBACK(__KERNEL_CS, func), + .flags = CALLBACKF_mask_events, + }; + + return HYPERVISOR_callback_op(CALLBACKOP_register, &callback); +} + +void xen_enable_sysenter(void) +{ + if (cpu_feature_enabled(X86_FEATURE_SYSENTER32) && + register_callback(CALLBACKTYPE_sysenter, xen_entry_SYSENTER_compat)) + setup_clear_cpu_cap(X86_FEATURE_SYSENTER32); +} + +void xen_enable_syscall(void) +{ + int ret; + + ret = register_callback(CALLBACKTYPE_syscall, xen_entry_SYSCALL_64); + if (ret != 0) { + printk(KERN_ERR "Failed to set syscall callback: %d\n", ret); + /* Pretty fatal; 64-bit userspace has no other + mechanism for syscalls. */ + } + + if (cpu_feature_enabled(X86_FEATURE_SYSCALL32) && + register_callback(CALLBACKTYPE_syscall32, xen_entry_SYSCALL_compat)) + setup_clear_cpu_cap(X86_FEATURE_SYSCALL32); +} + +static void __init xen_pvmmu_arch_setup(void) +{ + HYPERVISOR_vm_assist(VMASST_CMD_enable, VMASST_TYPE_writable_pagetables); + + if (register_callback(CALLBACKTYPE_event, + xen_asm_exc_xen_hypervisor_callback) || + register_callback(CALLBACKTYPE_failsafe, xen_failsafe_callback)) + BUG(); + + xen_enable_sysenter(); + xen_enable_syscall(); +} + +/* This function is not called for HVM domains */ +void __init xen_arch_setup(void) +{ + xen_panic_handler_init(); + xen_pvmmu_arch_setup(); + +#ifdef CONFIG_ACPI + if (!(xen_start_info->flags & SIF_INITDOMAIN)) { + printk(KERN_INFO "ACPI in unprivileged domain disabled\n"); + disable_acpi(); + } +#endif + + memcpy(boot_command_line, xen_start_info->cmd_line, + MAX_GUEST_CMDLINE > COMMAND_LINE_SIZE ? + COMMAND_LINE_SIZE : MAX_GUEST_CMDLINE); + + /* Set up idle, making sure it calls safe_halt() pvop */ + disable_cpuidle(); + disable_cpufreq(); + WARN_ON(xen_set_default_idle()); +#ifdef CONFIG_NUMA + numa_off = 1; +#endif +} diff --git a/arch/x86/xen/smp.c b/arch/x86/xen/smp.c new file mode 100644 index 0000000000..4b0d6fff88 --- /dev/null +++ b/arch/x86/xen/smp.c @@ -0,0 +1,259 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/smp.h> +#include <linux/cpu.h> +#include <linux/slab.h> +#include <linux/cpumask.h> +#include <linux/percpu.h> + +#include <xen/events.h> + +#include <xen/hvc-console.h> +#include "xen-ops.h" +#include "smp.h" + +static DEFINE_PER_CPU(struct xen_common_irq, xen_resched_irq) = { .irq = -1 }; +static DEFINE_PER_CPU(struct xen_common_irq, xen_callfunc_irq) = { .irq = -1 }; +static DEFINE_PER_CPU(struct xen_common_irq, xen_callfuncsingle_irq) = { .irq = -1 }; +static DEFINE_PER_CPU(struct xen_common_irq, xen_debug_irq) = { .irq = -1 }; + +static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id); +static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id); + +/* + * Reschedule call back. 
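+ * Nothing to do here beyond accounting the IPI; scheduler_ipi()
+ * lets the scheduler act on the pending resched request.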
+ */ +static irqreturn_t xen_reschedule_interrupt(int irq, void *dev_id) +{ + inc_irq_stat(irq_resched_count); + scheduler_ipi(); + + return IRQ_HANDLED; +} + +void xen_smp_intr_free(unsigned int cpu) +{ + kfree(per_cpu(xen_resched_irq, cpu).name); + per_cpu(xen_resched_irq, cpu).name = NULL; + if (per_cpu(xen_resched_irq, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu).irq, NULL); + per_cpu(xen_resched_irq, cpu).irq = -1; + } + kfree(per_cpu(xen_callfunc_irq, cpu).name); + per_cpu(xen_callfunc_irq, cpu).name = NULL; + if (per_cpu(xen_callfunc_irq, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu).irq, NULL); + per_cpu(xen_callfunc_irq, cpu).irq = -1; + } + kfree(per_cpu(xen_debug_irq, cpu).name); + per_cpu(xen_debug_irq, cpu).name = NULL; + if (per_cpu(xen_debug_irq, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu).irq, NULL); + per_cpu(xen_debug_irq, cpu).irq = -1; + } + kfree(per_cpu(xen_callfuncsingle_irq, cpu).name); + per_cpu(xen_callfuncsingle_irq, cpu).name = NULL; + if (per_cpu(xen_callfuncsingle_irq, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu).irq, + NULL); + per_cpu(xen_callfuncsingle_irq, cpu).irq = -1; + } +} + +int xen_smp_intr_init(unsigned int cpu) +{ + int rc; + char *resched_name, *callfunc_name, *debug_name; + + resched_name = kasprintf(GFP_KERNEL, "resched%d", cpu); + per_cpu(xen_resched_irq, cpu).name = resched_name; + rc = bind_ipi_to_irqhandler(XEN_RESCHEDULE_VECTOR, + cpu, + xen_reschedule_interrupt, + IRQF_PERCPU|IRQF_NOBALANCING, + resched_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(xen_resched_irq, cpu).irq = rc; + + callfunc_name = kasprintf(GFP_KERNEL, "callfunc%d", cpu); + per_cpu(xen_callfunc_irq, cpu).name = callfunc_name; + rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_VECTOR, + cpu, + xen_call_function_interrupt, + IRQF_PERCPU|IRQF_NOBALANCING, + callfunc_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(xen_callfunc_irq, cpu).irq = rc; + + if (!xen_fifo_events) { + debug_name = kasprintf(GFP_KERNEL, "debug%d", cpu); + per_cpu(xen_debug_irq, cpu).name = debug_name; + rc = bind_virq_to_irqhandler(VIRQ_DEBUG, cpu, + xen_debug_interrupt, + IRQF_PERCPU | IRQF_NOBALANCING, + debug_name, NULL); + if (rc < 0) + goto fail; + per_cpu(xen_debug_irq, cpu).irq = rc; + } + + callfunc_name = kasprintf(GFP_KERNEL, "callfuncsingle%d", cpu); + per_cpu(xen_callfuncsingle_irq, cpu).name = callfunc_name; + rc = bind_ipi_to_irqhandler(XEN_CALL_FUNCTION_SINGLE_VECTOR, + cpu, + xen_call_function_single_interrupt, + IRQF_PERCPU|IRQF_NOBALANCING, + callfunc_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(xen_callfuncsingle_irq, cpu).irq = rc; + + return 0; + + fail: + xen_smp_intr_free(cpu); + return rc; +} + +void __init xen_smp_cpus_done(unsigned int max_cpus) +{ + if (xen_hvm_domain()) + native_smp_cpus_done(max_cpus); + else + calculate_max_logical_packages(); +} + +void xen_smp_send_reschedule(int cpu) +{ + xen_send_IPI_one(cpu, XEN_RESCHEDULE_VECTOR); +} + +static void __xen_send_IPI_mask(const struct cpumask *mask, + int vector) +{ + unsigned cpu; + + for_each_cpu_and(cpu, mask, cpu_online_mask) + xen_send_IPI_one(cpu, vector); +} + +void xen_smp_send_call_function_ipi(const struct cpumask *mask) +{ + int cpu; + + __xen_send_IPI_mask(mask, XEN_CALL_FUNCTION_VECTOR); + + /* Make sure other vcpus get a chance to run if they need to. 
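+ * If any target vcpu is currently preempted ("stolen"), yield this
+ * vcpu once so the hypervisor can run it and deliver the IPI sooner.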
*/ + for_each_cpu(cpu, mask) { + if (xen_vcpu_stolen(cpu)) { + HYPERVISOR_sched_op(SCHEDOP_yield, NULL); + break; + } + } +} + +void xen_smp_send_call_function_single_ipi(int cpu) +{ + __xen_send_IPI_mask(cpumask_of(cpu), + XEN_CALL_FUNCTION_SINGLE_VECTOR); +} + +static inline int xen_map_vector(int vector) +{ + int xen_vector; + + switch (vector) { + case RESCHEDULE_VECTOR: + xen_vector = XEN_RESCHEDULE_VECTOR; + break; + case CALL_FUNCTION_VECTOR: + xen_vector = XEN_CALL_FUNCTION_VECTOR; + break; + case CALL_FUNCTION_SINGLE_VECTOR: + xen_vector = XEN_CALL_FUNCTION_SINGLE_VECTOR; + break; + case IRQ_WORK_VECTOR: + xen_vector = XEN_IRQ_WORK_VECTOR; + break; +#ifdef CONFIG_X86_64 + case NMI_VECTOR: + case APIC_DM_NMI: /* Some use that instead of NMI_VECTOR */ + xen_vector = XEN_NMI_VECTOR; + break; +#endif + default: + xen_vector = -1; + printk(KERN_ERR "xen: vector 0x%x is not implemented\n", + vector); + } + + return xen_vector; +} + +void xen_send_IPI_mask(const struct cpumask *mask, + int vector) +{ + int xen_vector = xen_map_vector(vector); + + if (xen_vector >= 0) + __xen_send_IPI_mask(mask, xen_vector); +} + +void xen_send_IPI_all(int vector) +{ + int xen_vector = xen_map_vector(vector); + + if (xen_vector >= 0) + __xen_send_IPI_mask(cpu_online_mask, xen_vector); +} + +void xen_send_IPI_self(int vector) +{ + int xen_vector = xen_map_vector(vector); + + if (xen_vector >= 0) + xen_send_IPI_one(smp_processor_id(), xen_vector); +} + +void xen_send_IPI_mask_allbutself(const struct cpumask *mask, + int vector) +{ + unsigned cpu; + unsigned int this_cpu = smp_processor_id(); + int xen_vector = xen_map_vector(vector); + + if (!(num_online_cpus() > 1) || (xen_vector < 0)) + return; + + for_each_cpu_and(cpu, mask, cpu_online_mask) { + if (this_cpu == cpu) + continue; + + xen_send_IPI_one(cpu, xen_vector); + } +} + +void xen_send_IPI_allbutself(int vector) +{ + xen_send_IPI_mask_allbutself(cpu_online_mask, vector); +} + +static irqreturn_t xen_call_function_interrupt(int irq, void *dev_id) +{ + generic_smp_call_function_interrupt(); + inc_irq_stat(irq_call_count); + + return IRQ_HANDLED; +} + +static irqreturn_t xen_call_function_single_interrupt(int irq, void *dev_id) +{ + generic_smp_call_function_single_interrupt(); + inc_irq_stat(irq_call_count); + + return IRQ_HANDLED; +} diff --git a/arch/x86/xen/smp.h b/arch/x86/xen/smp.h new file mode 100644 index 0000000000..c20cbb14c8 --- /dev/null +++ b/arch/x86/xen/smp.h @@ -0,0 +1,49 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _XEN_SMP_H + +#ifdef CONFIG_SMP + +void asm_cpu_bringup_and_idle(void); +asmlinkage void cpu_bringup_and_idle(void); + +extern void xen_send_IPI_mask(const struct cpumask *mask, + int vector); +extern void xen_send_IPI_mask_allbutself(const struct cpumask *mask, + int vector); +extern void xen_send_IPI_allbutself(int vector); +extern void xen_send_IPI_all(int vector); +extern void xen_send_IPI_self(int vector); + +extern int xen_smp_intr_init(unsigned int cpu); +extern void xen_smp_intr_free(unsigned int cpu); +int xen_smp_intr_init_pv(unsigned int cpu); +void xen_smp_intr_free_pv(unsigned int cpu); + +void xen_smp_cpus_done(unsigned int max_cpus); + +void xen_smp_send_reschedule(int cpu); +void xen_smp_send_call_function_ipi(const struct cpumask *mask); +void xen_smp_send_call_function_single_ipi(int cpu); + +void __noreturn xen_cpu_bringup_again(unsigned long stack); + +struct xen_common_irq { + int irq; + char *name; +}; +#else /* CONFIG_SMP */ + +static inline int xen_smp_intr_init(unsigned int cpu) +{ + return 0; 
+} +static inline void xen_smp_intr_free(unsigned int cpu) {} + +static inline int xen_smp_intr_init_pv(unsigned int cpu) +{ + return 0; +} +static inline void xen_smp_intr_free_pv(unsigned int cpu) {} +#endif /* CONFIG_SMP */ + +#endif diff --git a/arch/x86/xen/smp_hvm.c b/arch/x86/xen/smp_hvm.c new file mode 100644 index 0000000000..ac95d1981c --- /dev/null +++ b/arch/x86/xen/smp_hvm.c @@ -0,0 +1,90 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/thread_info.h> +#include <asm/smp.h> + +#include <xen/events.h> + +#include "xen-ops.h" +#include "smp.h" + + +static void __init xen_hvm_smp_prepare_boot_cpu(void) +{ + BUG_ON(smp_processor_id() != 0); + native_smp_prepare_boot_cpu(); + + /* + * Setup vcpu_info for boot CPU. Secondary CPUs get their vcpu_info + * in xen_cpu_up_prepare_hvm(). + */ + xen_vcpu_setup(0); + + /* + * Called again in case the kernel boots on vcpu >= MAX_VIRT_CPUS. + * Refer to comments in xen_hvm_init_time_ops(). + */ + xen_hvm_init_time_ops(); + + /* + * The alternative logic (which patches the unlock/lock) runs before + * the smp bootup up code is activated. Hence we need to set this up + * the core kernel is being patched. Otherwise we will have only + * modules patched but not core code. + */ + xen_init_spinlocks(); +} + +static void __init xen_hvm_smp_prepare_cpus(unsigned int max_cpus) +{ + int cpu; + + native_smp_prepare_cpus(max_cpus); + + if (xen_have_vector_callback) { + WARN_ON(xen_smp_intr_init(0)); + xen_init_lock_cpu(0); + } + + for_each_possible_cpu(cpu) { + if (cpu == 0) + continue; + + /* Set default vcpu_id to make sure that we don't use cpu-0's */ + per_cpu(xen_vcpu_id, cpu) = XEN_VCPU_ID_INVALID; + } +} + +#ifdef CONFIG_HOTPLUG_CPU +static void xen_hvm_cleanup_dead_cpu(unsigned int cpu) +{ + if (xen_have_vector_callback) { + xen_smp_intr_free(cpu); + xen_uninit_lock_cpu(cpu); + xen_teardown_timer(cpu); + } +} +#else +static void xen_hvm_cleanup_dead_cpu(unsigned int cpu) +{ + BUG(); +} +#endif + +void __init xen_hvm_smp_init(void) +{ + smp_ops.smp_prepare_boot_cpu = xen_hvm_smp_prepare_boot_cpu; + smp_ops.smp_prepare_cpus = xen_hvm_smp_prepare_cpus; + smp_ops.smp_cpus_done = xen_smp_cpus_done; + smp_ops.cleanup_dead_cpu = xen_hvm_cleanup_dead_cpu; + + if (!xen_have_vector_callback) { +#ifdef CONFIG_PARAVIRT_SPINLOCKS + nopvspin = true; +#endif + return; + } + + smp_ops.smp_send_reschedule = xen_smp_send_reschedule; + smp_ops.send_call_func_ipi = xen_smp_send_call_function_ipi; + smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi; +} diff --git a/arch/x86/xen/smp_pv.c b/arch/x86/xen/smp_pv.c new file mode 100644 index 0000000000..a0f07bbfcd --- /dev/null +++ b/arch/x86/xen/smp_pv.c @@ -0,0 +1,463 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xen SMP support + * + * This file implements the Xen versions of smp_ops. SMP under Xen is + * very straightforward. Bringing a CPU up is simply a matter of + * loading its initial context and setting it running. + * + * IPIs are handled through the Xen event mechanism. + * + * Because virtual CPUs can be scheduled onto any real CPU, there's no + * useful topology information for the kernel to make use of. As a + * result, all CPUs are treated as if they're single-core and + * single-threaded. 
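+ * Consequently cpu_bringup() below simply reports x86_max_cores = 1
+ * for every CPU.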
+ */ +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <linux/err.h> +#include <linux/slab.h> +#include <linux/smp.h> +#include <linux/irq_work.h> +#include <linux/tick.h> +#include <linux/nmi.h> +#include <linux/cpuhotplug.h> +#include <linux/stackprotector.h> +#include <linux/pgtable.h> + +#include <asm/paravirt.h> +#include <asm/idtentry.h> +#include <asm/desc.h> +#include <asm/cpu.h> +#include <asm/io_apic.h> + +#include <xen/interface/xen.h> +#include <xen/interface/vcpu.h> +#include <xen/interface/xenpmu.h> + +#include <asm/spec-ctrl.h> +#include <asm/xen/interface.h> +#include <asm/xen/hypercall.h> + +#include <xen/xen.h> +#include <xen/page.h> +#include <xen/events.h> + +#include <xen/hvc-console.h> +#include "xen-ops.h" +#include "mmu.h" +#include "smp.h" +#include "pmu.h" + +cpumask_var_t xen_cpu_initialized_map; + +static DEFINE_PER_CPU(struct xen_common_irq, xen_irq_work) = { .irq = -1 }; +static DEFINE_PER_CPU(struct xen_common_irq, xen_pmu_irq) = { .irq = -1 }; + +static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id); + +static void cpu_bringup(void) +{ + int cpu; + + cr4_init(); + cpuhp_ap_sync_alive(); + cpu_init(); + fpu__init_cpu(); + touch_softlockup_watchdog(); + + /* PVH runs in ring 0 and allows us to do native syscalls. Yay! */ + if (!xen_feature(XENFEAT_supervisor_mode_kernel)) { + xen_enable_sysenter(); + xen_enable_syscall(); + } + cpu = smp_processor_id(); + smp_store_cpu_info(cpu); + cpu_data(cpu).x86_max_cores = 1; + set_cpu_sibling_map(cpu); + + speculative_store_bypass_ht_init(); + + xen_setup_cpu_clockevents(); + + notify_cpu_starting(cpu); + + set_cpu_online(cpu, true); + + smp_mb(); + + /* We can take interrupts now: we're officially "up". */ + local_irq_enable(); +} + +asmlinkage __visible void cpu_bringup_and_idle(void) +{ + cpu_bringup(); + cpu_startup_entry(CPUHP_AP_ONLINE_IDLE); +} + +void xen_smp_intr_free_pv(unsigned int cpu) +{ + kfree(per_cpu(xen_irq_work, cpu).name); + per_cpu(xen_irq_work, cpu).name = NULL; + if (per_cpu(xen_irq_work, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_irq_work, cpu).irq, NULL); + per_cpu(xen_irq_work, cpu).irq = -1; + } + + kfree(per_cpu(xen_pmu_irq, cpu).name); + per_cpu(xen_pmu_irq, cpu).name = NULL; + if (per_cpu(xen_pmu_irq, cpu).irq >= 0) { + unbind_from_irqhandler(per_cpu(xen_pmu_irq, cpu).irq, NULL); + per_cpu(xen_pmu_irq, cpu).irq = -1; + } +} + +int xen_smp_intr_init_pv(unsigned int cpu) +{ + int rc; + char *callfunc_name, *pmu_name; + + callfunc_name = kasprintf(GFP_KERNEL, "irqwork%d", cpu); + per_cpu(xen_irq_work, cpu).name = callfunc_name; + rc = bind_ipi_to_irqhandler(XEN_IRQ_WORK_VECTOR, + cpu, + xen_irq_work_interrupt, + IRQF_PERCPU|IRQF_NOBALANCING, + callfunc_name, + NULL); + if (rc < 0) + goto fail; + per_cpu(xen_irq_work, cpu).irq = rc; + + if (is_xen_pmu) { + pmu_name = kasprintf(GFP_KERNEL, "pmu%d", cpu); + per_cpu(xen_pmu_irq, cpu).name = pmu_name; + rc = bind_virq_to_irqhandler(VIRQ_XENPMU, cpu, + xen_pmu_irq_handler, + IRQF_PERCPU|IRQF_NOBALANCING, + pmu_name, NULL); + if (rc < 0) + goto fail; + per_cpu(xen_pmu_irq, cpu).irq = rc; + } + + return 0; + + fail: + xen_smp_intr_free_pv(cpu); + return rc; +} + +static void __init _get_smp_config(unsigned int early) +{ + int i, rc; + unsigned int subtract = 0; + + if (early) + return; + + num_processors = 0; + disabled_cpus = 0; + for (i = 0; i < nr_cpu_ids; i++) { + rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL); + if (rc >= 0) { + num_processors++; + set_cpu_possible(i, true); + } else { + 
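+			/* This vCPU does not exist: exclude it and count it
+			 * so nr_cpu_ids can be trimmed below. */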
set_cpu_possible(i, false); + set_cpu_present(i, false); + subtract++; + } + } +#ifdef CONFIG_HOTPLUG_CPU + /* This is akin to using 'nr_cpus' on the Linux command line. + * Which is OK as when we use 'dom0_max_vcpus=X' we can only + * have up to X, while nr_cpu_ids is greater than X. This + * normally is not a problem, except when CPU hotplugging + * is involved and then there might be more than X CPUs + * in the guest - which will not work as there is no + * hypercall to expand the max number of VCPUs an already + * running guest has. So cap it up to X. */ + if (subtract) + set_nr_cpu_ids(nr_cpu_ids - subtract); +#endif + /* Pretend to be a proper enumerated system */ + smp_found_config = 1; +} + +static void __init xen_pv_smp_prepare_boot_cpu(void) +{ + BUG_ON(smp_processor_id() != 0); + native_smp_prepare_boot_cpu(); + + if (!xen_feature(XENFEAT_writable_page_tables)) + /* We've switched to the "real" per-cpu gdt, so make + * sure the old memory can be recycled. */ + make_lowmem_page_readwrite(xen_initial_gdt); + + xen_setup_vcpu_info_placement(); + + /* + * The alternative logic (which patches the unlock/lock) runs before + * the smp bootup up code is activated. Hence we need to set this up + * the core kernel is being patched. Otherwise we will have only + * modules patched but not core code. + */ + xen_init_spinlocks(); +} + +static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus) +{ + unsigned cpu; + + if (ioapic_is_disabled) { + char *m = (max_cpus == 0) ? + "The nosmp parameter is incompatible with Xen; " \ + "use Xen dom0_max_vcpus=1 parameter" : + "The noapic parameter is incompatible with Xen"; + + xen_raw_printk(m); + panic(m); + } + xen_init_lock_cpu(0); + + smp_prepare_cpus_common(); + + cpu_data(0).x86_max_cores = 1; + + speculative_store_bypass_ht_init(); + + xen_pmu_init(0); + + if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0)) + BUG(); + + if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL)) + panic("could not allocate xen_cpu_initialized_map\n"); + + cpumask_copy(xen_cpu_initialized_map, cpumask_of(0)); + + /* Restrict the possible_map according to max_cpus. */ + while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) { + for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--) + continue; + set_cpu_possible(cpu, false); + } + + for_each_possible_cpu(cpu) + set_cpu_present(cpu, true); +} + +static int +cpu_initialize_context(unsigned int cpu, struct task_struct *idle) +{ + struct vcpu_guest_context *ctxt; + struct desc_struct *gdt; + unsigned long gdt_mfn; + + if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map)) + return 0; + + ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL); + if (ctxt == NULL) { + cpumask_clear_cpu(cpu, xen_cpu_initialized_map); + return -ENOMEM; + } + + gdt = get_cpu_gdt_rw(cpu); + + /* + * Bring up the CPU in cpu_bringup_and_idle() with the stack + * pointing just below where pt_regs would be if it were a normal + * kernel entry. 
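+	 * task_pt_regs(idle) yields exactly that address; it is loaded
+	 * into user_regs.esp below.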
+ */ + ctxt->user_regs.eip = (unsigned long)asm_cpu_bringup_and_idle; + ctxt->flags = VGCF_IN_KERNEL; + ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */ + ctxt->user_regs.ds = __USER_DS; + ctxt->user_regs.es = __USER_DS; + ctxt->user_regs.ss = __KERNEL_DS; + ctxt->user_regs.cs = __KERNEL_CS; + ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle); + + xen_copy_trap_info(ctxt->trap_ctxt); + + BUG_ON((unsigned long)gdt & ~PAGE_MASK); + + gdt_mfn = arbitrary_virt_to_mfn(gdt); + make_lowmem_page_readonly(gdt); + make_lowmem_page_readonly(mfn_to_virt(gdt_mfn)); + + ctxt->gdt_frames[0] = gdt_mfn; + ctxt->gdt_ents = GDT_ENTRIES; + + /* + * Set SS:SP that Xen will use when entering guest kernel mode + * from guest user mode. Subsequent calls to load_sp0() can + * change this value. + */ + ctxt->kernel_ss = __KERNEL_DS; + ctxt->kernel_sp = task_top_of_stack(idle); + + ctxt->gs_base_kernel = per_cpu_offset(cpu); + ctxt->event_callback_eip = + (unsigned long)xen_asm_exc_xen_hypervisor_callback; + ctxt->failsafe_callback_eip = + (unsigned long)xen_failsafe_callback; + per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir); + + ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir)); + if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt)) + BUG(); + + kfree(ctxt); + return 0; +} + +static int xen_pv_kick_ap(unsigned int cpu, struct task_struct *idle) +{ + int rc; + + rc = common_cpu_up(cpu, idle); + if (rc) + return rc; + + xen_setup_runstate_info(cpu); + + /* make sure interrupts start blocked */ + per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1; + + rc = cpu_initialize_context(cpu, idle); + if (rc) + return rc; + + xen_pmu_init(cpu); + + /* + * Why is this a BUG? If the hypercall fails then everything can be + * rolled back, no? + */ + BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_up, xen_vcpu_nr(cpu), NULL)); + + return 0; +} + +static void xen_pv_poll_sync_state(void) +{ + HYPERVISOR_sched_op(SCHEDOP_yield, NULL); +} + +#ifdef CONFIG_HOTPLUG_CPU +static int xen_pv_cpu_disable(void) +{ + unsigned int cpu = smp_processor_id(); + if (cpu == 0) + return -EBUSY; + + cpu_disable_common(); + + load_cr3(swapper_pg_dir); + return 0; +} + +static void xen_pv_cpu_die(unsigned int cpu) +{ + while (HYPERVISOR_vcpu_op(VCPUOP_is_up, xen_vcpu_nr(cpu), NULL)) { + __set_current_state(TASK_UNINTERRUPTIBLE); + schedule_timeout(HZ/10); + } +} + +static void xen_pv_cleanup_dead_cpu(unsigned int cpu) +{ + xen_smp_intr_free(cpu); + xen_uninit_lock_cpu(cpu); + xen_teardown_timer(cpu); + xen_pmu_finish(cpu); +} + +static void __noreturn xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */ +{ + play_dead_common(); + HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL); + xen_cpu_bringup_again((unsigned long)task_pt_regs(current)); + BUG(); +} + +#else /* !CONFIG_HOTPLUG_CPU */ +static int xen_pv_cpu_disable(void) +{ + return -ENOSYS; +} + +static void xen_pv_cpu_die(unsigned int cpu) +{ + BUG(); +} + +static void xen_pv_cleanup_dead_cpu(unsigned int cpu) +{ + BUG(); +} + +static void __noreturn xen_pv_play_dead(void) +{ + BUG(); +} + +#endif +static void stop_self(void *v) +{ + int cpu = smp_processor_id(); + + /* make sure we're not pinning something down */ + load_cr3(swapper_pg_dir); + /* should set up a minimal gdt */ + + set_cpu_online(cpu, false); + + HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(cpu), NULL); + BUG(); +} + +static void xen_pv_stop_other_cpus(int wait) +{ + smp_call_function(stop_self, NULL, wait); +} + +static irqreturn_t xen_irq_work_interrupt(int irq, void *dev_id) +{ + 
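+	/* Run the pending irq_work items for this CPU and account the IPI. */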
irq_work_run(); + inc_irq_stat(apic_irq_work_irqs); + + return IRQ_HANDLED; +} + +static const struct smp_ops xen_smp_ops __initconst = { + .smp_prepare_boot_cpu = xen_pv_smp_prepare_boot_cpu, + .smp_prepare_cpus = xen_pv_smp_prepare_cpus, + .smp_cpus_done = xen_smp_cpus_done, + + .kick_ap_alive = xen_pv_kick_ap, + .cpu_die = xen_pv_cpu_die, + .cleanup_dead_cpu = xen_pv_cleanup_dead_cpu, + .poll_sync_state = xen_pv_poll_sync_state, + .cpu_disable = xen_pv_cpu_disable, + .play_dead = xen_pv_play_dead, + + .stop_other_cpus = xen_pv_stop_other_cpus, + .smp_send_reschedule = xen_smp_send_reschedule, + + .send_call_func_ipi = xen_smp_send_call_function_ipi, + .send_call_func_single_ipi = xen_smp_send_call_function_single_ipi, +}; + +void __init xen_smp_init(void) +{ + smp_ops = xen_smp_ops; + + /* Avoid searching for BIOS MP tables */ + x86_init.mpparse.find_smp_config = x86_init_noop; + x86_init.mpparse.get_smp_config = _get_smp_config; +} diff --git a/arch/x86/xen/spinlock.c b/arch/x86/xen/spinlock.c new file mode 100644 index 0000000000..5c6fc16e4b --- /dev/null +++ b/arch/x86/xen/spinlock.c @@ -0,0 +1,154 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Split spinlock implementation out into its own file, so it can be + * compiled in a FTRACE-compatible way. + */ +#include <linux/kernel.h> +#include <linux/spinlock.h> +#include <linux/slab.h> +#include <linux/atomic.h> + +#include <asm/paravirt.h> +#include <asm/qspinlock.h> + +#include <xen/events.h> + +#include "xen-ops.h" + +static DEFINE_PER_CPU(int, lock_kicker_irq) = -1; +static DEFINE_PER_CPU(char *, irq_name); +static DEFINE_PER_CPU(atomic_t, xen_qlock_wait_nest); +static bool xen_pvspin = true; + +static void xen_qlock_kick(int cpu) +{ + int irq = per_cpu(lock_kicker_irq, cpu); + + /* Don't kick if the target's kicker interrupt is not initialized. */ + if (irq == -1) + return; + + xen_send_IPI_one(cpu, XEN_SPIN_UNLOCK_VECTOR); +} + +/* + * Halt the current CPU & release it back to the host + */ +static void xen_qlock_wait(u8 *byte, u8 val) +{ + int irq = __this_cpu_read(lock_kicker_irq); + atomic_t *nest_cnt = this_cpu_ptr(&xen_qlock_wait_nest); + + /* If kicker interrupts not initialized yet, just spin */ + if (irq == -1 || in_nmi()) + return; + + /* Detect reentry. */ + atomic_inc(nest_cnt); + + /* If irq pending already and no nested call clear it. 
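+	 * Otherwise, if the lock byte still holds the value we are waiting
+	 * on, block in xen_poll_irq() until the kicker event is pending.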
*/ + if (atomic_read(nest_cnt) == 1 && xen_test_irq_pending(irq)) { + xen_clear_irq_pending(irq); + } else if (READ_ONCE(*byte) == val) { + /* Block until irq becomes pending (or a spurious wakeup) */ + xen_poll_irq(irq); + } + + atomic_dec(nest_cnt); +} + +static irqreturn_t dummy_handler(int irq, void *dev_id) +{ + BUG(); + return IRQ_HANDLED; +} + +void xen_init_lock_cpu(int cpu) +{ + int irq; + char *name; + + if (!xen_pvspin) + return; + + WARN(per_cpu(lock_kicker_irq, cpu) >= 0, "spinlock on CPU%d exists on IRQ%d!\n", + cpu, per_cpu(lock_kicker_irq, cpu)); + + name = kasprintf(GFP_KERNEL, "spinlock%d", cpu); + per_cpu(irq_name, cpu) = name; + irq = bind_ipi_to_irqhandler(XEN_SPIN_UNLOCK_VECTOR, + cpu, + dummy_handler, + IRQF_PERCPU|IRQF_NOBALANCING, + name, + NULL); + + if (irq >= 0) { + disable_irq(irq); /* make sure it's never delivered */ + per_cpu(lock_kicker_irq, cpu) = irq; + } + + printk("cpu %d spinlock event irq %d\n", cpu, irq); +} + +void xen_uninit_lock_cpu(int cpu) +{ + int irq; + + if (!xen_pvspin) + return; + + kfree(per_cpu(irq_name, cpu)); + per_cpu(irq_name, cpu) = NULL; + /* + * When booting the kernel with 'mitigations=auto,nosmt', the secondary + * CPUs are not activated, and lock_kicker_irq is not initialized. + */ + irq = per_cpu(lock_kicker_irq, cpu); + if (irq == -1) + return; + + unbind_from_irqhandler(irq, NULL); + per_cpu(lock_kicker_irq, cpu) = -1; +} + +PV_CALLEE_SAVE_REGS_THUNK(xen_vcpu_stolen); + +/* + * Our init of PV spinlocks is split in two init functions due to us + * using paravirt patching and jump labels patching and having to do + * all of this before SMP code is invoked. + * + * The paravirt patching needs to be done _before_ the alternative asm code + * is started, otherwise we would not patch the core kernel code. + */ +void __init xen_init_spinlocks(void) +{ + /* Don't need to use pvqspinlock code if there is only 1 vCPU. 
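+	 * or if PV spinlocks were disabled on the command line via
+	 * "nopvspin" (or the deprecated "xen_nopvspin").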
*/ + if (num_possible_cpus() == 1 || nopvspin) + xen_pvspin = false; + + if (!xen_pvspin) { + printk(KERN_DEBUG "xen: PV spinlocks disabled\n"); + static_branch_disable(&virt_spin_lock_key); + return; + } + printk(KERN_DEBUG "xen: PV spinlocks enabled\n"); + + __pv_init_lock_hash(); + pv_ops.lock.queued_spin_lock_slowpath = __pv_queued_spin_lock_slowpath; + pv_ops.lock.queued_spin_unlock = + PV_CALLEE_SAVE(__pv_queued_spin_unlock); + pv_ops.lock.wait = xen_qlock_wait; + pv_ops.lock.kick = xen_qlock_kick; + pv_ops.lock.vcpu_is_preempted = PV_CALLEE_SAVE(xen_vcpu_stolen); +} + +static __init int xen_parse_nopvspin(char *arg) +{ + pr_notice("\"xen_nopvspin\" is deprecated, please use \"nopvspin\" instead\n"); + xen_pvspin = false; + return 0; +} +early_param("xen_nopvspin", xen_parse_nopvspin); + diff --git a/arch/x86/xen/suspend.c b/arch/x86/xen/suspend.c new file mode 100644 index 0000000000..1d83152c76 --- /dev/null +++ b/arch/x86/xen/suspend.c @@ -0,0 +1,84 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> +#include <linux/tick.h> +#include <linux/percpu-defs.h> + +#include <xen/xen.h> +#include <xen/interface/xen.h> +#include <xen/grant_table.h> +#include <xen/events.h> + +#include <asm/cpufeatures.h> +#include <asm/msr-index.h> +#include <asm/xen/hypercall.h> +#include <asm/xen/page.h> +#include <asm/fixmap.h> + +#include "xen-ops.h" +#include "mmu.h" +#include "pmu.h" + +static DEFINE_PER_CPU(u64, spec_ctrl); + +void xen_arch_pre_suspend(void) +{ + xen_save_time_memory_area(); + + if (xen_pv_domain()) + xen_pv_pre_suspend(); +} + +void xen_arch_post_suspend(int cancelled) +{ + if (xen_pv_domain()) + xen_pv_post_suspend(cancelled); + else + xen_hvm_post_suspend(cancelled); + + xen_restore_time_memory_area(); +} + +static void xen_vcpu_notify_restore(void *data) +{ + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) + wrmsrl(MSR_IA32_SPEC_CTRL, this_cpu_read(spec_ctrl)); + + /* Boot processor notified via generic timekeeping_resume() */ + if (smp_processor_id() == 0) + return; + + tick_resume_local(); +} + +static void xen_vcpu_notify_suspend(void *data) +{ + u64 tmp; + + tick_suspend_local(); + + if (xen_pv_domain() && boot_cpu_has(X86_FEATURE_SPEC_CTRL)) { + rdmsrl(MSR_IA32_SPEC_CTRL, tmp); + this_cpu_write(spec_ctrl, tmp); + wrmsrl(MSR_IA32_SPEC_CTRL, 0); + } +} + +void xen_arch_resume(void) +{ + int cpu; + + on_each_cpu(xen_vcpu_notify_restore, NULL, 1); + + for_each_online_cpu(cpu) + xen_pmu_init(cpu); +} + +void xen_arch_suspend(void) +{ + int cpu; + + for_each_online_cpu(cpu) + xen_pmu_finish(cpu); + + on_each_cpu(xen_vcpu_notify_suspend, NULL, 1); +} diff --git a/arch/x86/xen/suspend_hvm.c b/arch/x86/xen/suspend_hvm.c new file mode 100644 index 0000000000..0c4f7554b7 --- /dev/null +++ b/arch/x86/xen/suspend_hvm.c @@ -0,0 +1,27 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> + +#include <xen/xen.h> +#include <xen/hvm.h> +#include <xen/features.h> +#include <xen/interface/features.h> +#include <xen/events.h> + +#include "xen-ops.h" + +void xen_hvm_post_suspend(int suspend_cancelled) +{ + if (!suspend_cancelled) { + xen_hvm_init_shared_info(); + xen_vcpu_restore(); + } + if (xen_percpu_upcall) { + unsigned int cpu; + + for_each_online_cpu(cpu) + BUG_ON(xen_set_upcall_vector(cpu)); + } else { + xen_setup_callback_vector(); + } + xen_unplug_emulated_devices(); +} diff --git a/arch/x86/xen/suspend_pv.c b/arch/x86/xen/suspend_pv.c new file mode 100644 index 0000000000..cae9660f4c --- /dev/null +++ b/arch/x86/xen/suspend_pv.c @@ -0,0 +1,48 @@ 
+// SPDX-License-Identifier: GPL-2.0 +#include <linux/types.h> + +#include <asm/xen/hypercall.h> +#include <asm/xen/page.h> + +#include <asm/fixmap.h> + +#include "xen-ops.h" + +void xen_pv_pre_suspend(void) +{ + xen_mm_pin_all(); + + xen_start_info->store_mfn = mfn_to_pfn(xen_start_info->store_mfn); + xen_start_info->console.domU.mfn = + mfn_to_pfn(xen_start_info->console.domU.mfn); + + BUG_ON(!irqs_disabled()); + + HYPERVISOR_shared_info = &xen_dummy_shared_info; + if (HYPERVISOR_update_va_mapping(fix_to_virt(FIX_PARAVIRT_BOOTMAP), + __pte_ma(0), 0)) + BUG(); +} + +void xen_pv_post_suspend(int suspend_cancelled) +{ + xen_build_mfn_list_list(); + set_fixmap(FIX_PARAVIRT_BOOTMAP, xen_start_info->shared_info); + HYPERVISOR_shared_info = (void *)fix_to_virt(FIX_PARAVIRT_BOOTMAP); + xen_setup_mfn_list_list(); + + if (suspend_cancelled) { + xen_start_info->store_mfn = + pfn_to_mfn(xen_start_info->store_mfn); + xen_start_info->console.domU.mfn = + pfn_to_mfn(xen_start_info->console.domU.mfn); + } else { +#ifdef CONFIG_SMP + BUG_ON(xen_cpu_initialized_map == NULL); + cpumask_copy(xen_cpu_initialized_map, cpu_online_mask); +#endif + xen_vcpu_restore(); + } + + xen_mm_unpin_all(); +} diff --git a/arch/x86/xen/time.c b/arch/x86/xen/time.c new file mode 100644 index 0000000000..52fa5609b7 --- /dev/null +++ b/arch/x86/xen/time.c @@ -0,0 +1,657 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Xen time implementation. + * + * This is implemented in terms of a clocksource driver which uses + * the hypervisor clock as a nanosecond timebase, and a clockevent + * driver which uses the hypervisor's timer mechanism. + * + * Jeremy Fitzhardinge <jeremy@xensource.com>, XenSource Inc, 2007 + */ +#include <linux/kernel.h> +#include <linux/interrupt.h> +#include <linux/clocksource.h> +#include <linux/clockchips.h> +#include <linux/gfp.h> +#include <linux/slab.h> +#include <linux/pvclock_gtod.h> +#include <linux/timekeeper_internal.h> + +#include <asm/pvclock.h> +#include <asm/xen/hypervisor.h> +#include <asm/xen/hypercall.h> +#include <asm/xen/cpuid.h> + +#include <xen/events.h> +#include <xen/features.h> +#include <xen/interface/xen.h> +#include <xen/interface/vcpu.h> + +#include "xen-ops.h" + +/* Minimum amount of time until next clock event fires */ +#define TIMER_SLOP 100000 + +static u64 xen_sched_clock_offset __read_mostly; + +/* Get the TSC speed from Xen */ +static unsigned long xen_tsc_khz(void) +{ + struct pvclock_vcpu_time_info *info = + &HYPERVISOR_shared_info->vcpu_info[0].time; + + setup_force_cpu_cap(X86_FEATURE_TSC_KNOWN_FREQ); + return pvclock_tsc_khz(info); +} + +static u64 xen_clocksource_read(void) +{ + struct pvclock_vcpu_time_info *src; + u64 ret; + + preempt_disable_notrace(); + src = &__this_cpu_read(xen_vcpu)->time; + ret = pvclock_clocksource_read(src); + preempt_enable_notrace(); + return ret; +} + +static u64 xen_clocksource_get_cycles(struct clocksource *cs) +{ + return xen_clocksource_read(); +} + +static noinstr u64 xen_sched_clock(void) +{ + struct pvclock_vcpu_time_info *src; + u64 ret; + + src = &__this_cpu_read(xen_vcpu)->time; + ret = pvclock_clocksource_read_nowd(src); + ret -= xen_sched_clock_offset; + + return ret; +} + +static void xen_read_wallclock(struct timespec64 *ts) +{ + struct shared_info *s = HYPERVISOR_shared_info; + struct pvclock_wall_clock *wall_clock = &(s->wc); + struct pvclock_vcpu_time_info *vcpu_time; + + vcpu_time = &get_cpu_var(xen_vcpu)->time; + pvclock_read_wallclock(wall_clock, vcpu_time, ts); + put_cpu_var(xen_vcpu); +} + +static void 
xen_get_wallclock(struct timespec64 *now) +{ + xen_read_wallclock(now); +} + +static int xen_set_wallclock(const struct timespec64 *now) +{ + return -ENODEV; +} + +static int xen_pvclock_gtod_notify(struct notifier_block *nb, + unsigned long was_set, void *priv) +{ + /* Protected by the calling core code serialization */ + static struct timespec64 next_sync; + + struct xen_platform_op op; + struct timespec64 now; + struct timekeeper *tk = priv; + static bool settime64_supported = true; + int ret; + + now.tv_sec = tk->xtime_sec; + now.tv_nsec = (long)(tk->tkr_mono.xtime_nsec >> tk->tkr_mono.shift); + + /* + * We only take the expensive HV call when the clock was set + * or when the 11 minutes RTC synchronization time elapsed. + */ + if (!was_set && timespec64_compare(&now, &next_sync) < 0) + return NOTIFY_OK; + +again: + if (settime64_supported) { + op.cmd = XENPF_settime64; + op.u.settime64.mbz = 0; + op.u.settime64.secs = now.tv_sec; + op.u.settime64.nsecs = now.tv_nsec; + op.u.settime64.system_time = xen_clocksource_read(); + } else { + op.cmd = XENPF_settime32; + op.u.settime32.secs = now.tv_sec; + op.u.settime32.nsecs = now.tv_nsec; + op.u.settime32.system_time = xen_clocksource_read(); + } + + ret = HYPERVISOR_platform_op(&op); + + if (ret == -ENOSYS && settime64_supported) { + settime64_supported = false; + goto again; + } + if (ret < 0) + return NOTIFY_BAD; + + /* + * Move the next drift compensation time 11 minutes + * ahead. That's emulating the sync_cmos_clock() update for + * the hardware RTC. + */ + next_sync = now; + next_sync.tv_sec += 11 * 60; + + return NOTIFY_OK; +} + +static struct notifier_block xen_pvclock_gtod_notifier = { + .notifier_call = xen_pvclock_gtod_notify, +}; + +static int xen_cs_enable(struct clocksource *cs) +{ + vclocks_set_used(VDSO_CLOCKMODE_PVCLOCK); + return 0; +} + +static struct clocksource xen_clocksource __read_mostly = { + .name = "xen", + .rating = 400, + .read = xen_clocksource_get_cycles, + .mask = CLOCKSOURCE_MASK(64), + .flags = CLOCK_SOURCE_IS_CONTINUOUS, + .enable = xen_cs_enable, +}; + +/* + Xen clockevent implementation + + Xen has two clockevent implementations: + + The old timer_op one works with all released versions of Xen prior + to version 3.0.4. This version of the hypervisor provides a + single-shot timer with nanosecond resolution. However, sharing the + same event channel is a 100Hz tick which is delivered while the + vcpu is running. We don't care about or use this tick, but it will + cause the core time code to think the timer fired too soon, and + will end up resetting it each time. It could be filtered, but + doing so has complications when the ktime clocksource is not yet + the xen clocksource (ie, at boot time). + + The new vcpu_op-based timer interface allows the tick timer period + to be changed or turned off. The tick timer is not useful as a + periodic timer because events are only delivered to running vcpus. + The one-shot timer can report when a timeout is in the past, so + set_next_event is capable of returning -ETIME when appropriate. + This interface is used when available. +*/ + + +/* + Get a hypervisor absolute time. In theory we could maintain an + offset between the kernel's time and the hypervisor's time, and + apply that to a kernel's absolute timeout. Unfortunately the + hypervisor and kernel times can drift even if the kernel is using + the Xen clocksource, because ntp can warp the kernel's clocksource. 
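+   So get_abs_timeout() simply reads the hypervisor clock via
+   xen_clocksource_read() and adds the relative delta to it.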
+*/ +static s64 get_abs_timeout(unsigned long delta) +{ + return xen_clocksource_read() + delta; +} + +static int xen_timerop_shutdown(struct clock_event_device *evt) +{ + /* cancel timeout */ + HYPERVISOR_set_timer_op(0); + + return 0; +} + +static int xen_timerop_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + WARN_ON(!clockevent_state_oneshot(evt)); + + if (HYPERVISOR_set_timer_op(get_abs_timeout(delta)) < 0) + BUG(); + + /* We may have missed the deadline, but there's no real way of + knowing for sure. If the event was in the past, then we'll + get an immediate interrupt. */ + + return 0; +} + +static struct clock_event_device xen_timerop_clockevent __ro_after_init = { + .name = "xen", + .features = CLOCK_EVT_FEAT_ONESHOT, + + .max_delta_ns = 0xffffffff, + .max_delta_ticks = 0xffffffff, + .min_delta_ns = TIMER_SLOP, + .min_delta_ticks = TIMER_SLOP, + + .mult = 1, + .shift = 0, + .rating = 500, + + .set_state_shutdown = xen_timerop_shutdown, + .set_next_event = xen_timerop_set_next_event, +}; + +static int xen_vcpuop_shutdown(struct clock_event_device *evt) +{ + int cpu = smp_processor_id(); + + if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, xen_vcpu_nr(cpu), + NULL) || + HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu), + NULL)) + BUG(); + + return 0; +} + +static int xen_vcpuop_set_oneshot(struct clock_event_device *evt) +{ + int cpu = smp_processor_id(); + + if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu), + NULL)) + BUG(); + + return 0; +} + +static int xen_vcpuop_set_next_event(unsigned long delta, + struct clock_event_device *evt) +{ + int cpu = smp_processor_id(); + struct vcpu_set_singleshot_timer single; + int ret; + + WARN_ON(!clockevent_state_oneshot(evt)); + + single.timeout_abs_ns = get_abs_timeout(delta); + /* Get an event anyway, even if the timeout is already expired */ + single.flags = 0; + + ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, xen_vcpu_nr(cpu), + &single); + BUG_ON(ret != 0); + + return ret; +} + +static struct clock_event_device xen_vcpuop_clockevent __ro_after_init = { + .name = "xen", + .features = CLOCK_EVT_FEAT_ONESHOT, + + .max_delta_ns = 0xffffffff, + .max_delta_ticks = 0xffffffff, + .min_delta_ns = TIMER_SLOP, + .min_delta_ticks = TIMER_SLOP, + + .mult = 1, + .shift = 0, + .rating = 500, + + .set_state_shutdown = xen_vcpuop_shutdown, + .set_state_oneshot = xen_vcpuop_set_oneshot, + .set_next_event = xen_vcpuop_set_next_event, +}; + +static const struct clock_event_device *xen_clockevent = + &xen_timerop_clockevent; + +struct xen_clock_event_device { + struct clock_event_device evt; + char name[16]; +}; +static DEFINE_PER_CPU(struct xen_clock_event_device, xen_clock_events) = { .evt.irq = -1 }; + +static irqreturn_t xen_timer_interrupt(int irq, void *dev_id) +{ + struct clock_event_device *evt = this_cpu_ptr(&xen_clock_events.evt); + irqreturn_t ret; + + ret = IRQ_NONE; + if (evt->event_handler) { + evt->event_handler(evt); + ret = IRQ_HANDLED; + } + + return ret; +} + +void xen_teardown_timer(int cpu) +{ + struct clock_event_device *evt; + evt = &per_cpu(xen_clock_events, cpu).evt; + + if (evt->irq >= 0) { + unbind_from_irqhandler(evt->irq, NULL); + evt->irq = -1; + } +} + +void xen_setup_timer(int cpu) +{ + struct xen_clock_event_device *xevt = &per_cpu(xen_clock_events, cpu); + struct clock_event_device *evt = &xevt->evt; + int irq; + + WARN(evt->irq >= 0, "IRQ%d for CPU%d is already allocated\n", evt->irq, cpu); + if (evt->irq >= 0) + xen_teardown_timer(cpu); + + 
printk(KERN_INFO "installing Xen timer for CPU %d\n", cpu); + + snprintf(xevt->name, sizeof(xevt->name), "timer%d", cpu); + + irq = bind_virq_to_irqhandler(VIRQ_TIMER, cpu, xen_timer_interrupt, + IRQF_PERCPU|IRQF_NOBALANCING|IRQF_TIMER| + IRQF_FORCE_RESUME|IRQF_EARLY_RESUME, + xevt->name, NULL); + (void)xen_set_irq_priority(irq, XEN_IRQ_PRIORITY_MAX); + + memcpy(evt, xen_clockevent, sizeof(*evt)); + + evt->cpumask = cpumask_of(cpu); + evt->irq = irq; +} + + +void xen_setup_cpu_clockevents(void) +{ + clockevents_register_device(this_cpu_ptr(&xen_clock_events.evt)); +} + +void xen_timer_resume(void) +{ + int cpu; + + if (xen_clockevent != &xen_vcpuop_clockevent) + return; + + for_each_online_cpu(cpu) { + if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, + xen_vcpu_nr(cpu), NULL)) + BUG(); + } +} + +static struct pvclock_vsyscall_time_info *xen_clock __read_mostly; +static u64 xen_clock_value_saved; + +void xen_save_time_memory_area(void) +{ + struct vcpu_register_time_memory_area t; + int ret; + + xen_clock_value_saved = xen_clocksource_read() - xen_sched_clock_offset; + + if (!xen_clock) + return; + + t.addr.v = NULL; + + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); + if (ret != 0) + pr_notice("Cannot save secondary vcpu_time_info (err %d)", + ret); + else + clear_page(xen_clock); +} + +void xen_restore_time_memory_area(void) +{ + struct vcpu_register_time_memory_area t; + int ret; + + if (!xen_clock) + goto out; + + t.addr.v = &xen_clock->pvti; + + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); + + /* + * We don't disable VDSO_CLOCKMODE_PVCLOCK entirely if it fails to + * register the secondary time info with Xen or if we migrated to a + * host without the necessary flags. On both of these cases what + * happens is either process seeing a zeroed out pvti or seeing no + * PVCLOCK_TSC_STABLE_BIT bit set. Userspace checks the latter and + * if 0, it discards the data in pvti and fallbacks to a system + * call for a reliable timestamp. + */ + if (ret != 0) + pr_notice("Cannot restore secondary vcpu_time_info (err %d)", + ret); + +out: + /* Need pvclock_resume() before using xen_clocksource_read(). */ + pvclock_resume(); + xen_sched_clock_offset = xen_clocksource_read() - xen_clock_value_saved; +} + +static void xen_setup_vsyscall_time_info(void) +{ + struct vcpu_register_time_memory_area t; + struct pvclock_vsyscall_time_info *ti; + int ret; + + ti = (struct pvclock_vsyscall_time_info *)get_zeroed_page(GFP_KERNEL); + if (!ti) + return; + + t.addr.v = &ti->pvti; + + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, 0, &t); + if (ret) { + pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (err %d)\n", ret); + free_page((unsigned long)ti); + return; + } + + /* + * If primary time info had this bit set, secondary should too since + * it's the same data on both just different memory regions. But we + * still check it in case hypervisor is buggy. + */ + if (!(ti->pvti.flags & PVCLOCK_TSC_STABLE_BIT)) { + t.addr.v = NULL; + ret = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_time_memory_area, + 0, &t); + if (!ret) + free_page((unsigned long)ti); + + pr_notice("xen: VDSO_CLOCKMODE_PVCLOCK not supported (tsc unstable)\n"); + return; + } + + xen_clock = ti; + pvclock_set_pvti_cpu0_va(xen_clock); + + xen_clocksource.vdso_clock_mode = VDSO_CLOCKMODE_PVCLOCK; +} + +/* + * Check if it is possible to safely use the tsc as a clocksource. 
This is + * only true if the hypervisor notifies the guest that its tsc is invariant, + * the tsc is stable, and the tsc instruction will never be emulated. + */ +static int __init xen_tsc_safe_clocksource(void) +{ + u32 eax, ebx, ecx, edx; + + if (!(boot_cpu_has(X86_FEATURE_CONSTANT_TSC))) + return 0; + + if (!(boot_cpu_has(X86_FEATURE_NONSTOP_TSC))) + return 0; + + if (check_tsc_unstable()) + return 0; + + /* Leaf 4, sub-leaf 0 (0x40000x03) */ + cpuid_count(xen_cpuid_base() + 3, 0, &eax, &ebx, &ecx, &edx); + + return ebx == XEN_CPUID_TSC_MODE_NEVER_EMULATE; +} + +static void __init xen_time_init(void) +{ + struct pvclock_vcpu_time_info *pvti; + int cpu = smp_processor_id(); + struct timespec64 tp; + + /* + * As Dom0 is never moved, no penalty on using TSC there. + * + * If it is possible for the guest to determine that the tsc is a safe + * clocksource, then set xen_clocksource rating below that of the tsc + * so that the system prefers tsc instead. + */ + if (xen_initial_domain()) + xen_clocksource.rating = 275; + else if (xen_tsc_safe_clocksource()) + xen_clocksource.rating = 299; + + clocksource_register_hz(&xen_clocksource, NSEC_PER_SEC); + + if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, xen_vcpu_nr(cpu), + NULL) == 0) { + /* Successfully turned off 100Hz tick, so we have the + vcpuop-based timer interface */ + printk(KERN_DEBUG "Xen: using vcpuop timer interface\n"); + xen_clockevent = &xen_vcpuop_clockevent; + } + + /* Set initial system time with full resolution */ + xen_read_wallclock(&tp); + do_settimeofday64(&tp); + + setup_force_cpu_cap(X86_FEATURE_TSC); + + /* + * We check ahead on the primary time info if this + * bit is supported hence speeding up Xen clocksource. + */ + pvti = &__this_cpu_read(xen_vcpu)->time; + if (pvti->flags & PVCLOCK_TSC_STABLE_BIT) { + pvclock_set_flags(PVCLOCK_TSC_STABLE_BIT); + xen_setup_vsyscall_time_info(); + } + + xen_setup_runstate_info(cpu); + xen_setup_timer(cpu); + xen_setup_cpu_clockevents(); + + xen_time_setup_guest(); + + if (xen_initial_domain()) + pvclock_gtod_register_notifier(&xen_pvclock_gtod_notifier); +} + +static void __init xen_init_time_common(void) +{ + xen_sched_clock_offset = xen_clocksource_read(); + static_call_update(pv_steal_clock, xen_steal_clock); + paravirt_set_sched_clock(xen_sched_clock); + + x86_platform.calibrate_tsc = xen_tsc_khz; + x86_platform.get_wallclock = xen_get_wallclock; +} + +void __init xen_init_time_ops(void) +{ + xen_init_time_common(); + + x86_init.timers.timer_init = xen_time_init; + x86_init.timers.setup_percpu_clockev = x86_init_noop; + x86_cpuinit.setup_percpu_clockev = x86_init_noop; + + /* Dom0 uses the native method to set the hardware RTC. */ + if (!xen_initial_domain()) + x86_platform.set_wallclock = xen_set_wallclock; +} + +#ifdef CONFIG_XEN_PVHVM +static void xen_hvm_setup_cpu_clockevents(void) +{ + int cpu = smp_processor_id(); + xen_setup_runstate_info(cpu); + /* + * xen_setup_timer(cpu) - snprintf is bad in atomic context. Hence + * doing it xen_hvm_cpu_notify (which gets called by smp_init during + * early bootup and also during CPU hotplug events). + */ + xen_setup_cpu_clockevents(); +} + +void __init xen_hvm_init_time_ops(void) +{ + static bool hvm_time_initialized; + + if (hvm_time_initialized) + return; + + /* + * vector callback is needed otherwise we cannot receive interrupts + * on cpu > 0 and at this point we don't know how many cpus are + * available. 
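+	 * Without the callback we return early and leave the default
+	 * (emulated) timer setup untouched.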
+ */ + if (!xen_have_vector_callback) + return; + + if (!xen_feature(XENFEAT_hvm_safe_pvclock)) { + pr_info_once("Xen doesn't support pvclock on HVM, disable pv timer"); + return; + } + + /* + * Only MAX_VIRT_CPUS 'vcpu_info' are embedded inside 'shared_info'. + * The __this_cpu_read(xen_vcpu) is still NULL when Xen HVM guest + * boots on vcpu >= MAX_VIRT_CPUS (e.g., kexec), To access + * __this_cpu_read(xen_vcpu) via xen_clocksource_read() will panic. + * + * The xen_hvm_init_time_ops() should be called again later after + * __this_cpu_read(xen_vcpu) is available. + */ + if (!__this_cpu_read(xen_vcpu)) { + pr_info("Delay xen_init_time_common() as kernel is running on vcpu=%d\n", + xen_vcpu_nr(0)); + return; + } + + xen_init_time_common(); + + x86_init.timers.setup_percpu_clockev = xen_time_init; + x86_cpuinit.setup_percpu_clockev = xen_hvm_setup_cpu_clockevents; + + x86_platform.set_wallclock = xen_set_wallclock; + + hvm_time_initialized = true; +} +#endif + +/* Kernel parameter to specify Xen timer slop */ +static int __init parse_xen_timer_slop(char *ptr) +{ + unsigned long slop = memparse(ptr, NULL); + + xen_timerop_clockevent.min_delta_ns = slop; + xen_timerop_clockevent.min_delta_ticks = slop; + xen_vcpuop_clockevent.min_delta_ns = slop; + xen_vcpuop_clockevent.min_delta_ticks = slop; + + return 0; +} +early_param("xen_timer_slop", parse_xen_timer_slop); diff --git a/arch/x86/xen/trace.c b/arch/x86/xen/trace.c new file mode 100644 index 0000000000..329f60eb95 --- /dev/null +++ b/arch/x86/xen/trace.c @@ -0,0 +1,21 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/ftrace.h> +#include <xen/interface/xen.h> +#include <xen/interface/xen-mca.h> + +#define HYPERCALL(x) [__HYPERVISOR_##x] = "("#x")", +static const char *xen_hypercall_names[] = { +#include <asm/xen-hypercalls.h> +}; +#undef HYPERCALL + +static const char *xen_hypercall_name(unsigned op) +{ + if (op < ARRAY_SIZE(xen_hypercall_names) && xen_hypercall_names[op] != NULL) + return xen_hypercall_names[op]; + + return ""; +} + +#define CREATE_TRACE_POINTS +#include <trace/events/xen.h> diff --git a/arch/x86/xen/vga.c b/arch/x86/xen/vga.c new file mode 100644 index 0000000000..d97adab842 --- /dev/null +++ b/arch/x86/xen/vga.c @@ -0,0 +1,78 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/screen_info.h> +#include <linux/init.h> + +#include <asm/bootparam.h> +#include <asm/setup.h> + +#include <xen/interface/xen.h> + +#include "xen-ops.h" + +void __init xen_init_vga(const struct dom0_vga_console_info *info, size_t size, + struct screen_info *screen_info) +{ + /* This is drawn from a dump from vgacon:startup in + * standard Linux. 
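+	 * These defaults describe a standard 80x25 VGA text console and
+	 * are overridden below from the dom0_vga_console_info Xen provides.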
*/ + screen_info->orig_video_mode = 3; + screen_info->orig_video_isVGA = 1; + screen_info->orig_video_lines = 25; + screen_info->orig_video_cols = 80; + screen_info->orig_video_ega_bx = 3; + screen_info->orig_video_points = 16; + screen_info->orig_y = screen_info->orig_video_lines - 1; + + switch (info->video_type) { + case XEN_VGATYPE_TEXT_MODE_3: + if (size < offsetof(struct dom0_vga_console_info, u.text_mode_3) + + sizeof(info->u.text_mode_3)) + break; + screen_info->orig_video_lines = info->u.text_mode_3.rows; + screen_info->orig_video_cols = info->u.text_mode_3.columns; + screen_info->orig_x = info->u.text_mode_3.cursor_x; + screen_info->orig_y = info->u.text_mode_3.cursor_y; + screen_info->orig_video_points = + info->u.text_mode_3.font_height; + break; + + case XEN_VGATYPE_EFI_LFB: + case XEN_VGATYPE_VESA_LFB: + if (size < offsetof(struct dom0_vga_console_info, + u.vesa_lfb.gbl_caps)) + break; + screen_info->orig_video_isVGA = VIDEO_TYPE_VLFB; + screen_info->lfb_width = info->u.vesa_lfb.width; + screen_info->lfb_height = info->u.vesa_lfb.height; + screen_info->lfb_depth = info->u.vesa_lfb.bits_per_pixel; + screen_info->lfb_base = info->u.vesa_lfb.lfb_base; + screen_info->lfb_size = info->u.vesa_lfb.lfb_size; + screen_info->lfb_linelength = info->u.vesa_lfb.bytes_per_line; + screen_info->red_size = info->u.vesa_lfb.red_size; + screen_info->red_pos = info->u.vesa_lfb.red_pos; + screen_info->green_size = info->u.vesa_lfb.green_size; + screen_info->green_pos = info->u.vesa_lfb.green_pos; + screen_info->blue_size = info->u.vesa_lfb.blue_size; + screen_info->blue_pos = info->u.vesa_lfb.blue_pos; + screen_info->rsvd_size = info->u.vesa_lfb.rsvd_size; + screen_info->rsvd_pos = info->u.vesa_lfb.rsvd_pos; + + if (size >= offsetof(struct dom0_vga_console_info, + u.vesa_lfb.ext_lfb_base) + + sizeof(info->u.vesa_lfb.ext_lfb_base) + && info->u.vesa_lfb.ext_lfb_base) { + screen_info->ext_lfb_base = info->u.vesa_lfb.ext_lfb_base; + screen_info->capabilities |= VIDEO_CAPABILITY_64BIT_BASE; + } + + if (info->video_type == XEN_VGATYPE_EFI_LFB) { + screen_info->orig_video_isVGA = VIDEO_TYPE_EFI; + break; + } + + if (size >= offsetof(struct dom0_vga_console_info, + u.vesa_lfb.mode_attrs) + + sizeof(info->u.vesa_lfb.mode_attrs)) + screen_info->vesa_attributes = info->u.vesa_lfb.mode_attrs; + break; + } +} diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S new file mode 100644 index 0000000000..1a9cd18dfb --- /dev/null +++ b/arch/x86/xen/xen-asm.S @@ -0,0 +1,309 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Asm versions of Xen pv-ops, suitable for direct use. + * + * We only bother with direct forms (ie, vcpu in percpu data) of the + * operations here; the indirect forms are better handled in C. + */ + +#include <asm/errno.h> +#include <asm/asm-offsets.h> +#include <asm/percpu.h> +#include <asm/processor-flags.h> +#include <asm/segment.h> +#include <asm/thread_info.h> +#include <asm/asm.h> +#include <asm/frame.h> +#include <asm/unwind_hints.h> + +#include <xen/interface/xen.h> + +#include <linux/init.h> +#include <linux/linkage.h> +#include <../entry/calling.h> + +.pushsection .noinstr.text, "ax" +/* + * Disabling events is simply a matter of making the event mask + * non-zero. + */ +SYM_FUNC_START(xen_irq_disable_direct) + movb $1, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask + RET +SYM_FUNC_END(xen_irq_disable_direct) + +/* + * Force an event check by making a hypercall, but preserve regs + * before making the call. 
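+ * The caller-clobbered registers (rax, rcx, rdx, rsi, rdi and r8-r11)
+ * are pushed before and popped after the xen_force_evtchn_callback()
+ * call below.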
+ */ +SYM_FUNC_START(check_events) + FRAME_BEGIN + push %rax + push %rcx + push %rdx + push %rsi + push %rdi + push %r8 + push %r9 + push %r10 + push %r11 + call xen_force_evtchn_callback + pop %r11 + pop %r10 + pop %r9 + pop %r8 + pop %rdi + pop %rsi + pop %rdx + pop %rcx + pop %rax + FRAME_END + RET +SYM_FUNC_END(check_events) + +/* + * Enable events. This clears the event mask and tests the pending + * event status with one and operation. If there are pending events, + * then enter the hypervisor to get them handled. + */ +SYM_FUNC_START(xen_irq_enable_direct) + FRAME_BEGIN + /* Unmask events */ + movb $0, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask + + /* + * Preempt here doesn't matter because that will deal with any + * pending interrupts. The pending check may end up being run + * on the wrong CPU, but that doesn't hurt. + */ + + /* Test for pending */ + testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_pending + jz 1f + + call check_events +1: + FRAME_END + RET +SYM_FUNC_END(xen_irq_enable_direct) + +/* + * (xen_)save_fl is used to get the current interrupt enable status. + * Callers expect the status to be in X86_EFLAGS_IF, and other bits + * may be set in the return value. We take advantage of this by + * making sure that X86_EFLAGS_IF has the right value (and other bits + * in that byte are 0), but other bits in the return value are + * undefined. We need to toggle the state of the bit, because Xen and + * x86 use opposite senses (mask vs enable). + */ +SYM_FUNC_START(xen_save_fl_direct) + testb $0xff, PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_mask + setz %ah + addb %ah, %ah + RET +SYM_FUNC_END(xen_save_fl_direct) + +SYM_FUNC_START(xen_read_cr2) + FRAME_BEGIN + _ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX + _ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX + FRAME_END + RET +SYM_FUNC_END(xen_read_cr2); + +SYM_FUNC_START(xen_read_cr2_direct) + FRAME_BEGIN + _ASM_MOV PER_CPU_VAR(xen_vcpu_info) + XEN_vcpu_info_arch_cr2, %_ASM_AX + FRAME_END + RET +SYM_FUNC_END(xen_read_cr2_direct); +.popsection + +.macro xen_pv_trap name +SYM_CODE_START(xen_\name) + UNWIND_HINT_ENTRY + ENDBR + pop %rcx + pop %r11 + jmp \name +SYM_CODE_END(xen_\name) +_ASM_NOKPROBE(xen_\name) +.endm + +xen_pv_trap asm_exc_divide_error +xen_pv_trap asm_xenpv_exc_debug +xen_pv_trap asm_exc_int3 +xen_pv_trap asm_xenpv_exc_nmi +xen_pv_trap asm_exc_overflow +xen_pv_trap asm_exc_bounds +xen_pv_trap asm_exc_invalid_op +xen_pv_trap asm_exc_device_not_available +xen_pv_trap asm_xenpv_exc_double_fault +xen_pv_trap asm_exc_coproc_segment_overrun +xen_pv_trap asm_exc_invalid_tss +xen_pv_trap asm_exc_segment_not_present +xen_pv_trap asm_exc_stack_segment +xen_pv_trap asm_exc_general_protection +xen_pv_trap asm_exc_page_fault +xen_pv_trap asm_exc_spurious_interrupt_bug +xen_pv_trap asm_exc_coprocessor_error +xen_pv_trap asm_exc_alignment_check +#ifdef CONFIG_X86_CET +xen_pv_trap asm_exc_control_protection +#endif +#ifdef CONFIG_X86_MCE +xen_pv_trap asm_xenpv_exc_machine_check +#endif /* CONFIG_X86_MCE */ +xen_pv_trap asm_exc_simd_coprocessor_error +#ifdef CONFIG_IA32_EMULATION +xen_pv_trap asm_int80_emulation +#endif +xen_pv_trap asm_exc_xen_unknown_trap +xen_pv_trap asm_exc_xen_hypervisor_callback + + __INIT +SYM_CODE_START(xen_early_idt_handler_array) + i = 0 + .rept NUM_EXCEPTION_VECTORS + UNWIND_HINT_UNDEFINED + ENDBR + pop %rcx + pop %r11 + jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE + i = i + 1 + .fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc + .endr 
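+	/*
+	 * Each stub above pops the %rcx/%r11 pair that Xen pushes for PV
+	 * exceptions and jumps to the matching early IDT handler; .fill
+	 * pads every stub to XEN_EARLY_IDT_HANDLER_SIZE bytes.
+	 */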
+SYM_CODE_END(xen_early_idt_handler_array) + __FINIT + +hypercall_iret = hypercall_page + __HYPERVISOR_iret * 32 +/* + * Xen64 iret frame: + * + * ss + * rsp + * rflags + * cs + * rip <-- standard iret frame + * + * flags + * + * rcx } + * r11 }<-- pushed by hypercall page + * rsp->rax } + */ +SYM_CODE_START(xen_iret) + UNWIND_HINT_UNDEFINED + ANNOTATE_NOENDBR + pushq $0 + jmp hypercall_iret +SYM_CODE_END(xen_iret) + +/* + * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is + * also the kernel stack. Reusing swapgs_restore_regs_and_return_to_usermode() + * in XEN pv would cause %rsp to move up to the top of the kernel stack and + * leave the IRET frame below %rsp, which is dangerous to be corrupted if #NMI + * interrupts. And swapgs_restore_regs_and_return_to_usermode() pushing the IRET + * frame at the same address is useless. + */ +SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode) + UNWIND_HINT_REGS + POP_REGS + + /* stackleak_erase() can work safely on the kernel stack. */ + STACKLEAK_ERASE_NOCLOBBER + + addq $8, %rsp /* skip regs->orig_ax */ + jmp xen_iret +SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode) + +/* + * Xen handles syscall callbacks much like ordinary exceptions, which + * means we have: + * - kernel gs + * - kernel rsp + * - an iret-like stack frame on the stack (including rcx and r11): + * ss + * rsp + * rflags + * cs + * rip + * r11 + * rsp->rcx + */ + +/* Normal 64-bit system call target */ +SYM_CODE_START(xen_entry_SYSCALL_64) + UNWIND_HINT_ENTRY + ENDBR + popq %rcx + popq %r11 + + /* + * Neither Xen nor the kernel really knows what the old SS and + * CS were. The kernel expects __USER_DS and __USER_CS, so + * report those values even though Xen will guess its own values. + */ + movq $__USER_DS, 4*8(%rsp) + movq $__USER_CS, 1*8(%rsp) + + jmp entry_SYSCALL_64_after_hwframe +SYM_CODE_END(xen_entry_SYSCALL_64) + +#ifdef CONFIG_IA32_EMULATION + +/* 32-bit compat syscall target */ +SYM_CODE_START(xen_entry_SYSCALL_compat) + UNWIND_HINT_ENTRY + ENDBR + popq %rcx + popq %r11 + + /* + * Neither Xen nor the kernel really knows what the old SS and + * CS were. The kernel expects __USER_DS and __USER32_CS, so + * report those values even though Xen will guess its own values. + */ + movq $__USER_DS, 4*8(%rsp) + movq $__USER32_CS, 1*8(%rsp) + + jmp entry_SYSCALL_compat_after_hwframe +SYM_CODE_END(xen_entry_SYSCALL_compat) + +/* 32-bit compat sysenter target */ +SYM_CODE_START(xen_entry_SYSENTER_compat) + UNWIND_HINT_ENTRY + ENDBR + /* + * NB: Xen is polite and clears TF from EFLAGS for us. This means + * that we don't need to guard against single step exceptions here. + */ + popq %rcx + popq %r11 + + /* + * Neither Xen nor the kernel really knows what the old SS and + * CS were. The kernel expects __USER_DS and __USER32_CS, so + * report those values even though Xen will guess its own values. 
+ */ + movq $__USER_DS, 4*8(%rsp) + movq $__USER32_CS, 1*8(%rsp) + + jmp entry_SYSENTER_compat_after_hwframe +SYM_CODE_END(xen_entry_SYSENTER_compat) + +#else /* !CONFIG_IA32_EMULATION */ + +SYM_CODE_START(xen_entry_SYSCALL_compat) +SYM_CODE_START(xen_entry_SYSENTER_compat) + UNWIND_HINT_ENTRY + ENDBR + lea 16(%rsp), %rsp /* strip %rcx, %r11 */ + mov $-ENOSYS, %rax + pushq $0 + jmp hypercall_iret +SYM_CODE_END(xen_entry_SYSENTER_compat) +SYM_CODE_END(xen_entry_SYSCALL_compat) + +#endif /* CONFIG_IA32_EMULATION */ diff --git a/arch/x86/xen/xen-head.S b/arch/x86/xen/xen-head.S new file mode 100644 index 0000000000..a0ea285878 --- /dev/null +++ b/arch/x86/xen/xen-head.S @@ -0,0 +1,124 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Xen-specific pieces of head.S, intended to be included in the right + place in head.S */ + +#ifdef CONFIG_XEN + +#include <linux/elfnote.h> +#include <linux/init.h> + +#include <asm/boot.h> +#include <asm/asm.h> +#include <asm/msr.h> +#include <asm/page_types.h> +#include <asm/percpu.h> +#include <asm/unwind_hints.h> + +#include <xen/interface/elfnote.h> +#include <xen/interface/features.h> +#include <xen/interface/xen.h> +#include <xen/interface/xen-mca.h> +#include <asm/xen/interface.h> + +.pushsection .noinstr.text, "ax" + .balign PAGE_SIZE +SYM_CODE_START(hypercall_page) + .rept (PAGE_SIZE / 32) + UNWIND_HINT_FUNC + ANNOTATE_NOENDBR + ANNOTATE_UNRET_SAFE + ret + /* + * Xen will write the hypercall page, and sort out ENDBR. + */ + .skip 31, 0xcc + .endr + +#define HYPERCALL(n) \ + .equ xen_hypercall_##n, hypercall_page + __HYPERVISOR_##n * 32; \ + .type xen_hypercall_##n, @function; .size xen_hypercall_##n, 32 +#include <asm/xen-hypercalls.h> +#undef HYPERCALL +SYM_CODE_END(hypercall_page) +.popsection + +#ifdef CONFIG_XEN_PV + __INIT +SYM_CODE_START(startup_xen) + UNWIND_HINT_END_OF_STACK + ANNOTATE_NOENDBR + cld + + leaq (__end_init_task - PTREGS_SIZE)(%rip), %rsp + + /* Set up %gs. + * + * The base of %gs always points to fixed_percpu_data. If the + * stack protector canary is enabled, it is located at %gs:40. + * Note that, on SMP, the boot cpu uses init data section until + * the per cpu areas are set up. + */ + movl $MSR_GS_BASE,%ecx + movq $INIT_PER_CPU_VAR(fixed_percpu_data),%rax + cdq + wrmsr + + mov %rsi, %rdi + call xen_start_kernel +SYM_CODE_END(startup_xen) + __FINIT + +#ifdef CONFIG_XEN_PV_SMP +.pushsection .text +SYM_CODE_START(asm_cpu_bringup_and_idle) + UNWIND_HINT_END_OF_STACK + ENDBR + + call cpu_bringup_and_idle +SYM_CODE_END(asm_cpu_bringup_and_idle) + +SYM_CODE_START(xen_cpu_bringup_again) + UNWIND_HINT_FUNC + mov %rdi, %rsp + UNWIND_HINT_REGS + call cpu_bringup_and_idle +SYM_CODE_END(xen_cpu_bringup_again) +.popsection +#endif +#endif + + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_OS, .asciz "linux") + ELFNOTE(Xen, XEN_ELFNOTE_GUEST_VERSION, .asciz "2.6") + ELFNOTE(Xen, XEN_ELFNOTE_XEN_VERSION, .asciz "xen-3.0") +#ifdef CONFIG_XEN_PV + ELFNOTE(Xen, XEN_ELFNOTE_VIRT_BASE, _ASM_PTR __START_KERNEL_map) + /* Map the p2m table to a 512GB-aligned user address. 
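+	 * (PUD_SIZE * PTRS_PER_PUD works out to 512GB: 1GB per PUD entry,
+	 * 512 entries per PUD.)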
*/ + ELFNOTE(Xen, XEN_ELFNOTE_INIT_P2M, .quad (PUD_SIZE * PTRS_PER_PUD)) + ELFNOTE(Xen, XEN_ELFNOTE_ENTRY, _ASM_PTR startup_xen) + ELFNOTE(Xen, XEN_ELFNOTE_FEATURES, .ascii "!writable_page_tables") + ELFNOTE(Xen, XEN_ELFNOTE_PAE_MODE, .asciz "yes") + ELFNOTE(Xen, XEN_ELFNOTE_L1_MFN_VALID, + .quad _PAGE_PRESENT; .quad _PAGE_PRESENT) + ELFNOTE(Xen, XEN_ELFNOTE_MOD_START_PFN, .long 1) + ELFNOTE(Xen, XEN_ELFNOTE_PADDR_OFFSET, _ASM_PTR 0) +# define FEATURES_PV (1 << XENFEAT_writable_page_tables) +#else +# define FEATURES_PV 0 +#endif +#ifdef CONFIG_XEN_PVH +# define FEATURES_PVH (1 << XENFEAT_linux_rsdp_unrestricted) +#else +# define FEATURES_PVH 0 +#endif +#ifdef CONFIG_XEN_DOM0 +# define FEATURES_DOM0 (1 << XENFEAT_dom0) +#else +# define FEATURES_DOM0 0 +#endif + ELFNOTE(Xen, XEN_ELFNOTE_HYPERCALL_PAGE, _ASM_PTR hypercall_page) + ELFNOTE(Xen, XEN_ELFNOTE_SUPPORTED_FEATURES, + .long FEATURES_PV | FEATURES_PVH | FEATURES_DOM0) + ELFNOTE(Xen, XEN_ELFNOTE_LOADER, .asciz "generic") + ELFNOTE(Xen, XEN_ELFNOTE_SUSPEND_CANCEL, .long 1) + +#endif /*CONFIG_XEN */ diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h new file mode 100644 index 0000000000..a87ab36889 --- /dev/null +++ b/arch/x86/xen/xen-ops.h @@ -0,0 +1,166 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef XEN_OPS_H +#define XEN_OPS_H + +#include <linux/init.h> +#include <linux/clocksource.h> +#include <linux/irqreturn.h> +#include <xen/xen-ops.h> + +/* These are code, but not functions. Defined in entry.S */ +extern const char xen_failsafe_callback[]; + +void xen_entry_SYSENTER_compat(void); +#ifdef CONFIG_X86_64 +void xen_entry_SYSCALL_64(void); +void xen_entry_SYSCALL_compat(void); +#endif + +extern void *xen_initial_gdt; + +struct trap_info; +void xen_copy_trap_info(struct trap_info *traps); + +DECLARE_PER_CPU_ALIGNED(struct vcpu_info, xen_vcpu_info); +DECLARE_PER_CPU(unsigned long, xen_cr3); +DECLARE_PER_CPU(unsigned long, xen_current_cr3); + +extern struct start_info *xen_start_info; +extern struct shared_info xen_dummy_shared_info; +extern struct shared_info *HYPERVISOR_shared_info; + +extern bool xen_fifo_events; + +void xen_setup_mfn_list_list(void); +void xen_build_mfn_list_list(void); +void xen_setup_machphys_mapping(void); +void xen_setup_kernel_pagetable(pgd_t *pgd, unsigned long max_pfn); +void __init xen_reserve_special_pages(void); +void __init xen_pt_check_e820(void); + +void xen_mm_pin_all(void); +void xen_mm_unpin_all(void); +#ifdef CONFIG_X86_64 +void __init xen_relocate_p2m(void); +#endif + +bool __init xen_is_e820_reserved(phys_addr_t start, phys_addr_t size); +unsigned long __ref xen_chk_extra_mem(unsigned long pfn); +void __init xen_inv_extra_mem(void); +void __init xen_remap_memory(void); +phys_addr_t __init xen_find_free_area(phys_addr_t size); +char * __init xen_memory_setup(void); +void __init xen_arch_setup(void); +void xen_banner(void); +void xen_enable_sysenter(void); +void xen_enable_syscall(void); +void xen_vcpu_restore(void); + +void xen_hvm_init_shared_info(void); +void xen_unplug_emulated_devices(void); + +void __init xen_build_dynamic_phys_to_machine(void); +void __init xen_vmalloc_p2m_tree(void); + +void xen_init_irq_ops(void); +void xen_setup_timer(int cpu); +void xen_setup_runstate_info(int cpu); +void xen_teardown_timer(int cpu); +void xen_setup_cpu_clockevents(void); +void xen_save_time_memory_area(void); +void xen_restore_time_memory_area(void); +void xen_init_time_ops(void); +void xen_hvm_init_time_ops(void); + +bool xen_vcpu_stolen(int vcpu); + +void xen_vcpu_setup(int cpu); +void 
xen_vcpu_info_reset(int cpu); +void xen_setup_vcpu_info_placement(void); + +#ifdef CONFIG_SMP +void xen_smp_init(void); +void __init xen_hvm_smp_init(void); + +extern cpumask_var_t xen_cpu_initialized_map; +#else +static inline void xen_smp_init(void) {} +static inline void xen_hvm_smp_init(void) {} +#endif + +#ifdef CONFIG_PARAVIRT_SPINLOCKS +void __init xen_init_spinlocks(void); +void xen_init_lock_cpu(int cpu); +void xen_uninit_lock_cpu(int cpu); +#else +static inline void xen_init_spinlocks(void) +{ +} +static inline void xen_init_lock_cpu(int cpu) +{ +} +static inline void xen_uninit_lock_cpu(int cpu) +{ +} +#endif + +struct dom0_vga_console_info; + +#ifdef CONFIG_XEN_DOM0 +void __init xen_init_vga(const struct dom0_vga_console_info *, size_t size, + struct screen_info *); +#else +static inline void __init xen_init_vga(const struct dom0_vga_console_info *info, + size_t size, struct screen_info *si) +{ +} +#endif + +void xen_add_preferred_consoles(void); + +void __init xen_init_apic(void); + +#ifdef CONFIG_XEN_EFI +extern void xen_efi_init(struct boot_params *boot_params); +#else +static inline void __init xen_efi_init(struct boot_params *boot_params) +{ +} +#endif + +__visible void xen_irq_enable_direct(void); +__visible void xen_irq_disable_direct(void); +__visible unsigned long xen_save_fl_direct(void); + +__visible unsigned long xen_read_cr2(void); +__visible unsigned long xen_read_cr2_direct(void); + +/* These are not functions, and cannot be called normally */ +__visible void xen_iret(void); + +extern int xen_panic_handler_init(void); + +int xen_cpuhp_setup(int (*cpu_up_prepare_cb)(unsigned int), + int (*cpu_dead_cb)(unsigned int)); + +void xen_pin_vcpu(int cpu); + +void xen_emergency_restart(void); +void xen_force_evtchn_callback(void); + +#ifdef CONFIG_XEN_PV +void xen_pv_pre_suspend(void); +void xen_pv_post_suspend(int suspend_cancelled); +void xen_start_kernel(struct start_info *si); +#else +static inline void xen_pv_pre_suspend(void) {} +static inline void xen_pv_post_suspend(int suspend_cancelled) {} +#endif + +#ifdef CONFIG_XEN_PVHVM +void xen_hvm_post_suspend(int suspend_cancelled); +#else +static inline void xen_hvm_post_suspend(int suspend_cancelled) {} +#endif + +#endif /* XEN_OPS_H */ |
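The xen_entry_SYSCALL_64/SYSCALL_compat paths in xen-asm.S above pop %rcx and %r11, after which the remaining five 8-byte slots follow the hardware IRET order (rip, cs, rflags, rsp, ss); that is why the CS word is rewritten at 1*8(%rsp) and the SS word at 4*8(%rsp). A minimal stand-alone C sketch of that layout (illustrative only, not part of the patch; struct iret_frame is a made-up name used just to show the offsets):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Illustrative model of the frame left once xen_entry_SYSCALL_64 has popped
 * %rcx and %r11: five 8-byte slots in hardware IRET order, growing towards
 * higher addresses from the current %rsp.
 */
struct iret_frame {
	uint64_t rip;		/* 0*8(%rsp) */
	uint64_t cs;		/* 1*8(%rsp) -- rewritten to __USER_CS / __USER32_CS */
	uint64_t rflags;	/* 2*8(%rsp) */
	uint64_t rsp;		/* 3*8(%rsp) */
	uint64_t ss;		/* 4*8(%rsp) -- rewritten to __USER_DS */
};

int main(void)
{
	/* The offsets match the displacements used by the movq instructions. */
	assert(offsetof(struct iret_frame, cs) == 1 * 8);
	assert(offsetof(struct iret_frame, ss) == 4 * 8);
	printf("cs at +%zu, ss at +%zu\n",
	       offsetof(struct iret_frame, cs),
	       offsetof(struct iret_frame, ss));
	return 0;
}

Run as-is it prints "cs at +8, ss at +32", matching the displacements patched with __USER_CS/__USER32_CS and __USER_DS.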
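hypercall_page in xen-head.S reserves a whole page of 32-byte slots, one per hypercall, and xen-asm.S resolves hypercall_iret as hypercall_page + __HYPERVISOR_iret * 32. The following user-space sketch only illustrates that offset arithmetic (assumptions: a 4 KiB PAGE_SIZE, and the example number 23 for __HYPERVISOR_iret as defined in the public Xen interface headers; the HV_* names are placeholders, not kernel symbols):

#include <assert.h>
#include <stdio.h>

/* Illustrative only: mirrors the 32-byte slot layout of hypercall_page. */
#define HV_PAGE_SIZE	4096				/* assumed PAGE_SIZE (4 KiB pages) */
#define HV_SLOT_SIZE	32				/* bytes reserved per hypercall entry */
#define HV_NR_SLOTS	(HV_PAGE_SIZE / HV_SLOT_SIZE)	/* 128 entries per page */

/* Offset of a hypercall entry inside the page, as in "__HYPERVISOR_##n * 32". */
static unsigned long hypercall_offset(unsigned int nr)
{
	assert(nr < HV_NR_SLOTS);
	return (unsigned long)nr * HV_SLOT_SIZE;
}

int main(void)
{
	unsigned int iret_nr = 23;	/* __HYPERVISOR_iret in the public Xen headers */

	printf("slots per page: %d\n", HV_NR_SLOTS);
	printf("iret offset   : %#lx\n", hypercall_offset(iret_nr));
	return 0;
}

The hypervisor fills each slot with real trampoline code at boot, which is why the assembly above only needs to reserve the space and compute addresses.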
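The XEN_ELFNOTE_INIT_P2M note in xen-head.S encodes (PUD_SIZE * PTRS_PER_PUD), which the in-line comment describes as a 512GB-aligned address. A quick check of that arithmetic, assuming the usual x86-64 4-level paging values (PUD_SHIFT = 30, i.e. 1 GiB per PUD entry, and 512 entries per PUD page; the EXAMPLE_* names are stand-ins, not the kernel macros):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed x86-64 values: 1 GiB per PUD entry, 512 entries per PUD page. */
#define EXAMPLE_PUD_SHIFT	30
#define EXAMPLE_PUD_SIZE	(1ULL << EXAMPLE_PUD_SHIFT)
#define EXAMPLE_PTRS_PER_PUD	512ULL

int main(void)
{
	uint64_t init_p2m = EXAMPLE_PUD_SIZE * EXAMPLE_PTRS_PER_PUD;

	/* 2^30 * 512 = 2^39 bytes = 512 GiB, hence "512GB-aligned". */
	assert(init_p2m == 512ULL << 30);
	printf("XEN_ELFNOTE_INIT_P2M value: %#llx (%llu GiB)\n",
	       (unsigned long long)init_p2m,
	       (unsigned long long)(init_p2m >> 30));
	return 0;
}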
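xen-ops.h repeatedly pairs real prototypes with empty static inline stubs under #else (for CONFIG_SMP, CONFIG_PARAVIRT_SPINLOCKS, CONFIG_XEN_DOM0, CONFIG_XEN_EFI, CONFIG_XEN_PV and CONFIG_XEN_PVHVM), so common setup code can call these hooks unconditionally. A minimal sketch of the same pattern, with EXAMPLE_CONFIG_FOO and example_foo_init() as placeholders rather than symbols from the patch:

#include <stdio.h>

/*
 * Illustrative only: when the config symbol is off, the header supplies an
 * empty static inline stub, so call sites need no #ifdef of their own.
 */
#ifdef EXAMPLE_CONFIG_FOO
void example_foo_init(int cpu);			/* real version would live in foo.c */
#else
static inline void example_foo_init(int cpu) { (void)cpu; }
#endif

int main(void)
{
	/* Builds and runs with EXAMPLE_CONFIG_FOO left undefined (stub path). */
	example_foo_init(0);
	printf("init done\n");
	return 0;
}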