From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001 From: Daniel Baumann Date: Sun, 7 Apr 2024 20:49:45 +0200 Subject: Adding upstream version 6.1.76. Signed-off-by: Daniel Baumann --- arch/x86/power/Makefile | 12 + arch/x86/power/cpu.c | 544 ++++++++++++++++++++++++++++++++++++++ arch/x86/power/hibernate.c | 219 +++++++++++++++ arch/x86/power/hibernate_32.c | 198 ++++++++++++++ arch/x86/power/hibernate_64.c | 142 ++++++++++ arch/x86/power/hibernate_asm_32.S | 112 ++++++++ arch/x86/power/hibernate_asm_64.S | 150 +++++++++++ 7 files changed, 1377 insertions(+) create mode 100644 arch/x86/power/Makefile create mode 100644 arch/x86/power/cpu.c create mode 100644 arch/x86/power/hibernate.c create mode 100644 arch/x86/power/hibernate_32.c create mode 100644 arch/x86/power/hibernate_64.c create mode 100644 arch/x86/power/hibernate_asm_32.S create mode 100644 arch/x86/power/hibernate_asm_64.S (limited to 'arch/x86/power') diff --git a/arch/x86/power/Makefile b/arch/x86/power/Makefile new file mode 100644 index 000000000..379777572 --- /dev/null +++ b/arch/x86/power/Makefile @@ -0,0 +1,12 @@ +# SPDX-License-Identifier: GPL-2.0 + +# __restore_processor_state() restores %gs after S3 resume and so should not +# itself be stack-protected +CFLAGS_cpu.o := -fno-stack-protector + +# Clang may incorrectly inline functions with stack protector enabled into +# __restore_processor_state(): https://bugs.llvm.org/show_bug.cgi?id=47479 +CFLAGS_REMOVE_cpu.o := $(CC_FLAGS_LTO) + +obj-$(CONFIG_PM_SLEEP) += cpu.o +obj-$(CONFIG_HIBERNATION) += hibernate_$(BITS).o hibernate_asm_$(BITS).o hibernate.o diff --git a/arch/x86/power/cpu.c b/arch/x86/power/cpu.c new file mode 100644 index 000000000..93ae33248 --- /dev/null +++ b/arch/x86/power/cpu.c @@ -0,0 +1,544 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Suspend support specific for i386/x86-64. + * + * Copyright (c) 2007 Rafael J. Wysocki + * Copyright (c) 2002 Pavel Machek + * Copyright (c) 2001 Patrick Mochel + */ + +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifdef CONFIG_X86_32 +__visible unsigned long saved_context_ebx; +__visible unsigned long saved_context_esp, saved_context_ebp; +__visible unsigned long saved_context_esi, saved_context_edi; +__visible unsigned long saved_context_eflags; +#endif +struct saved_context saved_context; + +static void msr_save_context(struct saved_context *ctxt) +{ + struct saved_msr *msr = ctxt->saved_msrs.array; + struct saved_msr *end = msr + ctxt->saved_msrs.num; + + while (msr < end) { + if (msr->valid) + rdmsrl(msr->info.msr_no, msr->info.reg.q); + msr++; + } +} + +static void msr_restore_context(struct saved_context *ctxt) +{ + struct saved_msr *msr = ctxt->saved_msrs.array; + struct saved_msr *end = msr + ctxt->saved_msrs.num; + + while (msr < end) { + if (msr->valid) + wrmsrl(msr->info.msr_no, msr->info.reg.q); + msr++; + } +} + +/** + * __save_processor_state() - Save CPU registers before creating a + * hibernation image and before restoring + * the memory state from it + * @ctxt: Structure to store the registers contents in. + * + * NOTE: If there is a CPU register the modification of which by the + * boot kernel (ie. the kernel used for loading the hibernation image) + * might affect the operations of the restored target kernel (ie. the one + * saved in the hibernation image), then its contents must be saved by this + * function. 
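+ * (On x86-64, EFER is one such register: it is saved here and written
+ * back by __restore_processor_state() so the image kernel always resumes
+ * with its own value, whatever the boot kernel set in the meantime.)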
In other words, if kernel A is hibernated and different + * kernel B is used for loading the hibernation image into memory, the + * kernel A's __save_processor_state() function must save all registers + * needed by kernel A, so that it can operate correctly after the resume + * regardless of what kernel B does in the meantime. + */ +static void __save_processor_state(struct saved_context *ctxt) +{ +#ifdef CONFIG_X86_32 + mtrr_save_fixed_ranges(NULL); +#endif + kernel_fpu_begin(); + + /* + * descriptor tables + */ + store_idt(&ctxt->idt); + + /* + * We save it here, but restore it only in the hibernate case. + * For ACPI S3 resume, this is loaded via 'early_gdt_desc' in 64-bit + * mode in "secondary_startup_64". In 32-bit mode it is done via + * 'pmode_gdt' in wakeup_start. + */ + ctxt->gdt_desc.size = GDT_SIZE - 1; + ctxt->gdt_desc.address = (unsigned long)get_cpu_gdt_rw(smp_processor_id()); + + store_tr(ctxt->tr); + + /* XMM0..XMM15 should be handled by kernel_fpu_begin(). */ + /* + * segment registers + */ + savesegment(gs, ctxt->gs); +#ifdef CONFIG_X86_64 + savesegment(fs, ctxt->fs); + savesegment(ds, ctxt->ds); + savesegment(es, ctxt->es); + + rdmsrl(MSR_FS_BASE, ctxt->fs_base); + rdmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); + rdmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); + mtrr_save_fixed_ranges(NULL); + + rdmsrl(MSR_EFER, ctxt->efer); +#endif + + /* + * control registers + */ + ctxt->cr0 = read_cr0(); + ctxt->cr2 = read_cr2(); + ctxt->cr3 = __read_cr3(); + ctxt->cr4 = __read_cr4(); + ctxt->misc_enable_saved = !rdmsrl_safe(MSR_IA32_MISC_ENABLE, + &ctxt->misc_enable); + msr_save_context(ctxt); +} + +/* Needed by apm.c */ +void save_processor_state(void) +{ + __save_processor_state(&saved_context); + x86_platform.save_sched_clock_state(); +} +#ifdef CONFIG_X86_32 +EXPORT_SYMBOL(save_processor_state); +#endif + +static void do_fpu_end(void) +{ + /* + * Restore FPU regs if necessary. + */ + kernel_fpu_end(); +} + +static void fix_processor_context(void) +{ + int cpu = smp_processor_id(); +#ifdef CONFIG_X86_64 + struct desc_struct *desc = get_cpu_gdt_rw(cpu); + tss_desc tss; +#endif + + /* + * We need to reload TR, which requires that we change the + * GDT entry to indicate "available" first. + * + * XXX: This could probably all be replaced by a call to + * force_reload_TR(). + */ + set_tss_desc(cpu, &get_cpu_entry_area(cpu)->tss.x86_tss); + +#ifdef CONFIG_X86_64 + memcpy(&tss, &desc[GDT_ENTRY_TSS], sizeof(tss_desc)); + tss.type = 0x9; /* The available 64-bit TSS (see AMD vol 2, pg 91 */ + write_gdt_entry(desc, GDT_ENTRY_TSS, &tss, DESC_TSS); + + syscall_init(); /* This sets MSR_*STAR and related */ +#else + if (boot_cpu_has(X86_FEATURE_SEP)) + enable_sep_cpu(); +#endif + load_TR_desc(); /* This does ltr */ + load_mm_ldt(current->active_mm); /* This does lldt */ + initialize_tlbstate_and_flush(); + + fpu__resume_cpu(); + + /* The processor is back on the direct GDT, load back the fixmap */ + load_fixmap_gdt(cpu); +} + +/** + * __restore_processor_state() - Restore the contents of CPU registers saved + * by __save_processor_state() + * @ctxt: Structure to load the registers contents from. + * + * The asm code that gets us here will have restored a usable GDT, although + * it will be pointing to the wrong alias. 
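+ * fix_processor_context() below reloads the TSS and switches the GDT
+ * back to its read-only fixmap alias via load_fixmap_gdt().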
+ */ +static void notrace __restore_processor_state(struct saved_context *ctxt) +{ + struct cpuinfo_x86 *c; + + if (ctxt->misc_enable_saved) + wrmsrl(MSR_IA32_MISC_ENABLE, ctxt->misc_enable); + /* + * control registers + */ + /* cr4 was introduced in the Pentium CPU */ +#ifdef CONFIG_X86_32 + if (ctxt->cr4) + __write_cr4(ctxt->cr4); +#else +/* CONFIG X86_64 */ + wrmsrl(MSR_EFER, ctxt->efer); + __write_cr4(ctxt->cr4); +#endif + write_cr3(ctxt->cr3); + write_cr2(ctxt->cr2); + write_cr0(ctxt->cr0); + + /* Restore the IDT. */ + load_idt(&ctxt->idt); + + /* + * Just in case the asm code got us here with the SS, DS, or ES + * out of sync with the GDT, update them. + */ + loadsegment(ss, __KERNEL_DS); + loadsegment(ds, __USER_DS); + loadsegment(es, __USER_DS); + + /* + * Restore percpu access. Percpu access can happen in exception + * handlers or in complicated helpers like load_gs_index(). + */ +#ifdef CONFIG_X86_64 + wrmsrl(MSR_GS_BASE, ctxt->kernelmode_gs_base); +#else + loadsegment(fs, __KERNEL_PERCPU); +#endif + + /* Restore the TSS, RO GDT, LDT, and usermode-relevant MSRs. */ + fix_processor_context(); + + /* + * Now that we have descriptor tables fully restored and working + * exception handling, restore the usermode segments. + */ +#ifdef CONFIG_X86_64 + loadsegment(ds, ctxt->es); + loadsegment(es, ctxt->es); + loadsegment(fs, ctxt->fs); + load_gs_index(ctxt->gs); + + /* + * Restore FSBASE and GSBASE after restoring the selectors, since + * restoring the selectors clobbers the bases. Keep in mind + * that MSR_KERNEL_GS_BASE is horribly misnamed. + */ + wrmsrl(MSR_FS_BASE, ctxt->fs_base); + wrmsrl(MSR_KERNEL_GS_BASE, ctxt->usermode_gs_base); +#else + loadsegment(gs, ctxt->gs); +#endif + + do_fpu_end(); + tsc_verify_tsc_adjust(true); + x86_platform.restore_sched_clock_state(); + mtrr_bp_restore(); + perf_restore_debug_store(); + + c = &cpu_data(smp_processor_id()); + if (cpu_has(c, X86_FEATURE_MSR_IA32_FEAT_CTL)) + init_ia32_feat_ctl(c); + + microcode_bsp_resume(); + + /* + * This needs to happen after the microcode has been updated upon resume + * because some of the MSRs are "emulated" in microcode. + */ + msr_restore_context(ctxt); +} + +/* Needed by apm.c */ +void notrace restore_processor_state(void) +{ + __restore_processor_state(&saved_context); +} +#ifdef CONFIG_X86_32 +EXPORT_SYMBOL(restore_processor_state); +#endif + +#if defined(CONFIG_HIBERNATION) && defined(CONFIG_HOTPLUG_CPU) +static void resume_play_dead(void) +{ + play_dead_common(); + tboot_shutdown(TB_SHUTDOWN_WFS); + hlt_play_dead(); +} + +int hibernate_resume_nonboot_cpu_disable(void) +{ + void (*play_dead)(void) = smp_ops.play_dead; + int ret; + + /* + * Ensure that MONITOR/MWAIT will not be used in the "play dead" loop + * during hibernate image restoration, because it is likely that the + * monitored address will be actually written to at that time and then + * the "dead" CPU will attempt to execute instructions again, but the + * address in its instruction pointer may not be possible to resolve + * any more at that point (the page tables used by it previously may + * have been overwritten by hibernate image data). + * + * First, make sure that we wake up all the potentially disabled SMT + * threads which have been initially brought up and then put into + * mwait/cpuidle sleep. + * Those will be put to proper (not interfering with hibernation + * resume) sleep afterwards, and the resumed kernel will decide itself + * what to do with them. 
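+ * (resume_play_dead(), installed just below, parks the offlined CPUs in
+ * HLT via hlt_play_dead() instead of letting them use MWAIT.)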
+ */ + ret = cpuhp_smt_enable(); + if (ret) + return ret; + smp_ops.play_dead = resume_play_dead; + ret = freeze_secondary_cpus(0); + smp_ops.play_dead = play_dead; + return ret; +} +#endif + +/* + * When bsp_check() is called in hibernate and suspend, cpu hotplug + * is disabled already. So it's unnecessary to handle race condition between + * cpumask query and cpu hotplug. + */ +static int bsp_check(void) +{ + if (cpumask_first(cpu_online_mask) != 0) { + pr_warn("CPU0 is offline.\n"); + return -ENODEV; + } + + return 0; +} + +static int bsp_pm_callback(struct notifier_block *nb, unsigned long action, + void *ptr) +{ + int ret = 0; + + switch (action) { + case PM_SUSPEND_PREPARE: + case PM_HIBERNATION_PREPARE: + ret = bsp_check(); + break; +#ifdef CONFIG_DEBUG_HOTPLUG_CPU0 + case PM_RESTORE_PREPARE: + /* + * When system resumes from hibernation, online CPU0 because + * 1. it's required for resume and + * 2. the CPU was online before hibernation + */ + if (!cpu_online(0)) + _debug_hotplug_cpu(0, 1); + break; + case PM_POST_RESTORE: + /* + * When a resume really happens, this code won't be called. + * + * This code is called only when user space hibernation software + * prepares for snapshot device during boot time. So we just + * call _debug_hotplug_cpu() to restore to CPU0's state prior to + * preparing the snapshot device. + * + * This works for normal boot case in our CPU0 hotplug debug + * mode, i.e. CPU0 is offline and user mode hibernation + * software initializes during boot time. + * + * If CPU0 is online and user application accesses snapshot + * device after boot time, this will offline CPU0 and user may + * see different CPU0 state before and after accessing + * the snapshot device. But hopefully this is not a case when + * user debugging CPU0 hotplug. Even if users hit this case, + * they can easily online CPU0 back. + * + * To simplify this debug code, we only consider normal boot + * case. Otherwise we need to remember CPU0's state and restore + * to that state and resolve racy conditions etc. + */ + _debug_hotplug_cpu(0, 0); + break; +#endif + default: + break; + } + return notifier_from_errno(ret); +} + +static int __init bsp_pm_check_init(void) +{ + /* + * Set this bsp_pm_callback as lower priority than + * cpu_hotplug_pm_callback. So cpu_hotplug_pm_callback will be called + * earlier to disable cpu hotplug before bsp online check. + */ + pm_notifier(bsp_pm_callback, -INT_MAX); + return 0; +} + +core_initcall(bsp_pm_check_init); + +static int msr_build_context(const u32 *msr_id, const int num) +{ + struct saved_msrs *saved_msrs = &saved_context.saved_msrs; + struct saved_msr *msr_array; + int total_num; + int i, j; + + total_num = saved_msrs->num + num; + + msr_array = kmalloc_array(total_num, sizeof(struct saved_msr), GFP_KERNEL); + if (!msr_array) { + pr_err("x86/pm: Can not allocate memory to save/restore MSRs during suspend.\n"); + return -ENOMEM; + } + + if (saved_msrs->array) { + /* + * Multiple callbacks can invoke this function, so copy any + * MSR save requests from previous invocations. 
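+ * The previous array is freed once its entries have been copied over.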
+ */ + memcpy(msr_array, saved_msrs->array, + sizeof(struct saved_msr) * saved_msrs->num); + + kfree(saved_msrs->array); + } + + for (i = saved_msrs->num, j = 0; i < total_num; i++, j++) { + u64 dummy; + + msr_array[i].info.msr_no = msr_id[j]; + msr_array[i].valid = !rdmsrl_safe(msr_id[j], &dummy); + msr_array[i].info.reg.q = 0; + } + saved_msrs->num = total_num; + saved_msrs->array = msr_array; + + return 0; +} + +/* + * The following sections are a quirk framework for problematic BIOSen: + * Sometimes MSRs are modified by the BIOSen after suspended to + * RAM, this might cause unexpected behavior after wakeup. + * Thus we save/restore these specified MSRs across suspend/resume + * in order to work around it. + * + * For any further problematic BIOSen/platforms, + * please add your own function similar to msr_initialize_bdw. + */ +static int msr_initialize_bdw(const struct dmi_system_id *d) +{ + /* Add any extra MSR ids into this array. */ + u32 bdw_msr_id[] = { MSR_IA32_THERM_CONTROL }; + + pr_info("x86/pm: %s detected, MSR saving is needed during suspending.\n", d->ident); + return msr_build_context(bdw_msr_id, ARRAY_SIZE(bdw_msr_id)); +} + +static const struct dmi_system_id msr_save_dmi_table[] = { + { + .callback = msr_initialize_bdw, + .ident = "BROADWELL BDX_EP", + .matches = { + DMI_MATCH(DMI_PRODUCT_NAME, "GRANTLEY"), + DMI_MATCH(DMI_PRODUCT_VERSION, "E63448-400"), + }, + }, + {} +}; + +static int msr_save_cpuid_features(const struct x86_cpu_id *c) +{ + u32 cpuid_msr_id[] = { + MSR_AMD64_CPUID_FN_1, + }; + + pr_info("x86/pm: family %#hx cpu detected, MSR saving is needed during suspending.\n", + c->family); + + return msr_build_context(cpuid_msr_id, ARRAY_SIZE(cpuid_msr_id)); +} + +static const struct x86_cpu_id msr_save_cpu_table[] = { + X86_MATCH_VENDOR_FAM(AMD, 0x15, &msr_save_cpuid_features), + X86_MATCH_VENDOR_FAM(AMD, 0x16, &msr_save_cpuid_features), + {} +}; + +typedef int (*pm_cpu_match_t)(const struct x86_cpu_id *); +static int pm_cpu_check(const struct x86_cpu_id *c) +{ + const struct x86_cpu_id *m; + int ret = 0; + + m = x86_match_cpu(msr_save_cpu_table); + if (m) { + pm_cpu_match_t fn; + + fn = (pm_cpu_match_t)m->driver_data; + ret = fn(m); + } + + return ret; +} + +static void pm_save_spec_msr(void) +{ + struct msr_enumeration { + u32 msr_no; + u32 feature; + } msr_enum[] = { + { MSR_IA32_SPEC_CTRL, X86_FEATURE_MSR_SPEC_CTRL }, + { MSR_IA32_TSX_CTRL, X86_FEATURE_MSR_TSX_CTRL }, + { MSR_TSX_FORCE_ABORT, X86_FEATURE_TSX_FORCE_ABORT }, + { MSR_IA32_MCU_OPT_CTRL, X86_FEATURE_SRBDS_CTRL }, + { MSR_AMD64_LS_CFG, X86_FEATURE_LS_CFG_SSBD }, + { MSR_AMD64_DE_CFG, X86_FEATURE_LFENCE_RDTSC }, + }; + int i; + + for (i = 0; i < ARRAY_SIZE(msr_enum); i++) { + if (boot_cpu_has(msr_enum[i].feature)) + msr_build_context(&msr_enum[i].msr_no, 1); + } +} + +static int pm_check_save_msr(void) +{ + dmi_check_system(msr_save_dmi_table); + pm_cpu_check(msr_save_cpu_table); + pm_save_spec_msr(); + + return 0; +} + +device_initcall(pm_check_save_msr); diff --git a/arch/x86/power/hibernate.c b/arch/x86/power/hibernate.c new file mode 100644 index 000000000..e94e0050a --- /dev/null +++ b/arch/x86/power/hibernate.c @@ -0,0 +1,219 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Hibernation support for x86 + * + * Copyright (c) 2007 Rafael J. 
Wysocki + * Copyright (c) 2002 Pavel Machek + * Copyright (c) 2001 Patrick Mochel + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +/* + * Address to jump to in the last phase of restore in order to get to the image + * kernel's text (this value is passed in the image header). + */ +unsigned long restore_jump_address __visible; +unsigned long jump_address_phys; + +/* + * Value of the cr3 register from before the hibernation (this value is passed + * in the image header). + */ +unsigned long restore_cr3 __visible; +unsigned long temp_pgt __visible; +unsigned long relocated_restore_code __visible; + +/** + * pfn_is_nosave - check if given pfn is in the 'nosave' section + */ +int pfn_is_nosave(unsigned long pfn) +{ + unsigned long nosave_begin_pfn; + unsigned long nosave_end_pfn; + + nosave_begin_pfn = __pa_symbol(&__nosave_begin) >> PAGE_SHIFT; + nosave_end_pfn = PAGE_ALIGN(__pa_symbol(&__nosave_end)) >> PAGE_SHIFT; + + return pfn >= nosave_begin_pfn && pfn < nosave_end_pfn; +} + +struct restore_data_record { + unsigned long jump_address; + unsigned long jump_address_phys; + unsigned long cr3; + unsigned long magic; + unsigned long e820_checksum; +}; + +/** + * compute_e820_crc32 - calculate crc32 of a given e820 table + * + * @table: the e820 table to be calculated + * + * Return: the resulting checksum + */ +static inline u32 compute_e820_crc32(struct e820_table *table) +{ + int size = offsetof(struct e820_table, entries) + + sizeof(struct e820_entry) * table->nr_entries; + + return ~crc32_le(~0, (unsigned char const *)table, size); +} + +#ifdef CONFIG_X86_64 +#define RESTORE_MAGIC 0x23456789ABCDEF02UL +#else +#define RESTORE_MAGIC 0x12345679UL +#endif + +/** + * arch_hibernation_header_save - populate the architecture specific part + * of a hibernation image header + * @addr: address to save the data at + */ +int arch_hibernation_header_save(void *addr, unsigned int max_size) +{ + struct restore_data_record *rdr = addr; + + if (max_size < sizeof(struct restore_data_record)) + return -EOVERFLOW; + rdr->magic = RESTORE_MAGIC; + rdr->jump_address = (unsigned long)restore_registers; + rdr->jump_address_phys = __pa_symbol(restore_registers); + + /* + * The restore code fixes up CR3 and CR4 in the following sequence: + * + * [in hibernation asm] + * 1. CR3 <= temporary page tables + * 2. CR4 <= mmu_cr4_features (from the kernel that restores us) + * 3. CR3 <= rdr->cr3 + * 4. CR4 <= mmu_cr4_features (from us, i.e. the image kernel) + * [in restore_processor_state()] + * 5. CR4 <= saved CR4 + * 6. CR3 <= saved CR3 + * + * Our mmu_cr4_features has CR4.PCIDE=0, and toggling + * CR4.PCIDE while CR3's PCID bits are nonzero is illegal, so + * rdr->cr3 needs to point to valid page tables but must not + * have any of the PCID bits set. 
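+ * Masking with ~CR3_PCID_MASK clears the low 12 PCID bits of CR3 while
+ * preserving the page-table base address.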
+ */ + rdr->cr3 = restore_cr3 & ~CR3_PCID_MASK; + + rdr->e820_checksum = compute_e820_crc32(e820_table_firmware); + return 0; +} + +/** + * arch_hibernation_header_restore - read the architecture specific data + * from the hibernation image header + * @addr: address to read the data from + */ +int arch_hibernation_header_restore(void *addr) +{ + struct restore_data_record *rdr = addr; + + if (rdr->magic != RESTORE_MAGIC) { + pr_crit("Unrecognized hibernate image header format!\n"); + return -EINVAL; + } + + restore_jump_address = rdr->jump_address; + jump_address_phys = rdr->jump_address_phys; + restore_cr3 = rdr->cr3; + + if (rdr->e820_checksum != compute_e820_crc32(e820_table_firmware)) { + pr_crit("Hibernate inconsistent memory map detected!\n"); + return -ENODEV; + } + + return 0; +} + +int relocate_restore_code(void) +{ + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + pte_t *pte; + + relocated_restore_code = get_safe_page(GFP_ATOMIC); + if (!relocated_restore_code) + return -ENOMEM; + + memcpy((void *)relocated_restore_code, core_restore_code, PAGE_SIZE); + + /* Make the page containing the relocated code executable */ + pgd = (pgd_t *)__va(read_cr3_pa()) + + pgd_index(relocated_restore_code); + p4d = p4d_offset(pgd, relocated_restore_code); + if (p4d_large(*p4d)) { + set_p4d(p4d, __p4d(p4d_val(*p4d) & ~_PAGE_NX)); + goto out; + } + pud = pud_offset(p4d, relocated_restore_code); + if (pud_large(*pud)) { + set_pud(pud, __pud(pud_val(*pud) & ~_PAGE_NX)); + goto out; + } + pmd = pmd_offset(pud, relocated_restore_code); + if (pmd_large(*pmd)) { + set_pmd(pmd, __pmd(pmd_val(*pmd) & ~_PAGE_NX)); + goto out; + } + pte = pte_offset_kernel(pmd, relocated_restore_code); + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_NX)); +out: + __flush_tlb_all(); + return 0; +} + +int arch_resume_nosmt(void) +{ + int ret = 0; + /* + * We reached this while coming out of hibernation. This means + * that SMT siblings are sleeping in hlt, as mwait is not safe + * against control transition during resume (see comment in + * hibernate_resume_nonboot_cpu_disable()). + * + * If the resumed kernel has SMT disabled, we have to take all the + * SMT siblings out of hlt, and offline them again so that they + * end up in mwait proper. + * + * Called with hotplug disabled. + */ + cpu_hotplug_enable(); + if (cpu_smt_control == CPU_SMT_DISABLED || + cpu_smt_control == CPU_SMT_FORCE_DISABLED) { + enum cpuhp_smt_control old = cpu_smt_control; + + ret = cpuhp_smt_enable(); + if (ret) + goto out; + ret = cpuhp_smt_disable(old); + if (ret) + goto out; + } +out: + cpu_hotplug_disable(); + return ret; +} diff --git a/arch/x86/power/hibernate_32.c b/arch/x86/power/hibernate_32.c new file mode 100644 index 000000000..223d5bca2 --- /dev/null +++ b/arch/x86/power/hibernate_32.c @@ -0,0 +1,198 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hibernation support specific for i386 - temporary page tables + * + * Copyright (c) 2006 Rafael J. Wysocki + */ + +#include +#include +#include +#include + +#include +#include +#include +#include + +/* Pointer to the temporary resume page tables */ +pgd_t *resume_pg_dir; + +/* The following three functions are based on the analogous code in + * arch/x86/mm/init_32.c + */ + +/* + * Create a middle page table on a resume-safe page and put a pointer to it in + * the given global directory entry. This only returns the gd entry + * in non-PAE compilation mode, since the middle layer is folded. 
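+ * With PAE, a fresh pmd page is allocated from resume-safe memory via
+ * get_safe_page() and linked into the given pgd entry.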
+ */ +static pmd_t *resume_one_md_table_init(pgd_t *pgd) +{ + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd_table; + +#ifdef CONFIG_X86_PAE + pmd_table = (pmd_t *)get_safe_page(GFP_ATOMIC); + if (!pmd_table) + return NULL; + + set_pgd(pgd, __pgd(__pa(pmd_table) | _PAGE_PRESENT)); + p4d = p4d_offset(pgd, 0); + pud = pud_offset(p4d, 0); + + BUG_ON(pmd_table != pmd_offset(pud, 0)); +#else + p4d = p4d_offset(pgd, 0); + pud = pud_offset(p4d, 0); + pmd_table = pmd_offset(pud, 0); +#endif + + return pmd_table; +} + +/* + * Create a page table on a resume-safe page and place a pointer to it in + * a middle page directory entry. + */ +static pte_t *resume_one_page_table_init(pmd_t *pmd) +{ + if (pmd_none(*pmd)) { + pte_t *page_table = (pte_t *)get_safe_page(GFP_ATOMIC); + if (!page_table) + return NULL; + + set_pmd(pmd, __pmd(__pa(page_table) | _PAGE_TABLE)); + + BUG_ON(page_table != pte_offset_kernel(pmd, 0)); + + return page_table; + } + + return pte_offset_kernel(pmd, 0); +} + +/* + * This maps the physical memory to kernel virtual address space, a total + * of max_low_pfn pages, by creating page tables starting from address + * PAGE_OFFSET. The page tables are allocated out of resume-safe pages. + */ +static int resume_physical_mapping_init(pgd_t *pgd_base) +{ + unsigned long pfn; + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + int pgd_idx, pmd_idx; + + pgd_idx = pgd_index(PAGE_OFFSET); + pgd = pgd_base + pgd_idx; + pfn = 0; + + for (; pgd_idx < PTRS_PER_PGD; pgd++, pgd_idx++) { + pmd = resume_one_md_table_init(pgd); + if (!pmd) + return -ENOMEM; + + if (pfn >= max_low_pfn) + continue; + + for (pmd_idx = 0; pmd_idx < PTRS_PER_PMD; pmd++, pmd_idx++) { + if (pfn >= max_low_pfn) + break; + + /* Map with big pages if possible, otherwise create + * normal page tables. + * NOTE: We can mark everything as executable here + */ + if (boot_cpu_has(X86_FEATURE_PSE)) { + set_pmd(pmd, pfn_pmd(pfn, PAGE_KERNEL_LARGE_EXEC)); + pfn += PTRS_PER_PTE; + } else { + pte_t *max_pte; + + pte = resume_one_page_table_init(pmd); + if (!pte) + return -ENOMEM; + + max_pte = pte + PTRS_PER_PTE; + for (; pte < max_pte; pte++, pfn++) { + if (pfn >= max_low_pfn) + break; + + set_pte(pte, pfn_pte(pfn, PAGE_KERNEL_EXEC)); + } + } + } + } + + return 0; +} + +static inline void resume_init_first_level_page_table(pgd_t *pg_dir) +{ +#ifdef CONFIG_X86_PAE + int i; + + /* Init entries of the first-level page table to the zero page */ + for (i = 0; i < PTRS_PER_PGD; i++) + set_pgd(pg_dir + i, + __pgd(__pa(empty_zero_page) | _PAGE_PRESENT)); +#endif +} + +static int set_up_temporary_text_mapping(pgd_t *pgd_base) +{ + pgd_t *pgd; + pmd_t *pmd; + pte_t *pte; + + pgd = pgd_base + pgd_index(restore_jump_address); + + pmd = resume_one_md_table_init(pgd); + if (!pmd) + return -ENOMEM; + + if (boot_cpu_has(X86_FEATURE_PSE)) { + set_pmd(pmd + pmd_index(restore_jump_address), + __pmd((jump_address_phys & PMD_MASK) | pgprot_val(PAGE_KERNEL_LARGE_EXEC))); + } else { + pte = resume_one_page_table_init(pmd); + if (!pte) + return -ENOMEM; + set_pte(pte + pte_index(restore_jump_address), + __pte((jump_address_phys & PAGE_MASK) | pgprot_val(PAGE_KERNEL_EXEC))); + } + + return 0; +} + +asmlinkage int swsusp_arch_resume(void) +{ + int error; + + resume_pg_dir = (pgd_t *)get_safe_page(GFP_ATOMIC); + if (!resume_pg_dir) + return -ENOMEM; + + resume_init_first_level_page_table(resume_pg_dir); + + error = set_up_temporary_text_mapping(resume_pg_dir); + if (error) + return error; + + error = resume_physical_mapping_init(resume_pg_dir); + if (error) + return error; + + 
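+	/* Hand the temporary page tables to the asm restore code, which
+	 * loads temp_pgt into %cr3 before copying the image pages. */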
temp_pgt = __pa(resume_pg_dir); + + error = relocate_restore_code(); + if (error) + return error; + + /* We have got enough memory and from now on we cannot recover */ + restore_image(); + return 0; +} diff --git a/arch/x86/power/hibernate_64.c b/arch/x86/power/hibernate_64.c new file mode 100644 index 000000000..a595953f1 --- /dev/null +++ b/arch/x86/power/hibernate_64.c @@ -0,0 +1,142 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Hibernation support for x86-64 + * + * Copyright (c) 2007 Rafael J. Wysocki + * Copyright (c) 2002 Pavel Machek + * Copyright (c) 2001 Patrick Mochel + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +static int set_up_temporary_text_mapping(pgd_t *pgd) +{ + pmd_t *pmd; + pud_t *pud; + p4d_t *p4d = NULL; + pgprot_t pgtable_prot = __pgprot(_KERNPG_TABLE); + pgprot_t pmd_text_prot = __pgprot(__PAGE_KERNEL_LARGE_EXEC); + + /* Filter out unsupported __PAGE_KERNEL* bits: */ + pgprot_val(pmd_text_prot) &= __default_kernel_pte_mask; + pgprot_val(pgtable_prot) &= __default_kernel_pte_mask; + + /* + * The new mapping only has to cover the page containing the image + * kernel's entry point (jump_address_phys), because the switch over to + * it is carried out by relocated code running from a page allocated + * specifically for this purpose and covered by the identity mapping, so + * the temporary kernel text mapping is only needed for the final jump. + * Moreover, in that mapping the virtual address of the image kernel's + * entry point must be the same as its virtual address in the image + * kernel (restore_jump_address), so the image kernel's + * restore_registers() code doesn't find itself in a different area of + * the virtual address space after switching over to the original page + * tables used by the image kernel. 
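+ *
+ * The result is a single PMD-sized (2M) executable mapping of the page
+ * containing jump_address_phys, placed at the virtual address
+ * restore_jump_address.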
+ */ + + if (pgtable_l5_enabled()) { + p4d = (p4d_t *)get_safe_page(GFP_ATOMIC); + if (!p4d) + return -ENOMEM; + } + + pud = (pud_t *)get_safe_page(GFP_ATOMIC); + if (!pud) + return -ENOMEM; + + pmd = (pmd_t *)get_safe_page(GFP_ATOMIC); + if (!pmd) + return -ENOMEM; + + set_pmd(pmd + pmd_index(restore_jump_address), + __pmd((jump_address_phys & PMD_MASK) | pgprot_val(pmd_text_prot))); + set_pud(pud + pud_index(restore_jump_address), + __pud(__pa(pmd) | pgprot_val(pgtable_prot))); + if (p4d) { + p4d_t new_p4d = __p4d(__pa(pud) | pgprot_val(pgtable_prot)); + pgd_t new_pgd = __pgd(__pa(p4d) | pgprot_val(pgtable_prot)); + + set_p4d(p4d + p4d_index(restore_jump_address), new_p4d); + set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); + } else { + /* No p4d for 4-level paging: point the pgd to the pud page table */ + pgd_t new_pgd = __pgd(__pa(pud) | pgprot_val(pgtable_prot)); + set_pgd(pgd + pgd_index(restore_jump_address), new_pgd); + } + + return 0; +} + +static void *alloc_pgt_page(void *context) +{ + return (void *)get_safe_page(GFP_ATOMIC); +} + +static int set_up_temporary_mappings(void) +{ + struct x86_mapping_info info = { + .alloc_pgt_page = alloc_pgt_page, + .page_flag = __PAGE_KERNEL_LARGE_EXEC, + .offset = __PAGE_OFFSET, + }; + unsigned long mstart, mend; + pgd_t *pgd; + int result; + int i; + + pgd = (pgd_t *)get_safe_page(GFP_ATOMIC); + if (!pgd) + return -ENOMEM; + + /* Prepare a temporary mapping for the kernel text */ + result = set_up_temporary_text_mapping(pgd); + if (result) + return result; + + /* Set up the direct mapping from scratch */ + for (i = 0; i < nr_pfn_mapped; i++) { + mstart = pfn_mapped[i].start << PAGE_SHIFT; + mend = pfn_mapped[i].end << PAGE_SHIFT; + + result = kernel_ident_mapping_init(&info, pgd, mstart, mend); + if (result) + return result; + } + + temp_pgt = __pa(pgd); + return 0; +} + +asmlinkage int swsusp_arch_resume(void) +{ + int error; + + /* We have got enough memory and from now on we cannot recover */ + error = set_up_temporary_mappings(); + if (error) + return error; + + error = relocate_restore_code(); + if (error) + return error; + + restore_image(); + return 0; +} diff --git a/arch/x86/power/hibernate_asm_32.S b/arch/x86/power/hibernate_asm_32.S new file mode 100644 index 000000000..5606a15cf --- /dev/null +++ b/arch/x86/power/hibernate_asm_32.S @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This may not use any stack, nor any variable that is not "NoSave": + * + * Its rewriting one kernel image with another. What is stack in "old" + * image could very well be data page in "new" image, and overwriting + * your own stack under you is bad idea. 
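+ *
+ * swsusp_arch_suspend() below therefore stows the CPU registers in the
+ * saved_context_* variables, and restore_registers() reads them back
+ * only after every image page has been copied into place.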
+ */ + +#include +#include +#include +#include +#include +#include + +.text + +SYM_FUNC_START(swsusp_arch_suspend) + movl %esp, saved_context_esp + movl %ebx, saved_context_ebx + movl %ebp, saved_context_ebp + movl %esi, saved_context_esi + movl %edi, saved_context_edi + pushfl + popl saved_context_eflags + + /* save cr3 */ + movl %cr3, %eax + movl %eax, restore_cr3 + + FRAME_BEGIN + call swsusp_save + FRAME_END + RET +SYM_FUNC_END(swsusp_arch_suspend) + +SYM_CODE_START(restore_image) + /* prepare to jump to the image kernel */ + movl restore_jump_address, %ebx + movl restore_cr3, %ebp + + movl mmu_cr4_features, %ecx + + /* jump to relocated restore code */ + movl relocated_restore_code, %eax + jmpl *%eax +SYM_CODE_END(restore_image) + +/* code below has been relocated to a safe page */ +SYM_CODE_START(core_restore_code) + movl temp_pgt, %eax + movl %eax, %cr3 + + jecxz 1f # cr4 Pentium and higher, skip if zero + andl $~(X86_CR4_PGE), %ecx + movl %ecx, %cr4; # turn off PGE + movl %cr3, %eax; # flush TLB + movl %eax, %cr3 +1: + movl restore_pblist, %edx + .p2align 4,,7 + +copy_loop: + testl %edx, %edx + jz done + + movl pbe_address(%edx), %esi + movl pbe_orig_address(%edx), %edi + + movl $(PAGE_SIZE >> 2), %ecx + rep + movsl + + movl pbe_next(%edx), %edx + jmp copy_loop + .p2align 4,,7 + +done: + jmpl *%ebx +SYM_CODE_END(core_restore_code) + + /* code below belongs to the image kernel */ + .align PAGE_SIZE +SYM_FUNC_START(restore_registers) + /* go back to the original page tables */ + movl %ebp, %cr3 + movl mmu_cr4_features, %ecx + jecxz 1f # cr4 Pentium and higher, skip if zero + movl %ecx, %cr4; # turn PGE back on +1: + + movl saved_context_esp, %esp + movl saved_context_ebp, %ebp + movl saved_context_ebx, %ebx + movl saved_context_esi, %esi + movl saved_context_edi, %edi + + pushl saved_context_eflags + popfl + + /* Saved in save_processor_state. */ + movl $saved_context, %eax + lgdt saved_context_gdt_desc(%eax) + + xorl %eax, %eax + + /* tell the hibernation core that we've just restored the memory */ + movl %eax, in_suspend + + RET +SYM_FUNC_END(restore_registers) diff --git a/arch/x86/power/hibernate_asm_64.S b/arch/x86/power/hibernate_asm_64.S new file mode 100644 index 000000000..0a0539e1c --- /dev/null +++ b/arch/x86/power/hibernate_asm_64.S @@ -0,0 +1,150 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Hibernation support for x86-64 + * + * Copyright 2007 Rafael J. Wysocki + * Copyright 2005 Andi Kleen + * Copyright 2004 Pavel Machek + * + * swsusp_arch_resume must not use any stack or any nonlocal variables while + * copying pages: + * + * Its rewriting one kernel image with another. What is stack in "old" + * image could very well be data page in "new" image, and overwriting + * your own stack under you is bad idea. 
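+ *
+ * core_restore_code below therefore keeps all of its state in registers:
+ * %rax holds temp_pgt, %rbx mmu_cr4_features, %rdx the restore_pblist
+ * and %r8/%r9 the image kernel's jump address and saved CR3.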
+ */ + + .text +#include +#include +#include +#include +#include +#include +#include + + /* code below belongs to the image kernel */ + .align PAGE_SIZE +SYM_FUNC_START(restore_registers) + /* go back to the original page tables */ + movq %r9, %cr3 + + /* Flush TLB, including "global" things (vmalloc) */ + movq mmu_cr4_features(%rip), %rax + movq %rax, %rdx + andq $~(X86_CR4_PGE), %rdx + movq %rdx, %cr4; # turn off PGE + movq %cr3, %rcx; # flush TLB + movq %rcx, %cr3 + movq %rax, %cr4; # turn PGE back on + + /* We don't restore %rax, it must be 0 anyway */ + movq $saved_context, %rax + movq pt_regs_sp(%rax), %rsp + movq pt_regs_bp(%rax), %rbp + movq pt_regs_si(%rax), %rsi + movq pt_regs_di(%rax), %rdi + movq pt_regs_bx(%rax), %rbx + movq pt_regs_cx(%rax), %rcx + movq pt_regs_dx(%rax), %rdx + movq pt_regs_r8(%rax), %r8 + movq pt_regs_r9(%rax), %r9 + movq pt_regs_r10(%rax), %r10 + movq pt_regs_r11(%rax), %r11 + movq pt_regs_r12(%rax), %r12 + movq pt_regs_r13(%rax), %r13 + movq pt_regs_r14(%rax), %r14 + movq pt_regs_r15(%rax), %r15 + pushq pt_regs_flags(%rax) + popfq + + /* Saved in save_processor_state. */ + lgdt saved_context_gdt_desc(%rax) + + xorl %eax, %eax + + /* tell the hibernation core that we've just restored the memory */ + movq %rax, in_suspend(%rip) + + RET +SYM_FUNC_END(restore_registers) + +SYM_FUNC_START(swsusp_arch_suspend) + movq $saved_context, %rax + movq %rsp, pt_regs_sp(%rax) + movq %rbp, pt_regs_bp(%rax) + movq %rsi, pt_regs_si(%rax) + movq %rdi, pt_regs_di(%rax) + movq %rbx, pt_regs_bx(%rax) + movq %rcx, pt_regs_cx(%rax) + movq %rdx, pt_regs_dx(%rax) + movq %r8, pt_regs_r8(%rax) + movq %r9, pt_regs_r9(%rax) + movq %r10, pt_regs_r10(%rax) + movq %r11, pt_regs_r11(%rax) + movq %r12, pt_regs_r12(%rax) + movq %r13, pt_regs_r13(%rax) + movq %r14, pt_regs_r14(%rax) + movq %r15, pt_regs_r15(%rax) + pushfq + popq pt_regs_flags(%rax) + + /* save cr3 */ + movq %cr3, %rax + movq %rax, restore_cr3(%rip) + + FRAME_BEGIN + call swsusp_save + FRAME_END + RET +SYM_FUNC_END(swsusp_arch_suspend) + +SYM_FUNC_START(restore_image) + /* prepare to jump to the image kernel */ + movq restore_jump_address(%rip), %r8 + movq restore_cr3(%rip), %r9 + + /* prepare to switch to temporary page tables */ + movq temp_pgt(%rip), %rax + movq mmu_cr4_features(%rip), %rbx + + /* prepare to copy image data to their original locations */ + movq restore_pblist(%rip), %rdx + + /* jump to relocated restore code */ + movq relocated_restore_code(%rip), %rcx + ANNOTATE_RETPOLINE_SAFE + jmpq *%rcx +SYM_FUNC_END(restore_image) + + /* code below has been relocated to a safe page */ +SYM_FUNC_START(core_restore_code) + /* switch to temporary page tables */ + movq %rax, %cr3 + /* flush TLB */ + movq %rbx, %rcx + andq $~(X86_CR4_PGE), %rcx + movq %rcx, %cr4; # turn off PGE + movq %cr3, %rcx; # flush TLB + movq %rcx, %cr3; + movq %rbx, %cr4; # turn PGE back on +.Lloop: + testq %rdx, %rdx + jz .Ldone + + /* get addresses from the pbe and copy the page */ + movq pbe_address(%rdx), %rsi + movq pbe_orig_address(%rdx), %rdi + movq $(PAGE_SIZE >> 3), %rcx + rep + movsq + + /* progress to the next pbe */ + movq pbe_next(%rdx), %rdx + jmp .Lloop + +.Ldone: + /* jump to the restore_registers address from the image header */ + ANNOTATE_RETPOLINE_SAFE + jmpq *%r8 +SYM_FUNC_END(core_restore_code) -- cgit v1.2.3