author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit     ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree       b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/x86/realmode
parent     Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/realmode')
-rw-r--r--  arch/x86/realmode/Makefile                |  22
-rw-r--r--  arch/x86/realmode/init.c                  | 220
-rw-r--r--  arch/x86/realmode/rm/.gitignore           |   4
-rw-r--r--  arch/x86/realmode/rm/Makefile             |  80
-rw-r--r--  arch/x86/realmode/rm/bioscall.S           |   1
-rw-r--r--  arch/x86/realmode/rm/copy.S               |   1
-rw-r--r--  arch/x86/realmode/rm/header.S             |  45
-rw-r--r--  arch/x86/realmode/rm/realmode.h           |  22
-rw-r--r--  arch/x86/realmode/rm/realmode.lds.S       |  77
-rw-r--r--  arch/x86/realmode/rm/reboot.S             | 157
-rw-r--r--  arch/x86/realmode/rm/regs.c               |   1
-rw-r--r--  arch/x86/realmode/rm/stack.S              |  18
-rw-r--r--  arch/x86/realmode/rm/trampoline_32.S      |  73
-rw-r--r--  arch/x86/realmode/rm/trampoline_64.S      | 263
-rw-r--r--  arch/x86/realmode/rm/trampoline_common.S  |  14
-rw-r--r--  arch/x86/realmode/rm/video-bios.c         |   1
-rw-r--r--  arch/x86/realmode/rm/video-mode.c         |   1
-rw-r--r--  arch/x86/realmode/rm/video-vesa.c         |   1
-rw-r--r--  arch/x86/realmode/rm/video-vga.c          |   1
-rw-r--r--  arch/x86/realmode/rm/wakemain.c           |  87
-rw-r--r--  arch/x86/realmode/rm/wakeup.h             |  43
-rw-r--r--  arch/x86/realmode/rm/wakeup_asm.S         | 179
-rw-r--r--  arch/x86/realmode/rmpiggy.S               |  19
23 files changed, 1330 insertions, 0 deletions
diff --git a/arch/x86/realmode/Makefile b/arch/x86/realmode/Makefile
new file mode 100644
index 0000000000..a0b491ae2d
--- /dev/null
+++ b/arch/x86/realmode/Makefile
@@ -0,0 +1,22 @@
+#
+# arch/x86/realmode/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+
+subdir- := rm
+
+obj-y += init.o
+obj-y += rmpiggy.o
+
+$(obj)/rmpiggy.o: $(obj)/rm/realmode.bin
+
+$(obj)/rm/realmode.bin: FORCE
+ $(Q)$(MAKE) $(build)=$(obj)/rm $@
diff --git a/arch/x86/realmode/init.c b/arch/x86/realmode/init.c
new file mode 100644
index 0000000000..788e555954
--- /dev/null
+++ b/arch/x86/realmode/init.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/memblock.h>
+#include <linux/cc_platform.h>
+#include <linux/pgtable.h>
+
+#include <asm/set_memory.h>
+#include <asm/realmode.h>
+#include <asm/tlbflush.h>
+#include <asm/crash.h>
+#include <asm/sev.h>
+
+struct real_mode_header *real_mode_header;
+u32 *trampoline_cr4_features;
+
+/* Hold the pgd entry used on booting additional CPUs */
+pgd_t trampoline_pgd_entry;
+
+void load_trampoline_pgtable(void)
+{
+#ifdef CONFIG_X86_32
+ load_cr3(initial_page_table);
+#else
+ /*
+ * This function is called before exiting to real-mode and that will
+ * fail with CR4.PCIDE still set.
+ */
+ if (boot_cpu_has(X86_FEATURE_PCID))
+ cr4_clear_bits(X86_CR4_PCIDE);
+
+ write_cr3(real_mode_header->trampoline_pgd);
+#endif
+
+ /*
+ * The CR3 write above will not flush global TLB entries.
+ * Stale, global entries from previous page tables may still be
+ * present. Flush those stale entries.
+ *
+ * This ensures that memory accessed while running with
+ * trampoline_pgd is *actually* mapped into trampoline_pgd.
+ */
+ __flush_tlb_all();
+}
+
+void __init reserve_real_mode(void)
+{
+ phys_addr_t mem;
+ size_t size = real_mode_size_needed();
+
+ if (!size)
+ return;
+
+ WARN_ON(slab_is_available());
+
+ /* Has to be under 1M so we can execute real-mode AP code. */
+ mem = memblock_phys_alloc_range(size, PAGE_SIZE, 0, 1<<20);
+ if (!mem)
+ pr_info("No sub-1M memory is available for the trampoline\n");
+ else
+ set_real_mode_mem(mem);
+
+ /*
+	 * Unconditionally reserve the entire first 1M, see comment in
+ * setup_arch().
+ */
+ memblock_reserve(0, SZ_1M);
+}
+
+static void __init sme_sev_setup_real_mode(struct trampoline_header *th)
+{
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ th->flags |= TH_FLAGS_SME_ACTIVE;
+
+ if (cc_platform_has(CC_ATTR_GUEST_STATE_ENCRYPT)) {
+ /*
+ * Skip the call to verify_cpu() in secondary_startup_64 as it
+ * will cause #VC exceptions when the AP can't handle them yet.
+ */
+ th->start = (u64) secondary_startup_64_no_verify;
+
+ if (sev_es_setup_ap_jump_table(real_mode_header))
+ panic("Failed to get/update SEV-ES AP Jump Table");
+ }
+#endif
+}
+
+static void __init setup_real_mode(void)
+{
+ u16 real_mode_seg;
+ const u32 *rel;
+ u32 count;
+ unsigned char *base;
+ unsigned long phys_base;
+ struct trampoline_header *trampoline_header;
+ size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+#ifdef CONFIG_X86_64
+ u64 *trampoline_pgd;
+ u64 efer;
+ int i;
+#endif
+
+ base = (unsigned char *)real_mode_header;
+
+ /*
+ * If SME is active, the trampoline area will need to be in
+ * decrypted memory in order to bring up other processors
+ * successfully. This is not needed for SEV.
+ */
+ if (cc_platform_has(CC_ATTR_HOST_MEM_ENCRYPT))
+ set_memory_decrypted((unsigned long)base, size >> PAGE_SHIFT);
+
+ memcpy(base, real_mode_blob, size);
+
+ phys_base = __pa(base);
+ real_mode_seg = phys_base >> 4;
+
+ rel = (u32 *) real_mode_relocs;
+
+ /* 16-bit segment relocations. */
+ count = *rel++;
+ while (count--) {
+ u16 *seg = (u16 *) (base + *rel++);
+ *seg = real_mode_seg;
+ }
+
+ /* 32-bit linear relocations. */
+ count = *rel++;
+ while (count--) {
+ u32 *ptr = (u32 *) (base + *rel++);
+ *ptr += phys_base;
+ }
+
+ /* Must be performed *after* relocation. */
+ trampoline_header = (struct trampoline_header *)
+ __va(real_mode_header->trampoline_header);
+
+#ifdef CONFIG_X86_32
+ trampoline_header->start = __pa_symbol(startup_32_smp);
+ trampoline_header->gdt_limit = __BOOT_DS + 7;
+ trampoline_header->gdt_base = __pa_symbol(boot_gdt);
+#else
+ /*
+ * Some AMD processors will #GP(0) if EFER.LMA is set in WRMSR
+ * so we need to mask it out.
+ */
+ rdmsrl(MSR_EFER, efer);
+ trampoline_header->efer = efer & ~EFER_LMA;
+
+ trampoline_header->start = (u64) secondary_startup_64;
+ trampoline_cr4_features = &trampoline_header->cr4;
+ *trampoline_cr4_features = mmu_cr4_features;
+
+ trampoline_header->flags = 0;
+
+ trampoline_lock = &trampoline_header->lock;
+ *trampoline_lock = 0;
+
+ trampoline_pgd = (u64 *) __va(real_mode_header->trampoline_pgd);
+
+ /* Map the real mode stub as virtual == physical */
+ trampoline_pgd[0] = trampoline_pgd_entry.pgd;
+
+ /*
+ * Include the entirety of the kernel mapping into the trampoline
+ * PGD. This way, all mappings present in the normal kernel page
+ * tables are usable while running on trampoline_pgd.
+ */
+ for (i = pgd_index(__PAGE_OFFSET); i < PTRS_PER_PGD; i++)
+ trampoline_pgd[i] = init_top_pgt[i].pgd;
+#endif
+
+ sme_sev_setup_real_mode(trampoline_header);
+}
+
+/*
+ * reserve_real_mode() gets called very early, to guarantee the
+ * availability of low memory. This is before the proper kernel page
+ * tables are set up, so we cannot set page permissions in that
+ * function. Also, the trampoline code will be executed by APs, so we
+ * need to mark it executable at do_pre_smp_initcalls() at least,
+ * thus run it as an early_initcall().
+ */
+static void __init set_real_mode_permissions(void)
+{
+ unsigned char *base = (unsigned char *) real_mode_header;
+ size_t size = PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
+
+ size_t ro_size =
+ PAGE_ALIGN(real_mode_header->ro_end) -
+ __pa(base);
+
+ size_t text_size =
+ PAGE_ALIGN(real_mode_header->ro_end) -
+ real_mode_header->text_start;
+
+ unsigned long text_start =
+ (unsigned long) __va(real_mode_header->text_start);
+
+ set_memory_nx((unsigned long) base, size >> PAGE_SHIFT);
+ set_memory_ro((unsigned long) base, ro_size >> PAGE_SHIFT);
+ set_memory_x((unsigned long) text_start, text_size >> PAGE_SHIFT);
+}
+
+void __init init_real_mode(void)
+{
+ if (!real_mode_header)
+ panic("Real mode trampoline was not allocated");
+
+ setup_real_mode();
+ set_real_mode_permissions();
+}
+
+static int __init do_init_real_mode(void)
+{
+ x86_platform.realmode_init();
+ return 0;
+}
+early_initcall(do_init_real_mode);
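The two relocation loops in setup_real_mode() above walk the realmode.relocs stream produced by arch/x86/tools/relocs --realmode: two tables, each a 32-bit entry count followed by that many 32-bit offsets into the blob. A minimal stand-alone sketch of the same walk, assuming that layout (the function name and C99 types are illustrative, not part of the patch):

#include <stdint.h>

/* Apply both relocation tables to a copy of the real-mode blob at 'base'. */
static void apply_realmode_relocs(uint8_t *base, const uint32_t *rel,
                                  uint16_t real_mode_seg, uint32_t phys_base)
{
        uint32_t count;

        /* Table 1: 16-bit segment relocations - store the real-mode segment. */
        count = *rel++;
        while (count--)
                *(uint16_t *)(base + *rel++) = real_mode_seg;

        /* Table 2: 32-bit linear relocations - add the physical load address. */
        count = *rel++;
        while (count--)
                *(uint32_t *)(base + *rel++) += phys_base;
}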
diff --git a/arch/x86/realmode/rm/.gitignore b/arch/x86/realmode/rm/.gitignore
new file mode 100644
index 0000000000..6c3464f461
--- /dev/null
+++ b/arch/x86/realmode/rm/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+pasyms.h
+realmode.lds
+realmode.relocs
diff --git a/arch/x86/realmode/rm/Makefile b/arch/x86/realmode/rm/Makefile
new file mode 100644
index 0000000000..f614009d3e
--- /dev/null
+++ b/arch/x86/realmode/rm/Makefile
@@ -0,0 +1,80 @@
+#
+# arch/x86/realmode/rm/Makefile
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+#
+
+# Sanitizer runtimes are unavailable and cannot be linked here.
+KASAN_SANITIZE := n
+KCSAN_SANITIZE := n
+KMSAN_SANITIZE := n
+OBJECT_FILES_NON_STANDARD := y
+
+# Prevents link failures: __sanitizer_cov_trace_pc() is not linked in.
+KCOV_INSTRUMENT := n
+
+always-y := realmode.bin realmode.relocs
+
+wakeup-objs := wakeup_asm.o wakemain.o video-mode.o
+wakeup-objs += copy.o bioscall.o regs.o
+# The link order of the video-*.o modules can matter. In particular,
+# video-vga.o *must* be listed first, followed by video-vesa.o.
+# Hardware-specific drivers should follow in the order they should be
+# probed, and video-bios.o should typically be last.
+wakeup-objs += video-vga.o
+wakeup-objs += video-vesa.o
+wakeup-objs += video-bios.o
+
+realmode-y += header.o
+realmode-y += trampoline_$(BITS).o
+realmode-y += stack.o
+realmode-y += reboot.o
+realmode-$(CONFIG_ACPI_SLEEP) += $(wakeup-objs)
+
+targets += $(realmode-y)
+
+REALMODE_OBJS = $(addprefix $(obj)/,$(realmode-y))
+
+sed-pasyms := -n -r -e 's/^([0-9a-fA-F]+) [ABCDGRSTVW] (.+)$$/pa_\2 = \2;/p'
+
+quiet_cmd_pasyms = PASYMS $@
+ cmd_pasyms = $(NM) $(real-prereqs) | sed $(sed-pasyms) | sort | uniq > $@
+
+targets += pasyms.h
+$(obj)/pasyms.h: $(REALMODE_OBJS) FORCE
+ $(call if_changed,pasyms)
+
+targets += realmode.lds
+$(obj)/realmode.lds: $(obj)/pasyms.h
+
+LDFLAGS_realmode.elf := -m elf_i386 --emit-relocs -T
+CPPFLAGS_realmode.lds += -P -C -I$(objtree)/$(obj)
+
+targets += realmode.elf
+$(obj)/realmode.elf: $(obj)/realmode.lds $(REALMODE_OBJS) FORCE
+ $(call if_changed,ld)
+
+OBJCOPYFLAGS_realmode.bin := -O binary
+
+targets += realmode.bin
+$(obj)/realmode.bin: $(obj)/realmode.elf $(obj)/realmode.relocs FORCE
+ $(call if_changed,objcopy)
+
+quiet_cmd_relocs = RELOCS $@
+ cmd_relocs = arch/x86/tools/relocs --realmode $< > $@
+
+targets += realmode.relocs
+$(obj)/realmode.relocs: $(obj)/realmode.elf FORCE
+ $(call if_changed,relocs)
+
+# ---------------------------------------------------------------------------
+
+KBUILD_CFLAGS := $(REALMODE_CFLAGS) -D_SETUP -D_WAKEUP \
+ -I$(srctree)/arch/x86/boot
+KBUILD_AFLAGS := $(KBUILD_CFLAGS) -D__ASSEMBLY__
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables
+GCOV_PROFILE := n
+UBSAN_SANITIZE := n
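As a hedged illustration of the PASYMS rule above (the symbol name is only an example taken from the files in this series), an nm output line such as

    00001000 T trampoline_start

is rewritten by the sed expression into the linker-script assignment

    pa_trampoline_start = trampoline_start;

and collected into pasyms.h, which realmode.lds.S includes so that every exported real-mode symbol is also reachable under a pa_-prefixed alias.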
diff --git a/arch/x86/realmode/rm/bioscall.S b/arch/x86/realmode/rm/bioscall.S
new file mode 100644
index 0000000000..16162d1979
--- /dev/null
+++ b/arch/x86/realmode/rm/bioscall.S
@@ -0,0 +1 @@
+#include "../../boot/bioscall.S"
diff --git a/arch/x86/realmode/rm/copy.S b/arch/x86/realmode/rm/copy.S
new file mode 100644
index 0000000000..b785e6f38f
--- /dev/null
+++ b/arch/x86/realmode/rm/copy.S
@@ -0,0 +1 @@
+#include "../../boot/copy.S"
diff --git a/arch/x86/realmode/rm/header.S b/arch/x86/realmode/rm/header.S
new file mode 100644
index 0000000000..2eb62be6d2
--- /dev/null
+++ b/arch/x86/realmode/rm/header.S
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Real-mode blob header; this should match realmode.h and be
+ * readonly; for mutable data instead add pointers into the .data
+ * or .bss sections as appropriate.
+ */
+
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+#include <asm/segment.h>
+
+#include "realmode.h"
+
+ .section ".header", "a"
+
+ .balign 16
+SYM_DATA_START(real_mode_header)
+ .long pa_text_start
+ .long pa_ro_end
+ /* SMP trampoline */
+ .long pa_trampoline_start
+ .long pa_trampoline_header
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+ .long pa_sev_es_trampoline_start
+#endif
+#ifdef CONFIG_X86_64
+ .long pa_trampoline_start64
+ .long pa_trampoline_pgd;
+#endif
+ /* ACPI S3 wakeup */
+#ifdef CONFIG_ACPI_SLEEP
+ .long pa_wakeup_start
+ .long pa_wakeup_header
+#endif
+ /* APM/BIOS reboot */
+ .long pa_machine_real_restart_asm
+#ifdef CONFIG_X86_64
+ .long __KERNEL32_CS
+#endif
+SYM_DATA_END(real_mode_header)
+
+ /* End signature, used to verify integrity */
+ .section ".signature","a"
+ .balign 4
+SYM_DATA(end_signature, .long REALMODE_END_SIGNATURE)
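The layout above has to stay in lockstep with the C-side struct real_mode_header that the comment references. A sketch of that view, derived one u32 per .long from the fields above; field names follow asm/realmode.h and are shown for orientation only, they are not part of this patch:

#include <linux/types.h>

struct real_mode_header {
	u32 text_start;
	u32 ro_end;
	/* SMP trampoline */
	u32 trampoline_start;
	u32 trampoline_header;
#ifdef CONFIG_AMD_MEM_ENCRYPT
	u32 sev_es_trampoline_start;
#endif
#ifdef CONFIG_X86_64
	u32 trampoline_start64;
	u32 trampoline_pgd;
#endif
	/* ACPI S3 wakeup */
#ifdef CONFIG_ACPI_SLEEP
	u32 wakeup_start;
	u32 wakeup_header;
#endif
	/* APM/BIOS reboot */
	u32 machine_real_restart_asm;
#ifdef CONFIG_X86_64
	u32 machine_real_restart_seg;	/* holds __KERNEL32_CS above */
#endif
};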
diff --git a/arch/x86/realmode/rm/realmode.h b/arch/x86/realmode/rm/realmode.h
new file mode 100644
index 0000000000..c76041a353
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef ARCH_X86_REALMODE_RM_REALMODE_H
+#define ARCH_X86_REALMODE_RM_REALMODE_H
+
+#ifdef __ASSEMBLY__
+
+/*
+ * 16-bit ljmpw to the real_mode_seg
+ *
+ * This must be open-coded since gas will choke on using a
+ * relocatable symbol for the segment portion.
+ */
+#define LJMPW_RM(to) .byte 0xea ; .word (to), real_mode_seg
+
+#endif /* __ASSEMBLY__ */
+
+/*
+ * Signature at the end of the realmode region
+ */
+#define REALMODE_END_SIGNATURE 0x65a22c82
+
+#endif /* ARCH_X86_REALMODE_RM_REALMODE_H */
diff --git a/arch/x86/realmode/rm/realmode.lds.S b/arch/x86/realmode/rm/realmode.lds.S
new file mode 100644
index 0000000000..63aa51875b
--- /dev/null
+++ b/arch/x86/realmode/rm/realmode.lds.S
@@ -0,0 +1,77 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * realmode.lds.S
+ *
+ * Linker script for the real-mode code
+ */
+
+#include <asm/page_types.h>
+
+#undef i386
+
+OUTPUT_FORMAT("elf32-i386")
+OUTPUT_ARCH(i386)
+ENTRY(pa_text_start)
+
+SECTIONS
+{
+ real_mode_seg = 0;
+
+ . = 0;
+ .header : {
+ pa_real_mode_base = .;
+ *(.header)
+ }
+
+ . = ALIGN(4);
+ .rodata : {
+ *(.rodata)
+ *(.rodata.*)
+ . = ALIGN(16);
+ video_cards = .;
+ *(.videocards)
+ video_cards_end = .;
+ }
+
+ . = ALIGN(PAGE_SIZE);
+ pa_text_start = .;
+ .text : {
+ *(.text)
+ *(.text.*)
+ }
+
+ .text32 : {
+ *(.text32)
+ *(.text32.*)
+ }
+
+ .text64 : {
+ *(.text64)
+ *(.text64.*)
+ }
+ pa_ro_end = .;
+
+ . = ALIGN(PAGE_SIZE);
+ .data : {
+ *(.data)
+ *(.data.*)
+ }
+
+ . = ALIGN(128);
+ .bss : {
+ *(.bss*)
+ }
+
+ /* End signature for integrity checking */
+ . = ALIGN(4);
+ .signature : {
+ *(.signature)
+ }
+
+ /DISCARD/ : {
+ *(.note*)
+ *(.debug*)
+ }
+
+#include "pasyms.h"
+}
diff --git a/arch/x86/realmode/rm/reboot.S b/arch/x86/realmode/rm/reboot.S
new file mode 100644
index 0000000000..f10515b10e
--- /dev/null
+++ b/arch/x86/realmode/rm/reboot.S
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include <asm/processor-flags.h>
+#include <asm/msr-index.h>
+#include "realmode.h"
+
+/*
+ * The following code and data reboots the machine by switching to real
+ * mode and jumping to the BIOS reset entry point, as if the CPU has
+ * really been reset. The previous version asked the keyboard
+ * controller to pulse the CPU reset line, which is more thorough, but
+ * doesn't work with at least one type of 486 motherboard. It is easy
+ * to stop this code working; hence the copious comments.
+ *
+ * This code is called with the restart type (0 = BIOS, 1 = APM) in
+ * the primary argument register (%eax for 32 bit, %edi for 64 bit).
+ */
+ .section ".text32", "ax"
+ .code32
+SYM_CODE_START(machine_real_restart_asm)
+
+#ifdef CONFIG_X86_64
+ /* Switch to trampoline GDT as it is guaranteed < 4 GiB */
+ movl $__KERNEL_DS, %eax
+ movl %eax, %ds
+ lgdtl pa_tr_gdt
+
+ /* Disable paging to drop us out of long mode */
+ movl %cr0, %eax
+ andl $~X86_CR0_PG, %eax
+ movl %eax, %cr0
+ ljmpl $__KERNEL32_CS, $pa_machine_real_restart_paging_off
+
+SYM_INNER_LABEL(machine_real_restart_paging_off, SYM_L_GLOBAL)
+ xorl %eax, %eax
+ xorl %edx, %edx
+ movl $MSR_EFER, %ecx
+ wrmsr
+
+ movl %edi, %eax
+
+#endif /* CONFIG_X86_64 */
+
+ /* Set up the IDT for real mode. */
+ lidtl pa_machine_real_restart_idt
+
+ /*
+ * Set up a GDT from which we can load segment descriptors for real
+ * mode. The GDT is not used in real mode; it is just needed here to
+ * prepare the descriptors.
+ */
+ lgdtl pa_machine_real_restart_gdt
+
+ /*
+ * Load the data segment registers with 16-bit compatible values
+ */
+ movl $16, %ecx
+ movl %ecx, %ds
+ movl %ecx, %es
+ movl %ecx, %fs
+ movl %ecx, %gs
+ movl %ecx, %ss
+ ljmpw $8, $1f
+SYM_CODE_END(machine_real_restart_asm)
+
+/*
+ * This is 16-bit protected mode code to disable paging and the cache,
+ * switch to real mode and jump to the BIOS reset code.
+ *
+ * The instruction that switches to real mode by writing to CR0 must be
+ * followed immediately by a far jump instruction, which sets CS to a
+ * valid value for real mode and flushes the prefetch queue to avoid
+ * running instructions that have already been decoded in protected
+ * mode.
+ *
+ * Clears all the flags except ET, especially PG (paging), PE
+ * (protected-mode enable) and TS (task switch for coprocessor state
+ * save). Flushes the TLB after paging has been disabled. Sets CD and
+ * NW, to disable the cache on a 486, and invalidates the cache. This
+ * is more like the state of a 486 after reset. I don't know if
+ * something else should be done for other chips.
+ *
+ * More could be done here to set up the registers as if a CPU reset had
+ * occurred; hopefully real BIOSs don't assume much. This is not the
+ * actual BIOS entry point, anyway (that is at 0xfffffff0).
+ *
+ * Most of this work is probably excessive, but it is what is tested.
+ */
+ .text
+ .code16
+
+ .balign 16
+machine_real_restart_asm16:
+1:
+ xorl %ecx, %ecx
+ movl %cr0, %edx
+ andl $0x00000011, %edx
+ orl $0x60000000, %edx
+ movl %edx, %cr0
+ movl %ecx, %cr3
+ movl %cr0, %edx
+ testl $0x60000000, %edx /* If no cache bits -> no wbinvd */
+ jz 2f
+ wbinvd
+2:
+ andb $0x10, %dl
+ movl %edx, %cr0
+ LJMPW_RM(3f)
+3:
+ andw %ax, %ax
+ jz bios
+
+apm:
+ movw $0x1000, %ax
+ movw %ax, %ss
+ movw $0xf000, %sp
+ movw $0x5307, %ax
+ movw $0x0001, %bx
+ movw $0x0003, %cx
+ int $0x15
+ /* This should never return... */
+
+bios:
+ ljmpw $0xf000, $0xfff0
+
+ .section ".rodata", "a"
+
+ .balign 16
+SYM_DATA_START(machine_real_restart_idt)
+ .word 0xffff /* Length - real mode default value */
+ .long 0 /* Base - real mode default value */
+SYM_DATA_END(machine_real_restart_idt)
+
+ .balign 16
+SYM_DATA_START(machine_real_restart_gdt)
+ /* Self-pointer */
+ .word 0xffff /* Length - real mode default value */
+ .long pa_machine_real_restart_gdt
+ .word 0
+
+ /*
+ * 16-bit code segment pointing to real_mode_seg
+ * Selector value 8
+ */
+ .word 0xffff /* Limit */
+ .long 0x9b000000 + pa_real_mode_base
+ .word 0
+
+ /*
+ * 16-bit data segment with the selector value 16 = 0x10 and
+ * base value 0x100; since this is consistent with real mode
+ * semantics we don't have to reload the segments once CR0.PE = 0.
+ */
+ .quad GDT_ENTRY(0x0093, 0x100, 0xffff)
+SYM_DATA_END(machine_real_restart_gdt)
diff --git a/arch/x86/realmode/rm/regs.c b/arch/x86/realmode/rm/regs.c
new file mode 100644
index 0000000000..fbb15b9f9c
--- /dev/null
+++ b/arch/x86/realmode/rm/regs.c
@@ -0,0 +1 @@
+#include "../../boot/regs.c"
diff --git a/arch/x86/realmode/rm/stack.S b/arch/x86/realmode/rm/stack.S
new file mode 100644
index 0000000000..0fca64061a
--- /dev/null
+++ b/arch/x86/realmode/rm/stack.S
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common heap and stack allocations
+ */
+
+#include <linux/linkage.h>
+
+ .data
+SYM_DATA(HEAP, .long rm_heap)
+SYM_DATA(heap_end, .long rm_stack)
+
+ .bss
+ .balign 16
+SYM_DATA(rm_heap, .space 2048)
+
+SYM_DATA_START(rm_stack)
+ .space 2048
+SYM_DATA_END_LABEL(rm_stack, SYM_L_GLOBAL, rm_stack_end)
diff --git a/arch/x86/realmode/rm/trampoline_32.S b/arch/x86/realmode/rm/trampoline_32.S
new file mode 100644
index 0000000000..3fad907a17
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_32.S
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Trampoline.S Derived from Setup.S by Linus Torvalds
+ *
+ * 4 Jan 1997 Michael Chastain: changed to gnu as.
+ *
+ * This is only used for booting secondary CPUs in an SMP machine.
+ *
+ * Entry: CS:IP point to the start of our code, we are
+ * in real mode with no stack, but the rest of the
+ * trampoline page to make our stack and everything else
+ * is a mystery.
+ *
+ * We jump into arch/x86/kernel/head_32.S.
+ *
+ * On entry to trampoline_start, the processor is in real mode
+ * with 16-bit addressing and 16-bit data. CS has some value
+ * and IP is zero. Thus, we load CS to the physical segment
+ * of the real mode code before doing anything further.
+ */
+
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/page_types.h>
+#include "realmode.h"
+
+ .text
+ .code16
+
+ .balign PAGE_SIZE
+SYM_CODE_START(trampoline_start)
+ wbinvd # Needed for NUMA-Q should be harmless for others
+
+ LJMPW_RM(1f)
+1:
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+
+ cli # We should be safe anyway
+
+ movl tr_start, %eax # where we need to go
+
+ /*
+	 * With the GDT in a non-default location, the kernel can place it
+	 * beyond 16 MB, and lgdt cannot load such an address because the
+	 * default operand size in real mode is 16-bit. Use lgdtl instead
+	 * to force a 32-bit operand size.
+ */
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
+
+ movw $1, %dx # protected mode (PE) bit
+ lmsw %dx # into protected mode
+
+ ljmpl $__BOOT_CS, $pa_startup_32
+SYM_CODE_END(trampoline_start)
+
+ .section ".text32","ax"
+ .code32
+SYM_CODE_START(startup_32) # note: also used from wakeup_asm.S
+ jmp *%eax
+SYM_CODE_END(startup_32)
+
+ .bss
+ .balign 8
+SYM_DATA_START(trampoline_header)
+ SYM_DATA_LOCAL(tr_start, .space 4)
+ SYM_DATA_LOCAL(tr_gdt_pad, .space 2)
+ SYM_DATA_LOCAL(tr_gdt, .space 6)
+SYM_DATA_END(trampoline_header)
+
+#include "trampoline_common.S"
diff --git a/arch/x86/realmode/rm/trampoline_64.S b/arch/x86/realmode/rm/trampoline_64.S
new file mode 100644
index 0000000000..c9f76fae90
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_64.S
@@ -0,0 +1,263 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ *
+ * Trampoline.S Derived from Setup.S by Linus Torvalds
+ *
+ * 4 Jan 1997 Michael Chastain: changed to gnu as.
+ * 15 Sept 2005 Eric Biederman: 64bit PIC support
+ *
+ * Entry: CS:IP point to the start of our code, we are
+ * in real mode with no stack, but the rest of the
+ * trampoline page to make our stack and everything else
+ * is a mystery.
+ *
+ * On entry to trampoline_start, the processor is in real mode
+ * with 16-bit addressing and 16-bit data. CS has some value
+ * and IP is zero. Thus, data addresses need to be absolute
+ * (no relocation) and are taken with regard to r_base.
+ *
+ * With the addition of trampoline_level4_pgt this code can
+ * now enter a 64bit kernel that lives at arbitrary 64bit
+ * physical addresses.
+ *
+ * If you work on this file, check the object module with objdump
+ * --full-contents --reloc to make sure there are no relocation
+ * entries.
+ */
+
+#include <linux/linkage.h>
+#include <asm/pgtable_types.h>
+#include <asm/page_types.h>
+#include <asm/msr.h>
+#include <asm/segment.h>
+#include <asm/processor-flags.h>
+#include <asm/realmode.h>
+#include "realmode.h"
+
+ .text
+ .code16
+
+.macro LOCK_AND_LOAD_REALMODE_ESP lock_pa=0
+ /*
+ * Make sure only one CPU fiddles with the realmode stack
+ */
+.Llock_rm\@:
+ .if \lock_pa
+ lock btsl $0, pa_tr_lock
+ .else
+ lock btsl $0, tr_lock
+ .endif
+ jnc 2f
+ pause
+ jmp .Llock_rm\@
+2:
+ # Setup stack
+ movl $rm_stack_end, %esp
+.endm
+
+ .balign PAGE_SIZE
+SYM_CODE_START(trampoline_start)
+ cli # We should be safe anyway
+ wbinvd
+
+ LJMPW_RM(1f)
+1:
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %ss
+
+ LOCK_AND_LOAD_REALMODE_ESP
+
+ call verify_cpu # Verify the cpu supports long mode
+ testl %eax, %eax # Check for return code
+ jnz no_longmode
+
+.Lswitch_to_protected:
+ /*
+	 * With the GDT in a non-default location, the kernel can place it
+	 * beyond 16 MB, and lgdt cannot load such an address because the
+	 * default operand size in real mode is 16-bit. Use lgdtl instead
+	 * to force a 32-bit operand size.
+ */
+
+ lidtl tr_idt # load idt with 0, 0
+ lgdtl tr_gdt # load gdt with whatever is appropriate
+
+ movw $__KERNEL_DS, %dx # Data segment descriptor
+
+ # Enable protected mode
+ movl $(CR0_STATE & ~X86_CR0_PG), %eax
+ movl %eax, %cr0 # into protected mode
+
+ # flush prefetch and jump to startup_32
+ ljmpl $__KERNEL32_CS, $pa_startup_32
+
+no_longmode:
+ hlt
+ jmp no_longmode
+SYM_CODE_END(trampoline_start)
+
+#ifdef CONFIG_AMD_MEM_ENCRYPT
+/* SEV-ES supports non-zero IP for entry points - no alignment needed */
+SYM_CODE_START(sev_es_trampoline_start)
+ cli # We should be safe anyway
+
+ LJMPW_RM(1f)
+1:
+ mov %cs, %ax # Code and data in the same place
+ mov %ax, %ds
+ mov %ax, %es
+ mov %ax, %ss
+
+ LOCK_AND_LOAD_REALMODE_ESP
+
+ jmp .Lswitch_to_protected
+SYM_CODE_END(sev_es_trampoline_start)
+#endif /* CONFIG_AMD_MEM_ENCRYPT */
+
+#include "../kernel/verify_cpu.S"
+
+ .section ".text32","ax"
+ .code32
+ .balign 4
+SYM_CODE_START(startup_32)
+ movl %edx, %ss
+ addl $pa_real_mode_base, %esp
+ movl %edx, %ds
+ movl %edx, %es
+ movl %edx, %fs
+ movl %edx, %gs
+
+ /*
+ * Check for memory encryption support. This is a safety net in
+ * case BIOS hasn't done the necessary step of setting the bit in
+ * the MSR for this AP. If SME is active and we've gotten this far
+ * then it is safe for us to set the MSR bit and continue. If we
+ * don't we'll eventually crash trying to execute encrypted
+ * instructions.
+ */
+ btl $TH_FLAGS_SME_ACTIVE_BIT, pa_tr_flags
+ jnc .Ldone
+ movl $MSR_AMD64_SYSCFG, %ecx
+ rdmsr
+ bts $MSR_AMD64_SYSCFG_MEM_ENCRYPT_BIT, %eax
+ jc .Ldone
+
+ /*
+ * Memory encryption is enabled but the SME enable bit for this
+	 * CPU has not been set. It is safe to set it, so do so.
+ */
+ wrmsr
+.Ldone:
+
+ movl pa_tr_cr4, %eax
+ movl %eax, %cr4 # Enable PAE mode
+
+ # Setup trampoline 4 level pagetables
+ movl $pa_trampoline_pgd, %eax
+ movl %eax, %cr3
+
+ # Set up EFER
+ movl $MSR_EFER, %ecx
+ rdmsr
+ /*
+ * Skip writing to EFER if the register already has desired
+ * value (to avoid #VE for the TDX guest).
+ */
+ cmp pa_tr_efer, %eax
+ jne .Lwrite_efer
+ cmp pa_tr_efer + 4, %edx
+ je .Ldone_efer
+.Lwrite_efer:
+ movl pa_tr_efer, %eax
+ movl pa_tr_efer + 4, %edx
+ wrmsr
+
+.Ldone_efer:
+ # Enable paging and in turn activate Long Mode.
+ movl $CR0_STATE, %eax
+ movl %eax, %cr0
+
+ /*
+ * At this point we're in long mode but in 32bit compatibility mode
+ * with EFER.LME = 1, CS.L = 0, CS.D = 1 (and in turn
+ * EFER.LMA = 1). Now we want to jump in 64bit mode, to do that we use
+ * the new gdt/idt that has __KERNEL_CS with CS.L = 1.
+ */
+ ljmpl $__KERNEL_CS, $pa_startup_64
+SYM_CODE_END(startup_32)
+
+SYM_CODE_START(pa_trampoline_compat)
+ /*
+ * In compatibility mode. Prep ESP and DX for startup_32, then disable
+ * paging and complete the switch to legacy 32-bit mode.
+ */
+ LOCK_AND_LOAD_REALMODE_ESP lock_pa=1
+ movw $__KERNEL_DS, %dx
+
+ movl $(CR0_STATE & ~X86_CR0_PG), %eax
+ movl %eax, %cr0
+ ljmpl $__KERNEL32_CS, $pa_startup_32
+SYM_CODE_END(pa_trampoline_compat)
+
+ .section ".text64","ax"
+ .code64
+ .balign 4
+SYM_CODE_START(startup_64)
+ # Now jump into the kernel using virtual addresses
+ jmpq *tr_start(%rip)
+SYM_CODE_END(startup_64)
+
+SYM_CODE_START(trampoline_start64)
+ /*
+ * APs start here on a direct transfer from 64-bit BIOS with identity
+ * mapped page tables. Load the kernel's GDT in order to gear down to
+ * 32-bit mode (to handle 4-level vs. 5-level paging), and to (re)load
+ * segment registers. Load the zero IDT so any fault triggers a
+ * shutdown instead of jumping back into BIOS.
+ */
+ lidt tr_idt(%rip)
+ lgdt tr_gdt64(%rip)
+
+ ljmpl *tr_compat(%rip)
+SYM_CODE_END(trampoline_start64)
+
+ .section ".rodata","a"
+ # Duplicate the global descriptor table
+ # so the kernel can live anywhere
+ .balign 16
+SYM_DATA_START(tr_gdt)
+ .short tr_gdt_end - tr_gdt - 1 # gdt limit
+ .long pa_tr_gdt
+ .short 0
+ .quad 0x00cf9b000000ffff # __KERNEL32_CS
+ .quad 0x00af9b000000ffff # __KERNEL_CS
+ .quad 0x00cf93000000ffff # __KERNEL_DS
+SYM_DATA_END_LABEL(tr_gdt, SYM_L_LOCAL, tr_gdt_end)
+
+SYM_DATA_START(tr_gdt64)
+ .short tr_gdt_end - tr_gdt - 1 # gdt limit
+ .long pa_tr_gdt
+ .long 0
+SYM_DATA_END(tr_gdt64)
+
+SYM_DATA_START(tr_compat)
+ .long pa_trampoline_compat
+ .short __KERNEL32_CS
+SYM_DATA_END(tr_compat)
+
+ .bss
+ .balign PAGE_SIZE
+SYM_DATA(trampoline_pgd, .space PAGE_SIZE)
+
+ .balign 8
+SYM_DATA_START(trampoline_header)
+ SYM_DATA_LOCAL(tr_start, .space 8)
+ SYM_DATA(tr_efer, .space 8)
+ SYM_DATA(tr_cr4, .space 4)
+ SYM_DATA(tr_flags, .space 4)
+ SYM_DATA(tr_lock, .space 4)
+SYM_DATA_END(trampoline_header)
+
+#include "trampoline_common.S"
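For orientation, the .space layout of trampoline_header above, together with the field accesses in init.c (start, efer, cr4, flags, lock), implies roughly the following C-side view on 64-bit; a hedged sketch, not part of the patch:

#include <linux/types.h>

struct trampoline_header {			/* 64-bit layout */
	u64 start;	/* tr_start: secondary_startup_64 (or _no_verify) */
	u64 efer;	/* tr_efer: EFER with LMA masked out */
	u32 cr4;	/* tr_cr4: copied from mmu_cr4_features */
	u32 flags;	/* tr_flags: e.g. TH_FLAGS_SME_ACTIVE */
	u32 lock;	/* tr_lock: serializes use of the real-mode stack */
};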
diff --git a/arch/x86/realmode/rm/trampoline_common.S b/arch/x86/realmode/rm/trampoline_common.S
new file mode 100644
index 0000000000..4331c32c47
--- /dev/null
+++ b/arch/x86/realmode/rm/trampoline_common.S
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+ .section ".rodata","a"
+ .balign 16
+
+/*
+ * When a bootloader hands off to the kernel in 32-bit mode, an
+ * IDT with a 2-byte limit and a 4-byte base is needed. When a
+ * bootloader hands off to the kernel in 64-bit mode, the base
+ * address extends to 8 bytes. Reserve enough space for either
+ * scenario.
+ */
+SYM_DATA_START_LOCAL(tr_idt)
+ .short 0
+ .quad 0
+SYM_DATA_END(tr_idt)
diff --git a/arch/x86/realmode/rm/video-bios.c b/arch/x86/realmode/rm/video-bios.c
new file mode 100644
index 0000000000..848b25aaf1
--- /dev/null
+++ b/arch/x86/realmode/rm/video-bios.c
@@ -0,0 +1 @@
+#include "../../boot/video-bios.c"
diff --git a/arch/x86/realmode/rm/video-mode.c b/arch/x86/realmode/rm/video-mode.c
new file mode 100644
index 0000000000..2a98b7e236
--- /dev/null
+++ b/arch/x86/realmode/rm/video-mode.c
@@ -0,0 +1 @@
+#include "../../boot/video-mode.c"
diff --git a/arch/x86/realmode/rm/video-vesa.c b/arch/x86/realmode/rm/video-vesa.c
new file mode 100644
index 0000000000..413edddb51
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vesa.c
@@ -0,0 +1 @@
+#include "../../boot/video-vesa.c"
diff --git a/arch/x86/realmode/rm/video-vga.c b/arch/x86/realmode/rm/video-vga.c
new file mode 100644
index 0000000000..3085f5c9d2
--- /dev/null
+++ b/arch/x86/realmode/rm/video-vga.c
@@ -0,0 +1 @@
+#include "../../boot/video-vga.c"
diff --git a/arch/x86/realmode/rm/wakemain.c b/arch/x86/realmode/rm/wakemain.c
new file mode 100644
index 0000000000..a6f4d8388a
--- /dev/null
+++ b/arch/x86/realmode/rm/wakemain.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "wakeup.h"
+#include "boot.h"
+
+static void udelay(int loops)
+{
+ while (loops--)
+ io_delay(); /* Approximately 1 us */
+}
+
+static void beep(unsigned int hz)
+{
+ u8 enable;
+
+ if (!hz) {
+ enable = 0x00; /* Turn off speaker */
+ } else {
+ u16 div = 1193181/hz;
+
+ outb(0xb6, 0x43); /* Ctr 2, squarewave, load, binary */
+ io_delay();
+ outb(div, 0x42); /* LSB of counter */
+ io_delay();
+ outb(div >> 8, 0x42); /* MSB of counter */
+ io_delay();
+
+ enable = 0x03; /* Turn on speaker */
+ }
+ inb(0x61); /* Dummy read of System Control Port B */
+ io_delay();
+ outb(enable, 0x61); /* Enable timer 2 output to speaker */
+ io_delay();
+}
+
+#define DOT_HZ 880
+#define DASH_HZ 587
+#define US_PER_DOT 125000
+
+/* Okay, this is totally silly, but it's kind of fun. */
+static void send_morse(const char *pattern)
+{
+ char s;
+
+ while ((s = *pattern++)) {
+ switch (s) {
+ case '.':
+ beep(DOT_HZ);
+ udelay(US_PER_DOT);
+ beep(0);
+ udelay(US_PER_DOT);
+ break;
+ case '-':
+ beep(DASH_HZ);
+ udelay(US_PER_DOT * 3);
+ beep(0);
+ udelay(US_PER_DOT);
+ break;
+ default: /* Assume it's a space */
+ udelay(US_PER_DOT * 3);
+ break;
+ }
+ }
+}
+
+struct port_io_ops pio_ops;
+
+void main(void)
+{
+ init_default_io_ops();
+
+ /* Kill machine if structures are wrong */
+ if (wakeup_header.real_magic != 0x12345678)
+ while (1)
+ ;
+
+ if (wakeup_header.realmode_flags & 4)
+ send_morse("...-");
+
+ if (wakeup_header.realmode_flags & 1)
+ asm volatile("lcallw $0xc000,$3");
+
+ if (wakeup_header.realmode_flags & 2) {
+ /* Need to call BIOS */
+ probe_cards(0);
+ set_mode(wakeup_header.video_mode);
+ }
+}
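For orientation on the beep() arithmetic above: with the 1193181 Hz PIT input clock, DOT_HZ = 880 programs a divisor of 1193181 / 880 = 1355 (integer division), i.e. an actual tone of roughly 880.6 Hz on channel 2, and DASH_HZ = 587 programs 2032 (about 587.2 Hz); the dot/dash durations come from US_PER_DOT = 125000 io_delay() iterations of approximately 1 us each.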
diff --git a/arch/x86/realmode/rm/wakeup.h b/arch/x86/realmode/rm/wakeup.h
new file mode 100644
index 0000000000..0e4fd08ae4
--- /dev/null
+++ b/arch/x86/realmode/rm/wakeup.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Definitions for the wakeup data structure at the head of the
+ * wakeup code.
+ */
+
+#ifndef ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
+#define ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H
+
+#ifndef __ASSEMBLY__
+#include <linux/types.h>
+
+/* This must match data at wakeup.S */
+struct wakeup_header {
+ u16 video_mode; /* Video mode number */
+ u32 pmode_entry; /* Protected mode resume point, 32-bit only */
+ u16 pmode_cs;
+ u32 pmode_cr0; /* Protected mode cr0 */
+ u32 pmode_cr3; /* Protected mode cr3 */
+ u32 pmode_cr4; /* Protected mode cr4 */
+ u32 pmode_efer_low; /* Protected mode EFER */
+ u32 pmode_efer_high;
+ u64 pmode_gdt;
+ u32 pmode_misc_en_low; /* Protected mode MISC_ENABLE */
+ u32 pmode_misc_en_high;
+ u32 pmode_behavior; /* Wakeup routine behavior flags */
+ u32 realmode_flags;
+ u32 real_magic;
+ u32 signature; /* To check we have correct structure */
+} __attribute__((__packed__));
+
+extern struct wakeup_header wakeup_header;
+#endif
+
+#define WAKEUP_HEADER_OFFSET 8
+#define WAKEUP_HEADER_SIGNATURE 0x51ee1111
+
+/* Wakeup behavior bits */
+#define WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE 0
+#define WAKEUP_BEHAVIOR_RESTORE_CR4 1
+#define WAKEUP_BEHAVIOR_RESTORE_EFER 2
+
+#endif /* ARCH_X86_KERNEL_ACPI_RM_WAKEUP_H */
diff --git a/arch/x86/realmode/rm/wakeup_asm.S b/arch/x86/realmode/rm/wakeup_asm.S
new file mode 100644
index 0000000000..02d0ba16ae
--- /dev/null
+++ b/arch/x86/realmode/rm/wakeup_asm.S
@@ -0,0 +1,179 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * ACPI wakeup real mode startup stub
+ */
+#include <linux/linkage.h>
+#include <asm/segment.h>
+#include <asm/msr-index.h>
+#include <asm/page_types.h>
+#include <asm/pgtable_types.h>
+#include <asm/processor-flags.h>
+#include "realmode.h"
+#include "wakeup.h"
+
+ .code16
+
+/* This should match the structure in wakeup.h */
+ .section ".data", "aw"
+
+ .balign 16
+SYM_DATA_START(wakeup_header)
+ video_mode: .short 0 /* Video mode number */
+ pmode_entry: .long 0
+ pmode_cs: .short __KERNEL_CS
+ pmode_cr0: .long 0 /* Saved %cr0 */
+ pmode_cr3: .long 0 /* Saved %cr3 */
+ pmode_cr4: .long 0 /* Saved %cr4 */
+ pmode_efer: .quad 0 /* Saved EFER */
+ pmode_gdt: .quad 0
+ pmode_misc_en: .quad 0 /* Saved MISC_ENABLE MSR */
+ pmode_behavior: .long 0 /* Wakeup behavior flags */
+ realmode_flags: .long 0
+ real_magic: .long 0
+ signature: .long WAKEUP_HEADER_SIGNATURE
+SYM_DATA_END(wakeup_header)
+
+ .text
+ .code16
+
+ .balign 16
+SYM_CODE_START(wakeup_start)
+ cli
+ cld
+
+ LJMPW_RM(3f)
+3:
+ /* Apparently some dimwit BIOS programmers don't know how to
+ program a PM to RM transition, and we might end up here with
+ junk in the data segment descriptor registers. The only way
+ to repair that is to go into PM and fix it ourselves... */
+ movw $16, %cx
+ lgdtl %cs:wakeup_gdt
+ movl %cr0, %eax
+ orb $X86_CR0_PE, %al
+ movl %eax, %cr0
+ ljmpw $8, $2f
+2:
+ movw %cx, %ds
+ movw %cx, %es
+ movw %cx, %ss
+ movw %cx, %fs
+ movw %cx, %gs
+
+ andb $~X86_CR0_PE, %al
+ movl %eax, %cr0
+ LJMPW_RM(3f)
+3:
+ /* Set up segments */
+ movw %cs, %ax
+ movw %ax, %ss
+ movl $rm_stack_end, %esp
+ movw %ax, %ds
+ movw %ax, %es
+ movw %ax, %fs
+ movw %ax, %gs
+
+ lidtl .Lwakeup_idt
+
+ /* Clear the EFLAGS */
+ pushl $0
+ popfl
+
+ /* Check header signature... */
+ movl signature, %eax
+ cmpl $WAKEUP_HEADER_SIGNATURE, %eax
+ jne bogus_real_magic
+
+ /* Check we really have everything... */
+ movl end_signature, %eax
+ cmpl $REALMODE_END_SIGNATURE, %eax
+ jne bogus_real_magic
+
+ /* Call the C code */
+ calll main
+
+ /* Restore MISC_ENABLE before entering protected mode, in case
+ BIOS decided to clear XD_DISABLE during S3. */
+ movl pmode_behavior, %edi
+ btl $WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE, %edi
+ jnc 1f
+
+ movl pmode_misc_en, %eax
+ movl pmode_misc_en + 4, %edx
+ movl $MSR_IA32_MISC_ENABLE, %ecx
+ wrmsr
+1:
+
+ /* Do any other stuff... */
+
+#ifndef CONFIG_64BIT
+ /* This could also be done in C code... */
+ movl pmode_cr3, %eax
+ movl %eax, %cr3
+
+ btl $WAKEUP_BEHAVIOR_RESTORE_CR4, %edi
+ jnc 1f
+ movl pmode_cr4, %eax
+ movl %eax, %cr4
+1:
+ btl $WAKEUP_BEHAVIOR_RESTORE_EFER, %edi
+ jnc 1f
+ movl pmode_efer, %eax
+ movl pmode_efer + 4, %edx
+ movl $MSR_EFER, %ecx
+ wrmsr
+1:
+
+ lgdtl pmode_gdt
+
+ /* This really couldn't... */
+ movl pmode_entry, %eax
+ movl pmode_cr0, %ecx
+ movl %ecx, %cr0
+ ljmpl $__KERNEL_CS, $pa_startup_32
+ /* -> jmp *%eax in trampoline_32.S */
+#else
+ jmp trampoline_start
+#endif
+SYM_CODE_END(wakeup_start)
+
+bogus_real_magic:
+1:
+ hlt
+ jmp 1b
+
+ .section ".rodata","a"
+
+ /*
+ * Set up the wakeup GDT. We set these up as Big Real Mode,
+ * that is, with limits set to 4 GB. At least the Lenovo
+ * Thinkpad X61 is known to need this for the video BIOS
+ * initialization quirk to work; this is likely to also
+ * be the case for other laptops or integrated video devices.
+ */
+
+ .balign 16
+SYM_DATA_START(wakeup_gdt)
+ .word 3*8-1 /* Self-descriptor */
+ .long pa_wakeup_gdt
+ .word 0
+
+ .word 0xffff /* 16-bit code segment @ real_mode_base */
+ .long 0x9b000000 + pa_real_mode_base
+ .word 0x008f /* big real mode */
+
+ .word 0xffff /* 16-bit data segment @ real_mode_base */
+ .long 0x93000000 + pa_real_mode_base
+ .word 0x008f /* big real mode */
+SYM_DATA_END(wakeup_gdt)
+
+ .section ".rodata","a"
+ .balign 8
+
+ /* This is the standard real-mode IDT */
+ .balign 16
+SYM_DATA_START_LOCAL(.Lwakeup_idt)
+ .word 0xffff /* limit */
+ .long 0 /* address */
+ .word 0
+SYM_DATA_END(.Lwakeup_idt)
diff --git a/arch/x86/realmode/rmpiggy.S b/arch/x86/realmode/rmpiggy.S
new file mode 100644
index 0000000000..c8fef76743
--- /dev/null
+++ b/arch/x86/realmode/rmpiggy.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Wrapper script for the realmode binary as a transport object
+ * before copying to low memory.
+ */
+#include <linux/linkage.h>
+#include <asm/page_types.h>
+
+ .section ".init.data","aw"
+
+ .balign PAGE_SIZE
+
+SYM_DATA_START(real_mode_blob)
+ .incbin "arch/x86/realmode/rm/realmode.bin"
+SYM_DATA_END_LABEL(real_mode_blob, SYM_L_GLOBAL, real_mode_blob_end)
+
+SYM_DATA_START(real_mode_relocs)
+ .incbin "arch/x86/realmode/rm/realmode.relocs"
+SYM_DATA_END(real_mode_relocs)
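Finally, a hedged sketch of how the bracketing symbols above are consumed on the kernel side: reserve_real_mode() and setup_real_mode() in init.c both size the copy as PAGE_ALIGN(real_mode_blob_end - real_mode_blob) (cf. real_mode_size_needed() in asm/realmode.h). The helper name below is illustrative, not from the patch:

#include <linux/mm.h>		/* PAGE_ALIGN() */

extern unsigned char real_mode_blob[];		/* emitted by rmpiggy.S above */
extern unsigned char real_mode_blob_end[];

static inline size_t realmode_blob_size(void)
{
	return PAGE_ALIGN(real_mode_blob_end - real_mode_blob);
}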