author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-03 19:25:32 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-06-03 19:25:32 +0000
commit     e8d3ef9fa6e1265bce5992d8f77414220b39d2d9 (patch)
tree       09bf132380f6d9b925db951e6521776afea25d83 /arch/x86
parent     Releasing progress-linux version 5.10.216-1progress6u1. (diff)
download   linux-e8d3ef9fa6e1265bce5992d8f77414220b39d2d9.tar.xz
           linux-e8d3ef9fa6e1265bce5992d8f77414220b39d2d9.zip
Merging upstream version 5.10.218.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86')
-rw-r--r--  arch/x86/entry/entry_64.S              | 17
-rw-r--r--  arch/x86/include/asm/irqflags.h        |  7
-rw-r--r--  arch/x86/include/asm/paravirt.h        |  5
-rw-r--r--  arch/x86/include/asm/paravirt_types.h  |  8
-rw-r--r--  arch/x86/kernel/asm-offsets_64.c       |  2
-rw-r--r--  arch/x86/kernel/paravirt.c             |  5
-rw-r--r--  arch/x86/kernel/paravirt_patch.c       |  4
-rw-r--r--  arch/x86/kvm/x86.c                     | 11
-rw-r--r--  arch/x86/xen/enlighten_pv.c            |  1
-rw-r--r--  arch/x86/xen/xen-asm.S                 | 21
-rw-r--r--  arch/x86/xen/xen-ops.h                 |  2
11 files changed, 18 insertions, 65 deletions
diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S
index 1631a9a15..bd785386d 100644
--- a/arch/x86/entry/entry_64.S
+++ b/arch/x86/entry/entry_64.S
@@ -46,14 +46,6 @@
 .code64
 .section .entry.text, "ax"
 
-#ifdef CONFIG_PARAVIRT_XXL
-SYM_CODE_START(native_usergs_sysret64)
-        UNWIND_HINT_EMPTY
-        swapgs
-        sysretq
-SYM_CODE_END(native_usergs_sysret64)
-#endif /* CONFIG_PARAVIRT_XXL */
-
 /*
  * 64-bit SYSCALL instruction entry. Up to 6 arguments in registers.
  *
@@ -128,7 +120,12 @@ SYM_INNER_LABEL(entry_SYSCALL_64_after_hwframe, SYM_L_GLOBAL)
          * Try to use SYSRET instead of IRET if we're returning to
          * a completely clean 64-bit userspace context.  If we're not,
          * go to the slow exit path.
+         * In the Xen PV case we must use iret anyway.
          */
+
+        ALTERNATIVE "", "jmp swapgs_restore_regs_and_return_to_usermode", \
+                X86_FEATURE_XENPV
+
         movq    RCX(%rsp), %rcx
         movq    RIP(%rsp), %r11
 
@@ -220,7 +217,9 @@ syscall_return_via_sysret:
 
         popq    %rdi
         popq    %rsp
-        USERGS_SYSRET64
+        swapgs
+        CLEAR_CPU_BUFFERS
+        sysretq
 SYM_CODE_END(entry_SYSCALL_64)
 
 /*
diff --git a/arch/x86/include/asm/irqflags.h b/arch/x86/include/asm/irqflags.h
index f40dea50d..e585a4705 100644
--- a/arch/x86/include/asm/irqflags.h
+++ b/arch/x86/include/asm/irqflags.h
@@ -132,13 +132,6 @@ static __always_inline unsigned long arch_local_irq_save(void)
 #endif
 
 #define INTERRUPT_RETURN        jmp native_iret
-#define USERGS_SYSRET64                 \
-        swapgs;                         \
-        CLEAR_CPU_BUFFERS;              \
-        sysretq;
-#define USERGS_SYSRET32                 \
-        swapgs;                         \
-        sysretl
 
 #else
 #define INTERRUPT_RETURN                iret
diff --git a/arch/x86/include/asm/paravirt.h b/arch/x86/include/asm/paravirt.h
index 4a32b0d34..3c89c1f64 100644
--- a/arch/x86/include/asm/paravirt.h
+++ b/arch/x86/include/asm/paravirt.h
@@ -776,11 +776,6 @@ extern void default_banner(void);
 
 #ifdef CONFIG_X86_64
 #ifdef CONFIG_PARAVIRT_XXL
-#define USERGS_SYSRET64                                         \
-        PARA_SITE(PARA_PATCH(PV_CPU_usergs_sysret64),           \
-                  ANNOTATE_RETPOLINE_SAFE;                      \
-                  jmp PARA_INDIRECT(pv_ops+PV_CPU_usergs_sysret64);)
-
 #ifdef CONFIG_DEBUG_ENTRY
 #define SAVE_FLAGS(clobbers)                                    \
         PARA_SITE(PARA_PATCH(PV_IRQ_save_fl),                   \
diff --git a/arch/x86/include/asm/paravirt_types.h b/arch/x86/include/asm/paravirt_types.h
index 903d71884..55d8b7950 100644
--- a/arch/x86/include/asm/paravirt_types.h
+++ b/arch/x86/include/asm/paravirt_types.h
@@ -157,14 +157,6 @@ struct pv_cpu_ops {
 
         u64 (*read_pmc)(int counter);
 
-        /*
-         * Switch to usermode gs and return to 64-bit usermode using
-         * sysret.  Only used in 64-bit kernels to return to 64-bit
-         * processes.  Usermode register state, including %rsp, must
-         * already be restored.
-         */
-        void (*usergs_sysret64)(void);
-
         /* Normal iret.  Jump to this with the standard iret stack
            frame set up. */
         void (*iret)(void);
diff --git a/arch/x86/kernel/asm-offsets_64.c b/arch/x86/kernel/asm-offsets_64.c
index 1354bc306..b14533af7 100644
--- a/arch/x86/kernel/asm-offsets_64.c
+++ b/arch/x86/kernel/asm-offsets_64.c
@@ -13,8 +13,6 @@ int main(void)
 {
 #ifdef CONFIG_PARAVIRT
 #ifdef CONFIG_PARAVIRT_XXL
-        OFFSET(PV_CPU_usergs_sysret64, paravirt_patch_template,
-               cpu.usergs_sysret64);
 #ifdef CONFIG_DEBUG_ENTRY
         OFFSET(PV_IRQ_save_fl, paravirt_patch_template, irq.save_fl);
 #endif
diff --git a/arch/x86/kernel/paravirt.c b/arch/x86/kernel/paravirt.c
index f0e4ad859..9d91061b8 100644
--- a/arch/x86/kernel/paravirt.c
+++ b/arch/x86/kernel/paravirt.c
@@ -124,8 +124,7 @@ unsigned paravirt_patch_default(u8 type, void *insn_buff,
         else if (opfunc == _paravirt_ident_64)
                 ret = paravirt_patch_ident_64(insn_buff, len);
 
-        else if (type == PARAVIRT_PATCH(cpu.iret) ||
-                 type == PARAVIRT_PATCH(cpu.usergs_sysret64))
+        else if (type == PARAVIRT_PATCH(cpu.iret))
                 /* If operation requires a jmp, then jmp */
                 ret = paravirt_patch_jmp(insn_buff, opfunc, addr, len);
 #endif
@@ -159,7 +158,6 @@ static u64 native_steal_clock(int cpu)
 
 /* These are in entry.S */
 extern void native_iret(void);
-extern void native_usergs_sysret64(void);
 
 static struct resource reserve_ioports = {
         .start = 0,
@@ -299,7 +297,6 @@ struct paravirt_patch_template pv_ops = {
 
         .cpu.load_sp0 = native_load_sp0,
 
-        .cpu.usergs_sysret64 = native_usergs_sysret64,
         .cpu.iret = native_iret,
 
 #ifdef CONFIG_X86_IOPL_IOPERM
diff --git a/arch/x86/kernel/paravirt_patch.c b/arch/x86/kernel/paravirt_patch.c
index 7c518b08a..2fada2c34 100644
--- a/arch/x86/kernel/paravirt_patch.c
+++ b/arch/x86/kernel/paravirt_patch.c
@@ -27,7 +27,6 @@ struct patch_xxl {
         const unsigned char     mmu_write_cr3[3];
         const unsigned char     irq_restore_fl[2];
         const unsigned char     cpu_wbinvd[2];
-        const unsigned char     cpu_usergs_sysret64[6];
         const unsigned char     mov64[3];
 };
 
@@ -40,8 +39,6 @@ static const struct patch_xxl patch_data_xxl = {
         .mmu_write_cr3  = { 0x0f, 0x22, 0xdf }, // mov %rdi, %cr3
         .irq_restore_fl = { 0x57, 0x9d },       // push %rdi; popfq
         .cpu_wbinvd     = { 0x0f, 0x09 },       // wbinvd
-        .cpu_usergs_sysret64    = { 0x0f, 0x01, 0xf8,
-                                    0x48, 0x0f, 0x07 }, // swapgs; sysretq
         .mov64          = { 0x48, 0x89, 0xf8 }, // mov %rdi, %rax
 };
 
@@ -83,7 +80,6 @@ unsigned int native_patch(u8 type, void *insn_buff, unsigned long addr,
         PATCH_CASE(mmu, read_cr3, xxl, insn_buff, len);
         PATCH_CASE(mmu, write_cr3, xxl, insn_buff, len);
 
-        PATCH_CASE(cpu, usergs_sysret64, xxl, insn_buff, len);
         PATCH_CASE(cpu, wbinvd, xxl, insn_buff, len);
 #endif
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 8e0b957c6..bc2954393 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -8501,13 +8501,20 @@ static void update_cr8_intercept(struct kvm_vcpu *vcpu)
 
 static void kvm_inject_exception(struct kvm_vcpu *vcpu)
 {
+        /*
+         * Suppress the error code if the vCPU is in Real Mode, as Real Mode
+         * exceptions don't report error codes.  The presence of an error code
+         * is carried with the exception and only stripped when the exception
+         * is injected as intercepted #PF VM-Exits for AMD's Paged Real Mode do
+         * report an error code despite the CPU being in Real Mode.
+         */
+        vcpu->arch.exception.has_error_code &= is_protmode(vcpu);
+
         trace_kvm_inj_exception(vcpu->arch.exception.nr,
                                 vcpu->arch.exception.has_error_code,
                                 vcpu->arch.exception.error_code,
                                 vcpu->arch.exception.injected);
 
-        if (vcpu->arch.exception.error_code && !is_protmode(vcpu))
-                vcpu->arch.exception.error_code = false;
         kvm_x86_ops.queue_exception(vcpu);
 }
diff --git a/arch/x86/xen/enlighten_pv.c b/arch/x86/xen/enlighten_pv.c
index 94804670c..b1efc4b4f 100644
--- a/arch/x86/xen/enlighten_pv.c
+++ b/arch/x86/xen/enlighten_pv.c
@@ -1059,7 +1059,6 @@ static const struct pv_cpu_ops xen_cpu_ops __initconst = {
         .read_pmc = xen_read_pmc,
 
         .iret = xen_iret,
-        .usergs_sysret64 = xen_sysret64,
 
         .load_tr_desc = paravirt_nop,
         .set_ldt = xen_set_ldt,
diff --git a/arch/x86/xen/xen-asm.S b/arch/x86/xen/xen-asm.S
index e3031afcb..3a33713cf 100644
--- a/arch/x86/xen/xen-asm.S
+++ b/arch/x86/xen/xen-asm.S
@@ -220,27 +220,6 @@ SYM_CODE_START(xen_iret)
         jmp hypercall_iret
 SYM_CODE_END(xen_iret)
 
-SYM_CODE_START(xen_sysret64)
-        UNWIND_HINT_EMPTY
-        /*
-         * We're already on the usermode stack at this point, but
-         * still with the kernel gs, so we can easily switch back.
-         *
-         * tss.sp2 is scratch space.
-         */
-        movq %rsp, PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
-        movq PER_CPU_VAR(cpu_current_top_of_stack), %rsp
-
-        pushq $__USER_DS
-        pushq PER_CPU_VAR(cpu_tss_rw + TSS_sp2)
-        pushq %r11
-        pushq $__USER_CS
-        pushq %rcx
-
-        pushq $VGCF_in_syscall
-        jmp hypercall_iret
-SYM_CODE_END(xen_sysret64)
-
 /*
  * XEN pv doesn't use trampoline stack, PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
  * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
diff --git a/arch/x86/xen/xen-ops.h b/arch/x86/xen/xen-ops.h
index 8695809b8..98242430d 100644
--- a/arch/x86/xen/xen-ops.h
+++ b/arch/x86/xen/xen-ops.h
@@ -138,8 +138,6 @@ __visible unsigned long xen_read_cr2_direct(void);
 
 /* These are not functions, and cannot be called normally */
 __visible void xen_iret(void);
-__visible void xen_sysret32(void);
-__visible void xen_sysret64(void);
 
 extern int xen_panic_handler_init(void);