author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:27 +0000
commit     34996e42f82bfd60bc2c191e5cae3c6ab233ec6c (patch)
tree       62db60558cbf089714b48daeabca82bf2b20b20e /arch/x86/lib
parent     Adding debian version 6.8.12-1. (diff)
Merging upstream version 6.9.7.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/x86/lib')
-rw-r--r--   arch/x86/lib/Makefile            |  2
-rw-r--r--   arch/x86/lib/cmpxchg16b_emu.S    | 12
-rw-r--r--   arch/x86/lib/cmpxchg8b_emu.S     | 30
-rw-r--r--   arch/x86/lib/getuser.S           |  6
-rw-r--r--   arch/x86/lib/insn-eval.c         |  6
-rw-r--r--   arch/x86/lib/insn.c              | 58
-rw-r--r--   arch/x86/lib/msr-smp.c           | 12
-rw-r--r--   arch/x86/lib/msr.c               |  6
-rw-r--r--   arch/x86/lib/retpoline.S         | 46
-rw-r--r--   arch/x86/lib/x86-opcode-map.txt  |  4
10 files changed, 93 insertions(+), 89 deletions(-)
diff --git a/arch/x86/lib/Makefile b/arch/x86/lib/Makefile
index f0dae4fb6d..6da73513f0 100644
--- a/arch/x86/lib/Makefile
+++ b/arch/x86/lib/Makefile
@@ -36,7 +36,7 @@ lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_mc.o copy_mc_64.o
 lib-$(CONFIG_INSTRUCTION_DECODER) += insn.o inat.o insn-eval.o
 lib-$(CONFIG_RANDOMIZE_BASE) += kaslr.o
 lib-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
-lib-$(CONFIG_RETPOLINE) += retpoline.o
+lib-$(CONFIG_MITIGATION_RETPOLINE) += retpoline.o
 
 obj-y += msr.o msr-reg.o msr-reg-export.o hweight.o
 obj-y += iomem.o
diff --git a/arch/x86/lib/cmpxchg16b_emu.S b/arch/x86/lib/cmpxchg16b_emu.S
index 6962df3157..4fb44894ad 100644
--- a/arch/x86/lib/cmpxchg16b_emu.S
+++ b/arch/x86/lib/cmpxchg16b_emu.S
@@ -23,14 +23,14 @@ SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
 	cli
 
 	/* if (*ptr == old) */
-	cmpq	PER_CPU_VAR(0(%rsi)), %rax
+	cmpq	__percpu (%rsi), %rax
 	jne	.Lnot_same
-	cmpq	PER_CPU_VAR(8(%rsi)), %rdx
+	cmpq	__percpu 8(%rsi), %rdx
 	jne	.Lnot_same
 
 	/* *ptr = new */
-	movq	%rbx, PER_CPU_VAR(0(%rsi))
-	movq	%rcx, PER_CPU_VAR(8(%rsi))
+	movq	%rbx, __percpu (%rsi)
+	movq	%rcx, __percpu 8(%rsi)
 
 	/* set ZF in EFLAGS to indicate success */
 	orl	$X86_EFLAGS_ZF, (%rsp)
@@ -42,8 +42,8 @@ SYM_FUNC_START(this_cpu_cmpxchg16b_emu)
 	/* *ptr != old */
 
 	/* old = *ptr */
-	movq	PER_CPU_VAR(0(%rsi)), %rax
-	movq	PER_CPU_VAR(8(%rsi)), %rdx
+	movq	__percpu (%rsi), %rax
+	movq	__percpu 8(%rsi), %rdx
 
 	/* clear ZF in EFLAGS to indicate failure */
 	andl	$(~X86_EFLAGS_ZF), (%rsp)
diff --git a/arch/x86/lib/cmpxchg8b_emu.S b/arch/x86/lib/cmpxchg8b_emu.S
index 873e4ef23e..1c96be769a 100644
--- a/arch/x86/lib/cmpxchg8b_emu.S
+++ b/arch/x86/lib/cmpxchg8b_emu.S
@@ -24,12 +24,12 @@ SYM_FUNC_START(cmpxchg8b_emu)
 	pushfl
 	cli
 
-	cmpl	0(%esi), %eax
+	cmpl	(%esi), %eax
 	jne	.Lnot_same
 	cmpl	4(%esi), %edx
 	jne	.Lnot_same
 
-	movl	%ebx, 0(%esi)
+	movl	%ebx, (%esi)
 	movl	%ecx, 4(%esi)
 
 	orl	$X86_EFLAGS_ZF, (%esp)
@@ -38,7 +38,7 @@ SYM_FUNC_START(cmpxchg8b_emu)
 	RET
 
 .Lnot_same:
-	movl	0(%esi), %eax
+	movl	(%esi), %eax
 	movl	4(%esi), %edx
 
 	andl	$(~X86_EFLAGS_ZF), (%esp)
@@ -53,18 +53,30 @@ EXPORT_SYMBOL(cmpxchg8b_emu)
 
 #ifndef CONFIG_UML
 
+/*
+ * Emulate 'cmpxchg8b %fs:(%rsi)'
+ *
+ * Inputs:
+ * %esi : memory location to compare
+ * %eax : low 32 bits of old value
+ * %edx : high 32 bits of old value
+ * %ebx : low 32 bits of new value
+ * %ecx : high 32 bits of new value
+ *
+ * Notably this is not LOCK prefixed and is not safe against NMIs
+ */
 SYM_FUNC_START(this_cpu_cmpxchg8b_emu)
 
 	pushfl
 	cli
 
-	cmpl	PER_CPU_VAR(0(%esi)), %eax
+	cmpl	__percpu (%esi), %eax
 	jne	.Lnot_same2
-	cmpl	PER_CPU_VAR(4(%esi)), %edx
+	cmpl	__percpu 4(%esi), %edx
 	jne	.Lnot_same2
 
-	movl	%ebx, PER_CPU_VAR(0(%esi))
-	movl	%ecx, PER_CPU_VAR(4(%esi))
+	movl	%ebx, __percpu (%esi)
+	movl	%ecx, __percpu 4(%esi)
 
 	orl	$X86_EFLAGS_ZF, (%esp)
 
@@ -72,8 +84,8 @@ SYM_FUNC_START(this_cpu_cmpxchg8b_emu)
 	RET
 
 .Lnot_same2:
-	movl	PER_CPU_VAR(0(%esi)), %eax
-	movl	PER_CPU_VAR(4(%esi)), %edx
+	movl	__percpu (%esi), %eax
+	movl	__percpu 4(%esi), %edx
 
 	andl	$(~X86_EFLAGS_ZF), (%esp)
diff --git a/arch/x86/lib/getuser.S b/arch/x86/lib/getuser.S
index 10d5ed8b59..a1cb3a4e67 100644
--- a/arch/x86/lib/getuser.S
+++ b/arch/x86/lib/getuser.S
@@ -44,7 +44,11 @@
 	or %rdx, %rax
 .else
 	cmp $TASK_SIZE_MAX-\size+1, %eax
+.if \size != 8
 	jae .Lbad_get_user
+.else
+	jae .Lbad_get_user_8
+.endif
 	sbb %edx, %edx		/* array_index_mask_nospec() */
 	and %edx, %eax
 .endif
@@ -154,7 +158,7 @@ SYM_CODE_END(__get_user_handle_exception)
 #ifdef CONFIG_X86_32
 SYM_CODE_START_LOCAL(__get_user_8_handle_exception)
 	ASM_CLAC
-bad_get_user_8:
+.Lbad_get_user_8:
 	xor %edx,%edx
 	xor %ecx,%ecx
 	mov $(-EFAULT),%_ASM_AX
diff --git a/arch/x86/lib/insn-eval.c b/arch/x86/lib/insn-eval.c
index 558a605929..98631c0e7a 100644
--- a/arch/x86/lib/insn-eval.c
+++ b/arch/x86/lib/insn-eval.c
@@ -1129,15 +1129,15 @@ static int get_eff_addr_modrm_16(struct insn *insn, struct pt_regs *regs,
  * get_eff_addr_sib() - Obtain referenced effective address via SIB
  * @insn:	Instruction. Must be valid.
  * @regs:	Register values as seen when entering kernel mode
- * @regoff:	Obtained operand offset, in pt_regs, associated with segment
+ * @base_offset: Obtained operand offset, in pt_regs, associated with segment
  * @eff_addr:	Obtained effective address
  *
  * Obtain the effective address referenced by the SIB byte of @insn. After
  * identifying the registers involved in the indexed, register-indirect memory
  * reference, its value is obtained from the operands in @regs. The computed
  * address is stored @eff_addr. Also, the register operand that indicates the
- * associated segment is stored in @regoff, this parameter can later be used to
- * determine such segment.
+ * associated segment is stored in @base_offset; this parameter can later be
+ * used to determine such segment.
  *
  * Returns:
  *
diff --git a/arch/x86/lib/insn.c b/arch/x86/lib/insn.c
index 55e371cc69..1bb155a095 100644
--- a/arch/x86/lib/insn.c
+++ b/arch/x86/lib/insn.c
@@ -71,7 +71,7 @@ void insn_init(struct insn *insn, const void *kaddr, int buf_len, int x86_64)
 	insn->kaddr = kaddr;
 	insn->end_kaddr = kaddr + buf_len;
 	insn->next_byte = kaddr;
-	insn->x86_64 = x86_64 ? 1 : 0;
+	insn->x86_64 = x86_64;
 	insn->opnd_bytes = 4;
 	if (x86_64)
 		insn->addr_bytes = 8;
@@ -268,11 +268,9 @@ int insn_get_opcode(struct insn *insn)
 	if (opcode->got)
 		return 0;
 
-	if (!insn->prefixes.got) {
-		ret = insn_get_prefixes(insn);
-		if (ret)
-			return ret;
-	}
+	ret = insn_get_prefixes(insn);
+	if (ret)
+		return ret;
 
 	/* Get first opcode */
 	op = get_next(insn_byte_t, insn);
@@ -339,11 +337,9 @@ int insn_get_modrm(struct insn *insn)
 	if (modrm->got)
 		return 0;
 
-	if (!insn->opcode.got) {
-		ret = insn_get_opcode(insn);
-		if (ret)
-			return ret;
-	}
+	ret = insn_get_opcode(insn);
+	if (ret)
+		return ret;
 
 	if (inat_has_modrm(insn->attr)) {
 		mod = get_next(insn_byte_t, insn);
@@ -386,11 +382,9 @@ int insn_rip_relative(struct insn *insn)
 	if (!insn->x86_64)
 		return 0;
 
-	if (!modrm->got) {
-		ret = insn_get_modrm(insn);
-		if (ret)
-			return 0;
-	}
+	ret = insn_get_modrm(insn);
+	if (ret)
+		return 0;
 	/*
 	 * For rip-relative instructions, the mod field (top 2 bits)
 	 * is zero and the r/m field (bottom 3 bits) is 0x5.
@@ -417,11 +411,9 @@ int insn_get_sib(struct insn *insn)
 	if (insn->sib.got)
 		return 0;
 
-	if (!insn->modrm.got) {
-		ret = insn_get_modrm(insn);
-		if (ret)
-			return ret;
-	}
+	ret = insn_get_modrm(insn);
+	if (ret)
+		return ret;
 
 	if (insn->modrm.nbytes) {
 		modrm = insn->modrm.bytes[0];
@@ -460,11 +452,9 @@ int insn_get_displacement(struct insn *insn)
 	if (insn->displacement.got)
 		return 0;
 
-	if (!insn->sib.got) {
-		ret = insn_get_sib(insn);
-		if (ret)
-			return ret;
-	}
+	ret = insn_get_sib(insn);
+	if (ret)
+		return ret;
 
 	if (insn->modrm.nbytes) {
 		/*
@@ -628,11 +618,9 @@ int insn_get_immediate(struct insn *insn)
 	if (insn->immediate.got)
 		return 0;
 
-	if (!insn->displacement.got) {
-		ret = insn_get_displacement(insn);
-		if (ret)
-			return ret;
-	}
+	ret = insn_get_displacement(insn);
+	if (ret)
+		return ret;
 
 	if (inat_has_moffset(insn->attr)) {
 		if (!__get_moffset(insn))
@@ -703,11 +691,9 @@ int insn_get_length(struct insn *insn)
 	if (insn->length)
 		return 0;
 
-	if (!insn->immediate.got) {
-		ret = insn_get_immediate(insn);
-		if (ret)
-			return ret;
-	}
+	ret = insn_get_immediate(insn);
+	if (ret)
+		return ret;
 
 	insn->length = (unsigned char)((unsigned long)insn->next_byte
 				     - (unsigned long)insn->kaddr);
diff --git a/arch/x86/lib/msr-smp.c b/arch/x86/lib/msr-smp.c
index 40bbe56bde..acd463d887 100644
--- a/arch/x86/lib/msr-smp.c
+++ b/arch/x86/lib/msr-smp.c
@@ -9,10 +9,9 @@
 static void __rdmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
+		reg = this_cpu_ptr(rv->msrs);
 	else
 		reg = &rv->reg;
@@ -23,10 +22,9 @@
 static void __wrmsr_on_cpu(void *info)
 {
 	struct msr_info *rv = info;
 	struct msr *reg;
-	int this_cpu = raw_smp_processor_id();
 
 	if (rv->msrs)
-		reg = per_cpu_ptr(rv->msrs, this_cpu);
+		reg = this_cpu_ptr(rv->msrs);
 	else
 		reg = &rv->reg;
@@ -97,7 +95,7 @@ int wrmsrl_on_cpu(unsigned int cpu, u32 msr_no, u64 q)
 EXPORT_SYMBOL(wrmsrl_on_cpu);
 
 static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
-			    struct msr *msrs,
+			    struct msr __percpu *msrs,
 			    void (*msr_func) (void *info))
 {
 	struct msr_info rv;
@@ -124,7 +122,7 @@ static void __rwmsr_on_cpus(const struct cpumask *mask, u32 msr_no,
  * @msrs:	array of MSR values
  *
  */
-void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void rdmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
 {
 	__rwmsr_on_cpus(mask, msr_no, msrs, __rdmsr_on_cpu);
 }
@@ -138,7 +136,7 @@ EXPORT_SYMBOL(rdmsr_on_cpus);
  * @msrs:	array of MSR values
  *
  */
-void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr *msrs)
+void wrmsr_on_cpus(const struct cpumask *mask, u32 msr_no, struct msr __percpu *msrs)
 {
 	__rwmsr_on_cpus(mask, msr_no, msrs, __wrmsr_on_cpu);
 }
diff --git a/arch/x86/lib/msr.c b/arch/x86/lib/msr.c
index 47fd9bd6b9..4bf4fad5b1 100644
--- a/arch/x86/lib/msr.c
+++ b/arch/x86/lib/msr.c
@@ -6,9 +6,9 @@
 #define CREATE_TRACE_POINTS
 #include <asm/msr-trace.h>
 
-struct msr *msrs_alloc(void)
+struct msr __percpu *msrs_alloc(void)
 {
-	struct msr *msrs = NULL;
+	struct msr __percpu *msrs = NULL;
 
 	msrs = alloc_percpu(struct msr);
 	if (!msrs) {
@@ -20,7 +20,7 @@ struct msr *msrs_alloc(void)
 }
 EXPORT_SYMBOL(msrs_alloc);
 
-void msrs_free(struct msr *msrs)
+void msrs_free(struct msr __percpu *msrs)
 {
 	free_percpu(msrs);
 }
diff --git a/arch/x86/lib/retpoline.S b/arch/x86/lib/retpoline.S
index 5b26a952e2..391059b2c6 100644
--- a/arch/x86/lib/retpoline.S
+++ b/arch/x86/lib/retpoline.S
@@ -71,7 +71,7 @@ SYM_CODE_END(__x86_indirect_thunk_array)
 #include <asm/GEN-for-each-reg.h>
 #undef GEN
 
-#ifdef CONFIG_CALL_DEPTH_TRACKING
+#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 
 .macro CALL_THUNK reg
 	.align RETPOLINE_THUNK_SIZE
@@ -127,7 +127,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 #undef GEN
 #endif
 
-#ifdef CONFIG_RETHUNK
+#ifdef CONFIG_MITIGATION_RETHUNK
 
 /*
  * Be careful here: that label cannot really be removed because in
@@ -138,7 +138,7 @@ SYM_CODE_END(__x86_indirect_jump_thunk_array)
 */
	.section .text..__x86.return_thunk

-#ifdef CONFIG_CPU_SRSO
+#ifdef CONFIG_MITIGATION_SRSO

 /*
  * srso_alias_untrain_ret() and srso_alias_safe_ret() are placed at
@@ -225,8 +225,7 @@ SYM_CODE_START(srso_return_thunk)
 SYM_CODE_END(srso_return_thunk)
 
 #define JMP_SRSO_UNTRAIN_RET "jmp srso_untrain_ret"
-#else /* !CONFIG_CPU_SRSO */
-#define JMP_SRSO_UNTRAIN_RET "ud2"
+#else /* !CONFIG_MITIGATION_SRSO */
 /* Dummy for the alternative in CALL_UNTRAIN_RET. */
 SYM_CODE_START(srso_alias_untrain_ret)
 	ANNOTATE_UNRET_SAFE
@@ -235,9 +234,10 @@ SYM_CODE_START(srso_alias_untrain_ret)
 	int3
 SYM_FUNC_END(srso_alias_untrain_ret)
 __EXPORT_THUNK(srso_alias_untrain_ret)
-#endif /* CONFIG_CPU_SRSO */
+#define JMP_SRSO_UNTRAIN_RET "ud2"
+#endif /* CONFIG_MITIGATION_SRSO */
 
-#ifdef CONFIG_CPU_UNRET_ENTRY
+#ifdef CONFIG_MITIGATION_UNRET_ENTRY
 
 /*
  * Some generic notes on the untraining sequences:
@@ -319,20 +319,20 @@ SYM_CODE_END(retbleed_return_thunk)
 SYM_FUNC_END(retbleed_untrain_ret)
 
 #define JMP_RETBLEED_UNTRAIN_RET "jmp retbleed_untrain_ret"
-#else /* !CONFIG_CPU_UNRET_ENTRY */
+#else /* !CONFIG_MITIGATION_UNRET_ENTRY */
 #define JMP_RETBLEED_UNTRAIN_RET "ud2"
-#endif /* CONFIG_CPU_UNRET_ENTRY */
+#endif /* CONFIG_MITIGATION_UNRET_ENTRY */
 
-#if defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO)
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || defined(CONFIG_MITIGATION_SRSO)
 
 SYM_FUNC_START(entry_untrain_ret)
 	ALTERNATIVE JMP_RETBLEED_UNTRAIN_RET, JMP_SRSO_UNTRAIN_RET, X86_FEATURE_SRSO
 SYM_FUNC_END(entry_untrain_ret)
 __EXPORT_THUNK(entry_untrain_ret)
-#endif /* CONFIG_CPU_UNRET_ENTRY || CONFIG_CPU_SRSO */
+#endif /* CONFIG_MITIGATION_UNRET_ENTRY || CONFIG_MITIGATION_SRSO */
 
-#ifdef CONFIG_CALL_DEPTH_TRACKING
+#ifdef CONFIG_MITIGATION_CALL_DEPTH_TRACKING
 
 	.align 64
 SYM_FUNC_START(call_depth_return_thunk)
@@ -364,7 +364,7 @@ SYM_FUNC_START(call_depth_return_thunk)
 	int3
 SYM_FUNC_END(call_depth_return_thunk)
 
-#endif /* CONFIG_CALL_DEPTH_TRACKING */
+#endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
 
 /*
  * This function name is magical and is used by -mfunction-return=thunk-extern
@@ -374,21 +374,25 @@ SYM_FUNC_END(call_depth_return_thunk)
  * 'JMP __x86_return_thunk' sites are changed to something else by
  * apply_returns().
  *
- * This should be converted eventually to call a warning function which
- * should scream loudly when the default return thunk is called after
- * alternatives have been applied.
- *
- * That warning function cannot BUG() because the bug splat cannot be
- * displayed in all possible configurations, leading to users not really
- * knowing why the machine froze.
+ * The ALTERNATIVE below adds a really loud warning to catch the case
+ * where the insufficient default return thunk ends up getting used for
+ * whatever reason like miscompilation or failure of
+ * objtool/alternatives/etc to patch all the return sites.
 */
 SYM_CODE_START(__x86_return_thunk)
 	UNWIND_HINT_FUNC
 	ANNOTATE_NOENDBR
+#if defined(CONFIG_MITIGATION_UNRET_ENTRY) || \
+    defined(CONFIG_MITIGATION_SRSO) || \
+    defined(CONFIG_MITIGATION_CALL_DEPTH_TRACKING)
+	ALTERNATIVE __stringify(ANNOTATE_UNRET_SAFE; ret), \
+		   "jmp warn_thunk_thunk", X86_FEATURE_ALWAYS
+#else
 	ANNOTATE_UNRET_SAFE
 	ret
+#endif
 	int3
 SYM_CODE_END(__x86_return_thunk)
 EXPORT_SYMBOL(__x86_return_thunk)
 
-#endif /* CONFIG_RETHUNK */
+#endif /* CONFIG_MITIGATION_RETHUNK */
diff --git a/arch/x86/lib/x86-opcode-map.txt b/arch/x86/lib/x86-opcode-map.txt
index d1ccd06c53..da9347552b 100644
--- a/arch/x86/lib/x86-opcode-map.txt
+++ b/arch/x86/lib/x86-opcode-map.txt
@@ -1051,8 +1051,8 @@ GrpTable: Grp6
 EndTable
 
 GrpTable: Grp7
-0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B)
-1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B)
+0: SGDT Ms | VMCALL (001),(11B) | VMLAUNCH (010),(11B) | VMRESUME (011),(11B) | VMXOFF (100),(11B) | PCONFIG (101),(11B) | ENCLV (000),(11B) | WRMSRNS (110),(11B)
+1: SIDT Ms | MONITOR (000),(11B) | MWAIT (001),(11B) | CLAC (010),(11B) | STAC (011),(11B) | ENCLS (111),(11B) | ERETU (F3),(010),(11B) | ERETS (F2),(010),(11B)
 2: LGDT Ms | XGETBV (000),(11B) | XSETBV (001),(11B) | VMFUNC (100),(11B) | XEND (101)(11B) | XTEST (110)(11B) | ENCLU (111),(11B)
 3: LIDT Ms
 4: SMSW Mw/Rv
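
The comment block added to cmpxchg8b_emu.S above documents the calling convention of this_cpu_cmpxchg8b_emu. As a reading aid, here is a C-level sketch of the semantics the assembly emulates; the helper name is hypothetical, and the real routine works on the %eax/%edx and %ebx/%ecx register pairs and reports success through ZF in the stacked EFLAGS:

#include <linux/irqflags.h>
#include <linux/types.h>

/*
 * Sketch: a 64-bit compare-and-exchange on a per-CPU variable, done with
 * interrupts disabled rather than a LOCK prefix -- which is exactly why
 * the new comment warns that the emulation is not safe against NMIs.
 */
static bool cmpxchg8b_emu_semantics(u64 *ptr, u64 *old, u64 new)
{
	unsigned long flags;
	bool ok;

	local_irq_save(flags);		/* the asm does pushfl; cli */
	ok = (*ptr == *old);
	if (ok)
		*ptr = new;		/* success: ZF set in saved EFLAGS */
	else
		*old = *ptr;		/* failure: return current value, ZF clear */
	local_irq_restore(flags);	/* popfl */

	return ok;
}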
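
The insn.c hunks all delete the same pattern: callers no longer test the previous stage's ->got flag before invoking it. That guard was redundant because every insn_get_*() stage is itself idempotent -- it returns 0 straight away once its field has been decoded -- and each stage unconditionally pulls in its predecessor. One consequence is that requesting the final stage decodes the whole instruction; a minimal caller sketch (hypothetical helper, error handling trimmed):

#include <asm/insn.h>

/*
 * Decode one instruction from a kernel buffer and return its length.
 * insn_get_length() chains through immediate -> displacement -> SIB ->
 * ModRM -> opcode -> prefixes, so no per-stage calls are needed.
 */
static int example_decoded_length(const void *kaddr, int buf_len)
{
	struct insn insn;
	int ret;

	insn_init(&insn, kaddr, buf_len, IS_ENABLED(CONFIG_X86_64));

	ret = insn_get_length(&insn);	/* runs the whole decode chain */
	if (ret)
		return ret;

	return insn.length;
}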
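
The msr.c and msr-smp.c hunks thread the sparse __percpu address-space annotation through the MSR helper API, so msrs_alloc()/msrs_free() and rdmsr_on_cpus()/wrmsr_on_cpus() now agree with the alloc_percpu() allocation they wrap. A usage sketch under the new prototypes (hypothetical function; the MSR chosen is only an example):

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/percpu.h>
#include <linux/printk.h>
#include <asm/msr.h>
#include <asm/msr-index.h>

/* Read IA32_ENERGY_PERF_BIAS on every online CPU. */
static int example_dump_epb(void)
{
	struct msr __percpu *msrs;	/* annotated, matching msrs_alloc() */
	int cpu;

	msrs = msrs_alloc();		/* wraps alloc_percpu(struct msr) */
	if (!msrs)
		return -ENOMEM;

	rdmsr_on_cpus(cpu_online_mask, MSR_IA32_ENERGY_PERF_BIAS, msrs);

	for_each_online_cpu(cpu) {
		struct msr *m = per_cpu_ptr(msrs, cpu);	/* strips __percpu */

		pr_info("cpu%d: EPB=0x%llx\n", cpu, m->q);
	}

	msrs_free(msrs);
	return 0;
}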