From 953b921688bb14675ebdf3c36148d5a6cd7e1a76 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Mon, 3 Jun 2024 07:09:00 +0200
Subject: Merging upstream version 6.8.12.

Signed-off-by: Daniel Baumann
---
 arch/x86/mm/fault.c          | 33 +--------------------
 arch/x86/mm/numa.c           |  4 +--
 arch/x86/mm/pat/set_memory.c | 68 +++++++++++++++++++++++++++++++++++---------
 3 files changed, 57 insertions(+), 48 deletions(-)

(limited to 'arch/x86/mm')

diff --git a/arch/x86/mm/fault.c b/arch/x86/mm/fault.c
index b01df023de..e604d2d6cc 100644
--- a/arch/x86/mm/fault.c
+++ b/arch/x86/mm/fault.c
@@ -717,39 +717,8 @@ kernelmode_fixup_or_oops(struct pt_regs *regs, unsigned long error_code,
 	WARN_ON_ONCE(user_mode(regs));

 	/* Are we prepared to handle this kernel fault? */
-	if (fixup_exception(regs, X86_TRAP_PF, error_code, address)) {
-		/*
-		 * Any interrupt that takes a fault gets the fixup. This makes
-		 * the below recursive fault logic only apply to a faults from
-		 * task context.
-		 */
-		if (in_interrupt())
-			return;
-
-		/*
-		 * Per the above we're !in_interrupt(), aka. task context.
-		 *
-		 * In this case we need to make sure we're not recursively
-		 * faulting through the emulate_vsyscall() logic.
-		 */
-		if (current->thread.sig_on_uaccess_err && signal) {
-			sanitize_error_code(address, &error_code);
-
-			set_signal_archinfo(address, error_code);
-
-			if (si_code == SEGV_PKUERR) {
-				force_sig_pkuerr((void __user *)address, pkey);
-			} else {
-				/* XXX: hwpoison faults will set the wrong code. */
-				force_sig_fault(signal, si_code, (void __user *)address);
-			}
-		}
-
-		/*
-		 * Barring that, we can do the fixup and be happy.
-		 */
+	if (fixup_exception(regs, X86_TRAP_PF, error_code, address))
 		return;
-	}

 	/*
 	 * AMD erratum #91 manifests as a spurious page fault on a PREFETCH
diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index 65e9a6e391..ce84ba86e6 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -929,6 +929,8 @@ int memory_add_physaddr_to_nid(u64 start)
 }
 EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);

+#endif
+
 static int __init cmp_memblk(const void *a, const void *b)
 {
 	const struct numa_memblk *ma = *(const struct numa_memblk **)a;
@@ -1001,5 +1003,3 @@ int __init numa_fill_memblks(u64 start, u64 end)
 	}
 	return 0;
 }
-
-#endif
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index 135bb594df..b4073fb452 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -619,7 +619,8 @@ static inline pgprot_t static_protections(pgprot_t prot, unsigned long start,
  * Validate strict W^X semantics.
  */
 static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long start,
-				  unsigned long pfn, unsigned long npg)
+				  unsigned long pfn, unsigned long npg,
+				  bool nx, bool rw)
 {
 	unsigned long end;

@@ -641,6 +642,10 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star
 	if ((pgprot_val(new) & (_PAGE_RW | _PAGE_NX)) != _PAGE_RW)
 		return new;

+	/* Non-leaf translation entries can disable writing or execution. */
+	if (!rw || nx)
+		return new;
+
 	end = start + npg * PAGE_SIZE - 1;
 	WARN_ONCE(1, "CPA detected W^X violation: %016llx -> %016llx range: 0x%016lx - 0x%016lx PFN %lx\n",
 		  (unsigned long long)pgprot_val(old),
@@ -657,20 +662,26 @@ static inline pgprot_t verify_rwx(pgprot_t old, pgprot_t new, unsigned long star

 /*
  * Lookup the page table entry for a virtual address in a specific pgd.
- * Return a pointer to the entry and the level of the mapping.
+ * Return a pointer to the entry, the level of the mapping, and the effective
+ * NX and RW bits of all page table levels.
  */
-pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
-			     unsigned int *level)
+pte_t *lookup_address_in_pgd_attr(pgd_t *pgd, unsigned long address,
+				  unsigned int *level, bool *nx, bool *rw)
 {
 	p4d_t *p4d;
 	pud_t *pud;
 	pmd_t *pmd;

 	*level = PG_LEVEL_NONE;
+	*nx = false;
+	*rw = true;

 	if (pgd_none(*pgd))
 		return NULL;

+	*nx |= pgd_flags(*pgd) & _PAGE_NX;
+	*rw &= pgd_flags(*pgd) & _PAGE_RW;
+
 	p4d = p4d_offset(pgd, address);
 	if (p4d_none(*p4d))
 		return NULL;
@@ -679,6 +690,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (p4d_large(*p4d) || !p4d_present(*p4d))
 		return (pte_t *)p4d;

+	*nx |= p4d_flags(*p4d) & _PAGE_NX;
+	*rw &= p4d_flags(*p4d) & _PAGE_RW;
+
 	pud = pud_offset(p4d, address);
 	if (pud_none(*pud))
 		return NULL;
@@ -687,6 +701,9 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (pud_leaf(*pud) || !pud_present(*pud))
 		return (pte_t *)pud;

+	*nx |= pud_flags(*pud) & _PAGE_NX;
+	*rw &= pud_flags(*pud) & _PAGE_RW;
+
 	pmd = pmd_offset(pud, address);
 	if (pmd_none(*pmd))
 		return NULL;
@@ -695,11 +712,26 @@ pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
 	if (pmd_large(*pmd) || !pmd_present(*pmd))
 		return (pte_t *)pmd;

+	*nx |= pmd_flags(*pmd) & _PAGE_NX;
+	*rw &= pmd_flags(*pmd) & _PAGE_RW;
+
 	*level = PG_LEVEL_4K;

 	return pte_offset_kernel(pmd, address);
 }

+/*
+ * Lookup the page table entry for a virtual address in a specific pgd.
+ * Return a pointer to the entry and the level of the mapping.
+ */
+pte_t *lookup_address_in_pgd(pgd_t *pgd, unsigned long address,
+			     unsigned int *level)
+{
+	bool nx, rw;
+
+	return lookup_address_in_pgd_attr(pgd, address, level, &nx, &rw);
+}
+
 /*
  * Lookup the page table entry for a virtual address. Return a pointer
  * to the entry and the level of the mapping.
@@ -715,13 +747,16 @@ pte_t *lookup_address(unsigned long address, unsigned int *level)
 EXPORT_SYMBOL_GPL(lookup_address);

 static pte_t *_lookup_address_cpa(struct cpa_data *cpa, unsigned long address,
-				  unsigned int *level)
+				  unsigned int *level, bool *nx, bool *rw)
 {
-	if (cpa->pgd)
-		return lookup_address_in_pgd(cpa->pgd + pgd_index(address),
-					     address, level);
+	pgd_t *pgd;
+
+	if (!cpa->pgd)
+		pgd = pgd_offset_k(address);
+	else
+		pgd = cpa->pgd + pgd_index(address);

-	return lookup_address(address, level);
+	return lookup_address_in_pgd_attr(pgd, address, level, nx, rw);
 }

 /*
@@ -849,12 +884,13 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	pgprot_t old_prot, new_prot, req_prot, chk_prot;
 	pte_t new_pte, *tmp;
 	enum pg_level level;
+	bool nx, rw;

 	/*
 	 * Check for races, another CPU might have split this page
 	 * up already:
 	 */
-	tmp = _lookup_address_cpa(cpa, address, &level);
+	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
 	if (tmp != kpte)
 		return 1;

@@ -965,7 +1001,8 @@ static int __should_split_large_page(pte_t *kpte, unsigned long address,
 	new_prot = static_protections(req_prot, lpaddr, old_pfn, numpages,
 				      psize, CPA_DETECT);

-	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages);
+	new_prot = verify_rwx(old_prot, new_prot, lpaddr, old_pfn, numpages,
+			      nx, rw);

 	/*
 	 * If there is a conflict, split the large page.
@@ -1046,6 +1083,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	pte_t *pbase = (pte_t *)page_address(base);
 	unsigned int i, level;
 	pgprot_t ref_prot;
+	bool nx, rw;
 	pte_t *tmp;

 	spin_lock(&pgd_lock);
@@ -1053,7 +1091,7 @@ __split_large_page(struct cpa_data *cpa, pte_t *kpte, unsigned long address,
 	 * Check for races, another CPU might have split this page
 	 * up for us already:
 	 */
-	tmp = _lookup_address_cpa(cpa, address, &level);
+	tmp = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
 	if (tmp != kpte) {
 		spin_unlock(&pgd_lock);
 		return 1;
@@ -1594,10 +1632,11 @@ static int __change_page_attr(struct cpa_data *cpa, int primary)
 	int do_split, err;
 	unsigned int level;
 	pte_t *kpte, old_pte;
+	bool nx, rw;

 	address = __cpa_addr(cpa, cpa->curpage);
 repeat:
-	kpte = _lookup_address_cpa(cpa, address, &level);
+	kpte = _lookup_address_cpa(cpa, address, &level, &nx, &rw);
 	if (!kpte)
 		return __cpa_process_fault(cpa, address, primary);

@@ -1619,7 +1658,8 @@ repeat:
 	new_prot = static_protections(new_prot, address, pfn, 1, 0,
 				      CPA_PROTECT);

-	new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1);
+	new_prot = verify_rwx(old_prot, new_prot, address, pfn, 1,
+			      nx, rw);

 	new_prot = pgprot_clear_protnone_bits(new_prot);

--
cgit v1.2.3
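The core idea in the set_memory.c changes above is that the leaf PTE alone does not decide the effective permissions of a mapping: any upper-level entry can clear _PAGE_RW or set _PAGE_NX for its whole subtree. That is why lookup_address_in_pgd_attr() ORs the NX bits and ANDs the RW bits while walking down, and why verify_rwx() can return early when !rw || nx. The following is a minimal userspace sketch of that accumulation rule only; the struct and function names (toy_entry, effective_upper_perms) are hypothetical and are not kernel code.

#include <stdbool.h>
#include <stdio.h>

/* Toy model of the permission-relevant bits of one page-table entry. */
struct toy_entry {
	bool rw;   /* _PAGE_RW set in this entry */
	bool nx;   /* _PAGE_NX set in this entry */
};

/*
 * Accumulation rule used for the levels above the returned entry:
 * every level can take write permission away (AND over RW) or forbid
 * execution (OR over NX).
 */
static void effective_upper_perms(const struct toy_entry *levels, int nlevels,
				  bool *rw, bool *nx)
{
	*rw = true;
	*nx = false;

	for (int i = 0; i < nlevels; i++) {
		*rw &= levels[i].rw;
		*nx |= levels[i].nx;
	}
}

int main(void)
{
	/* Hypothetical PGD, P4D, PUD and PMD entries above one 4K leaf PTE. */
	struct toy_entry uppers[] = {
		{ .rw = true,  .nx = false },
		{ .rw = true,  .nx = true  },   /* P4D marks the region no-execute */
		{ .rw = false, .nx = false },   /* PUD is mapped read-only */
		{ .rw = true,  .nx = false },
	};
	bool rw, nx;

	effective_upper_perms(uppers, 4, &rw, &nx);

	/*
	 * Even if the leaf PTE asks for RW without NX, the upper levels here
	 * already disable writing and execution, so verify_rwx() has nothing
	 * to warn about -- the "if (!rw || nx) return new;" case added above.
	 */
	printf("upper levels: rw=%d nx=%d\n", rw, nx);
	return 0;
}

For this example walk the program prints rw=0 nx=1; in the patch, the same two booleans are what _lookup_address_cpa() hands back to its callers so verify_rwx() can take the non-leaf levels into account.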