author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:35:05 +0000
---|---|---
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-05-18 17:39:31 +0000
commit | 85c675d0d09a45a135bddd15d7b385f8758c32fb |
tree | 76267dbc9b9a130337be3640948fe397b04ac629 /arch/s390/mm |
parent | Adding upstream version 6.6.15. |
download | linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz, linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip |
Adding upstream version 6.7.7. (tag: upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/s390/mm')
-rw-r--r-- | arch/s390/mm/cmm.c | 1
-rw-r--r-- | arch/s390/mm/dump_pagetables.c | 2
-rw-r--r-- | arch/s390/mm/extable.c | 18
-rw-r--r-- | arch/s390/mm/fault.c | 461
-rw-r--r-- | arch/s390/mm/gmap.c | 5
-rw-r--r-- | arch/s390/mm/init.c | 8
-rw-r--r-- | arch/s390/mm/maccess.c | 2
-rw-r--r-- | arch/s390/mm/page-states.c | 206
-rw-r--r-- | arch/s390/mm/pageattr.c | 2
-rw-r--r-- | arch/s390/mm/pgalloc.c | 304
-rw-r--r-- | arch/s390/mm/vmem.c | 11
11 files changed, 248 insertions, 772 deletions
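Before the per-file diffs: the largest hunk below is the fault.c rewrite, whose central move is replacing open-coded mask arithmetic on the translation-exception identification (TEID) with a bitfield union. A minimal sketch of that union follows; the real definition lives in asm/fault.h, and the layout here is inferred from the masks the old code used (`& 3` for the address space, `0xc00`/`0x400` for the store indication, `~4095L` for the address), so treat the field positions as assumptions.

```c
/* Sketch of union teid; s390 is big-endian, so the first bitfield
 * occupies the most significant bits (bit 0 in s390 numbering). */
union teid {
	unsigned long val;
	struct {
		unsigned long addr : 52; /* page index of the faulting address */
		unsigned long fsi  : 2;  /* fetch/store indication (facility 75) */
		unsigned long      : 2;
		unsigned long b56  : 1;  /* instruction-fetch (NX) indication */
		unsigned long      : 4;
		unsigned long b61  : 1;  /* address-validity bit */
		unsigned long as   : 2;  /* address-space indication */
	};
};

/* Values inferred from the old test (teid & 0xc00) == 0x400. */
enum { TEID_FSI_UNKNOWN = 0, TEID_FSI_STORE = 1, TEID_FSI_FETCH = 2 };

/* teid.addr is a page index, hence the multiply in get_fault_address(). */
static inline unsigned long sketch_fault_address(union teid teid)
{
	return teid.addr * 4096UL; /* PAGE_SIZE */
}
```

With this in place, fault_is_write() in the diff reduces to `teid.fsi == TEID_FSI_STORE` behind a static branch keyed on facility 75.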
diff --git a/arch/s390/mm/cmm.c b/arch/s390/mm/cmm.c index f475153132..f8b13f2476 100644 --- a/arch/s390/mm/cmm.c +++ b/arch/s390/mm/cmm.c @@ -332,7 +332,6 @@ static struct ctl_table cmm_table[] = { .mode = 0644, .proc_handler = cmm_timeout_handler, }, - { } }; #ifdef CONFIG_CMM_IUCV diff --git a/arch/s390/mm/dump_pagetables.c b/arch/s390/mm/dump_pagetables.c index b51666967a..d37a8f607b 100644 --- a/arch/s390/mm/dump_pagetables.c +++ b/arch/s390/mm/dump_pagetables.c @@ -287,7 +287,7 @@ static int pt_dump_init(void) * kernel ASCE. We need this to keep the page table walker functions * from accessing non-existent entries. */ - max_addr = (S390_lowcore.kernel_asce & _REGION_ENTRY_TYPE_MASK) >> 2; + max_addr = (S390_lowcore.kernel_asce.val & _REGION_ENTRY_TYPE_MASK) >> 2; max_addr = 1UL << (max_addr * 11 + 31); address_markers[IDENTITY_AFTER_END_NR].start_address = ident_map_size; address_markers[AMODE31_START_NR].start_address = (unsigned long)__samode31; diff --git a/arch/s390/mm/extable.c b/arch/s390/mm/extable.c index fe87291df9..0a0738a473 100644 --- a/arch/s390/mm/extable.c +++ b/arch/s390/mm/extable.c @@ -61,6 +61,22 @@ static bool ex_handler_ua_load_reg(const struct exception_table_entry *ex, return true; } +static bool ex_handler_zeropad(const struct exception_table_entry *ex, struct pt_regs *regs) +{ + unsigned int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data); + unsigned int reg_data = FIELD_GET(EX_DATA_REG_ERR, ex->data); + unsigned long data, addr, offset; + + addr = regs->gprs[reg_addr]; + offset = addr & (sizeof(unsigned long) - 1); + addr &= ~(sizeof(unsigned long) - 1); + data = *(unsigned long *)addr; + data <<= BITS_PER_BYTE * offset; + regs->gprs[reg_data] = data; + regs->psw.addr = extable_fixup(ex); + return true; +} + bool fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *ex; @@ -81,6 +97,8 @@ bool fixup_exception(struct pt_regs *regs) return ex_handler_ua_load_reg(ex, false, regs); case EX_TYPE_UA_LOAD_REGPAIR: return ex_handler_ua_load_reg(ex, true, regs); + case EX_TYPE_ZEROPAD: + return ex_handler_zeropad(ex, regs); } panic("invalid exception table entry"); } diff --git a/arch/s390/mm/fault.c b/arch/s390/mm/fault.c index b678295931..ab4098886e 100644 --- a/arch/s390/mm/fault.c +++ b/arch/s390/mm/fault.c @@ -3,17 +3,19 @@ * S390 version * Copyright IBM Corp. 1999 * Author(s): Hartmut Penner (hp@de.ibm.com) - * Ulrich Weigand (uweigand@de.ibm.com) + * Ulrich Weigand (uweigand@de.ibm.com) * * Derived from "arch/i386/mm/fault.c" * Copyright (C) 1995 Linus Torvalds */ #include <linux/kernel_stat.h> +#include <linux/mmu_context.h> #include <linux/perf_event.h> #include <linux/signal.h> #include <linux/sched.h> #include <linux/sched/debug.h> +#include <linux/jump_label.h> #include <linux/kernel.h> #include <linux/errno.h> #include <linux/string.h> @@ -34,38 +36,27 @@ #include <linux/kfence.h> #include <asm/asm-extable.h> #include <asm/asm-offsets.h> +#include <asm/ptrace.h> +#include <asm/fault.h> #include <asm/diag.h> #include <asm/gmap.h> #include <asm/irq.h> -#include <asm/mmu_context.h> #include <asm/facility.h> #include <asm/uv.h> #include "../kernel/entry.h" -#define __FAIL_ADDR_MASK -4096L - -/* - * Allocate private vm_fault_reason from top. Please make sure it won't - * collide with vm_fault_reason. 
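An aside on the EX_TYPE_ZEROPAD handler just added to extable.c above: it replays a load that ran off the end of accessible memory as an aligned load, then shifts the accessible bytes to the top so the inaccessible tail reads as zero. A standalone illustration of that arithmetic with made-up values (hosted C, not kernel code):

```c
#include <stdio.h>

int main(void)
{
	unsigned long word = 0x1122334455667788UL; /* the aligned doubleword */
	unsigned long addr = 5;                    /* hypothetical fault address */
	unsigned long offset = addr & (sizeof(unsigned long) - 1);

	/* Same shift as ex_handler_zeropad(): BITS_PER_BYTE * offset. On
	 * big-endian s390 the bytes at offsets 5..7 move to the top and the
	 * remainder is zero-padded. */
	printf("%016lx\n", word << (8 * offset)); /* prints 6677880000000000 */
	return 0;
}
```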
- */ -#define VM_FAULT_BADCONTEXT ((__force vm_fault_t)0x80000000) -#define VM_FAULT_BADMAP ((__force vm_fault_t)0x40000000) -#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x20000000) -#define VM_FAULT_SIGNAL ((__force vm_fault_t)0x10000000) -#define VM_FAULT_PFAULT ((__force vm_fault_t)0x8000000) - enum fault_type { KERNEL_FAULT, USER_FAULT, GMAP_FAULT, }; -static unsigned long store_indication __read_mostly; +static DEFINE_STATIC_KEY_FALSE(have_store_indication); static int __init fault_init(void) { if (test_facility(75)) - store_indication = 0xc00; + static_branch_enable(&have_store_indication); return 0; } early_initcall(fault_init); @@ -75,11 +66,9 @@ early_initcall(fault_init); */ static enum fault_type get_fault_type(struct pt_regs *regs) { - unsigned long trans_exc_code; + union teid teid = { .val = regs->int_parm_long }; - trans_exc_code = regs->int_parm_long & 3; - if (likely(trans_exc_code == 0)) { - /* primary space exception */ + if (likely(teid.as == PSW_BITS_AS_PRIMARY)) { if (user_mode(regs)) return USER_FAULT; if (!IS_ENABLED(CONFIG_PGSTE)) @@ -88,83 +77,77 @@ static enum fault_type get_fault_type(struct pt_regs *regs) return GMAP_FAULT; return KERNEL_FAULT; } - if (trans_exc_code == 2) + if (teid.as == PSW_BITS_AS_SECONDARY) return USER_FAULT; - if (trans_exc_code == 1) { - /* access register mode, not used in the kernel */ + /* Access register mode, not used in the kernel */ + if (teid.as == PSW_BITS_AS_ACCREG) return USER_FAULT; - } - /* home space exception -> access via kernel ASCE */ + /* Home space -> access via kernel ASCE */ return KERNEL_FAULT; } static unsigned long get_fault_address(struct pt_regs *regs) { - unsigned long trans_exc_code = regs->int_parm_long; - - return trans_exc_code & __FAIL_ADDR_MASK; -} - -static bool fault_is_write(struct pt_regs *regs) -{ - unsigned long trans_exc_code = regs->int_parm_long; + union teid teid = { .val = regs->int_parm_long }; - return (trans_exc_code & store_indication) == 0x400; + return teid.addr * PAGE_SIZE; } -static int bad_address(void *p) +static __always_inline bool fault_is_write(struct pt_regs *regs) { - unsigned long dummy; + union teid teid = { .val = regs->int_parm_long }; - return get_kernel_nofault(dummy, (unsigned long *)p); + if (static_branch_likely(&have_store_indication)) + return teid.fsi == TEID_FSI_STORE; + return false; } static void dump_pagetable(unsigned long asce, unsigned long address) { - unsigned long *table = __va(asce & _ASCE_ORIGIN); + unsigned long entry, *table = __va(asce & _ASCE_ORIGIN); pr_alert("AS:%016lx ", asce); switch (asce & _ASCE_TYPE_MASK) { case _ASCE_TYPE_REGION1: table += (address & _REGION1_INDEX) >> _REGION1_SHIFT; - if (bad_address(table)) + if (get_kernel_nofault(entry, table)) goto bad; - pr_cont("R1:%016lx ", *table); - if (*table & _REGION_ENTRY_INVALID) + pr_cont("R1:%016lx ", entry); + if (entry & _REGION_ENTRY_INVALID) goto out; - table = __va(*table & _REGION_ENTRY_ORIGIN); + table = __va(entry & _REGION_ENTRY_ORIGIN); fallthrough; case _ASCE_TYPE_REGION2: table += (address & _REGION2_INDEX) >> _REGION2_SHIFT; - if (bad_address(table)) + if (get_kernel_nofault(entry, table)) goto bad; - pr_cont("R2:%016lx ", *table); - if (*table & _REGION_ENTRY_INVALID) + pr_cont("R2:%016lx ", entry); + if (entry & _REGION_ENTRY_INVALID) goto out; - table = __va(*table & _REGION_ENTRY_ORIGIN); + table = __va(entry & _REGION_ENTRY_ORIGIN); fallthrough; case _ASCE_TYPE_REGION3: table += (address & _REGION3_INDEX) >> _REGION3_SHIFT; - if (bad_address(table)) + if 
(get_kernel_nofault(entry, table)) goto bad; - pr_cont("R3:%016lx ", *table); - if (*table & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) + pr_cont("R3:%016lx ", entry); + if (entry & (_REGION_ENTRY_INVALID | _REGION3_ENTRY_LARGE)) goto out; - table = __va(*table & _REGION_ENTRY_ORIGIN); + table = __va(entry & _REGION_ENTRY_ORIGIN); fallthrough; case _ASCE_TYPE_SEGMENT: table += (address & _SEGMENT_INDEX) >> _SEGMENT_SHIFT; - if (bad_address(table)) + if (get_kernel_nofault(entry, table)) goto bad; - pr_cont("S:%016lx ", *table); - if (*table & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) + pr_cont("S:%016lx ", entry); + if (entry & (_SEGMENT_ENTRY_INVALID | _SEGMENT_ENTRY_LARGE)) goto out; - table = __va(*table & _SEGMENT_ENTRY_ORIGIN); + table = __va(entry & _SEGMENT_ENTRY_ORIGIN); } table += (address & _PAGE_INDEX) >> _PAGE_SHIFT; - if (bad_address(table)) + if (get_kernel_nofault(entry, table)) goto bad; - pr_cont("P:%016lx ", *table); + pr_cont("P:%016lx ", entry); out: pr_cont("\n"); return; @@ -174,173 +157,113 @@ bad: static void dump_fault_info(struct pt_regs *regs) { + union teid teid = { .val = regs->int_parm_long }; unsigned long asce; pr_alert("Failing address: %016lx TEID: %016lx\n", - regs->int_parm_long & __FAIL_ADDR_MASK, regs->int_parm_long); + get_fault_address(regs), teid.val); pr_alert("Fault in "); - switch (regs->int_parm_long & 3) { - case 3: + switch (teid.as) { + case PSW_BITS_AS_HOME: pr_cont("home space "); break; - case 2: + case PSW_BITS_AS_SECONDARY: pr_cont("secondary space "); break; - case 1: + case PSW_BITS_AS_ACCREG: pr_cont("access register "); break; - case 0: + case PSW_BITS_AS_PRIMARY: pr_cont("primary space "); break; } pr_cont("mode while using "); switch (get_fault_type(regs)) { case USER_FAULT: - asce = S390_lowcore.user_asce; + asce = S390_lowcore.user_asce.val; pr_cont("user "); break; case GMAP_FAULT: - asce = ((struct gmap *) S390_lowcore.gmap)->asce; + asce = ((struct gmap *)S390_lowcore.gmap)->asce; pr_cont("gmap "); break; case KERNEL_FAULT: - asce = S390_lowcore.kernel_asce; + asce = S390_lowcore.kernel_asce.val; pr_cont("kernel "); break; default: unreachable(); } pr_cont("ASCE.\n"); - dump_pagetable(asce, regs->int_parm_long & __FAIL_ADDR_MASK); + dump_pagetable(asce, get_fault_address(regs)); } int show_unhandled_signals = 1; void report_user_fault(struct pt_regs *regs, long signr, int is_mm_fault) { + static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, DEFAULT_RATELIMIT_BURST); + if ((task_pid_nr(current) > 1) && !show_unhandled_signals) return; if (!unhandled_signal(current, signr)) return; - if (!printk_ratelimit()) + if (!__ratelimit(&rs)) return; - printk(KERN_ALERT "User process fault: interruption code %04x ilc:%d ", - regs->int_code & 0xffff, regs->int_code >> 17); + pr_alert("User process fault: interruption code %04x ilc:%d ", + regs->int_code & 0xffff, regs->int_code >> 17); print_vma_addr(KERN_CONT "in ", regs->psw.addr); - printk(KERN_CONT "\n"); + pr_cont("\n"); if (is_mm_fault) dump_fault_info(regs); show_regs(regs); } -/* - * Send SIGSEGV to task. This is an external routine - * to keep the stack usage of do_page_fault small. 
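Worth pulling out of the fault.c hunk above: report_user_fault() now carries its own DEFINE_RATELIMIT_STATE instead of calling the global printk_ratelimit(), so unrelated printk traffic no longer consumes this message's budget. The general shape of the idiom (a sketch with a hypothetical message):

```c
#include <linux/ratelimit.h>
#include <linux/printk.h>

static void report_something(void)
{
	/* Per-call-site state: allow DEFAULT_RATELIMIT_BURST messages per
	 * DEFAULT_RATELIMIT_INTERVAL, independent of other printk callers. */
	static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
				      DEFAULT_RATELIMIT_BURST);

	if (!__ratelimit(&rs))
		return;
	pr_alert("something happened\n");
}
```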
- */ -static noinline void do_sigsegv(struct pt_regs *regs, int si_code) +static void do_sigsegv(struct pt_regs *regs, int si_code) { report_user_fault(regs, SIGSEGV, 1); - force_sig_fault(SIGSEGV, si_code, - (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK)); + force_sig_fault(SIGSEGV, si_code, (void __user *)get_fault_address(regs)); } -static noinline void do_no_context(struct pt_regs *regs, vm_fault_t fault) +static void handle_fault_error_nolock(struct pt_regs *regs, int si_code) { enum fault_type fault_type; unsigned long address; bool is_write; + if (user_mode(regs)) { + if (WARN_ON_ONCE(!si_code)) + si_code = SEGV_MAPERR; + return do_sigsegv(regs, si_code); + } if (fixup_exception(regs)) return; fault_type = get_fault_type(regs); - if ((fault_type == KERNEL_FAULT) && (fault == VM_FAULT_BADCONTEXT)) { + if (fault_type == KERNEL_FAULT) { address = get_fault_address(regs); is_write = fault_is_write(regs); if (kfence_handle_page_fault(address, is_write, regs)) return; } - /* - * Oops. The kernel tried to access some bad page. We'll have to - * terminate things with extreme prejudice. - */ if (fault_type == KERNEL_FAULT) - printk(KERN_ALERT "Unable to handle kernel pointer dereference" - " in virtual kernel address space\n"); + pr_alert("Unable to handle kernel pointer dereference in virtual kernel address space\n"); else - printk(KERN_ALERT "Unable to handle kernel paging request" - " in virtual user address space\n"); + pr_alert("Unable to handle kernel paging request in virtual user address space\n"); dump_fault_info(regs); die(regs, "Oops"); } -static noinline void do_low_address(struct pt_regs *regs) +static void handle_fault_error(struct pt_regs *regs, int si_code) { - /* Low-address protection hit in kernel mode means - NULL pointer write access in kernel mode. */ - if (regs->psw.mask & PSW_MASK_PSTATE) { - /* Low-address protection hit in user mode 'cannot happen'. */ - die (regs, "Low-address protection"); - } + struct mm_struct *mm = current->mm; - do_no_context(regs, VM_FAULT_BADACCESS); + mmap_read_unlock(mm); + handle_fault_error_nolock(regs, si_code); } -static noinline void do_sigbus(struct pt_regs *regs) +static void do_sigbus(struct pt_regs *regs) { - /* - * Send a sigbus, regardless of whether we were in kernel - * or user mode. - */ - force_sig_fault(SIGBUS, BUS_ADRERR, - (void __user *)(regs->int_parm_long & __FAIL_ADDR_MASK)); -} - -static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault) -{ - int si_code; - - switch (fault) { - case VM_FAULT_BADACCESS: - case VM_FAULT_BADMAP: - /* Bad memory access. Check if it is kernel or user space. */ - if (user_mode(regs)) { - /* User mode accesses just cause a SIGSEGV */ - si_code = (fault == VM_FAULT_BADMAP) ? - SEGV_MAPERR : SEGV_ACCERR; - do_sigsegv(regs, si_code); - break; - } - fallthrough; - case VM_FAULT_BADCONTEXT: - case VM_FAULT_PFAULT: - do_no_context(regs, fault); - break; - case VM_FAULT_SIGNAL: - if (!user_mode(regs)) - do_no_context(regs, fault); - break; - default: /* fault & VM_FAULT_ERROR */ - if (fault & VM_FAULT_OOM) { - if (!user_mode(regs)) - do_no_context(regs, fault); - else - pagefault_out_of_memory(); - } else if (fault & VM_FAULT_SIGSEGV) { - /* Kernel mode? Handle exceptions or die */ - if (!user_mode(regs)) - do_no_context(regs, fault); - else - do_sigsegv(regs, SEGV_MAPERR); - } else if (fault & VM_FAULT_SIGBUS) { - /* Kernel mode? 
Handle exceptions or die */ - if (!user_mode(regs)) - do_no_context(regs, fault); - else - do_sigbus(regs); - } else - BUG(); - break; - } + force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)get_fault_address(regs)); } /* @@ -349,20 +272,20 @@ static noinline void do_fault_error(struct pt_regs *regs, vm_fault_t fault) * routines. * * interruption code (int_code): - * 04 Protection -> Write-Protection (suppression) - * 10 Segment translation -> Not present (nullification) - * 11 Page translation -> Not present (nullification) - * 3b Region third trans. -> Not present (nullification) + * 04 Protection -> Write-Protection (suppression) + * 10 Segment translation -> Not present (nullification) + * 11 Page translation -> Not present (nullification) + * 3b Region third trans. -> Not present (nullification) */ -static inline vm_fault_t do_exception(struct pt_regs *regs, int access) +static void do_exception(struct pt_regs *regs, int access) { - struct gmap *gmap; + struct vm_area_struct *vma; struct task_struct *tsk; + unsigned long address; struct mm_struct *mm; - struct vm_area_struct *vma; enum fault_type type; - unsigned long address; unsigned int flags; + struct gmap *gmap; vm_fault_t fault; bool is_write; @@ -372,31 +295,21 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) * been nullified. Don't signal single step via SIGTRAP. */ clear_thread_flag(TIF_PER_TRAP); - if (kprobe_page_fault(regs, 14)) - return 0; - + return; mm = tsk->mm; address = get_fault_address(regs); is_write = fault_is_write(regs); - - /* - * Verify that the fault happened in user space, that - * we are not in an interrupt and that there is a - * user context. - */ - fault = VM_FAULT_BADCONTEXT; type = get_fault_type(regs); switch (type) { case KERNEL_FAULT: - goto out; + return handle_fault_error_nolock(regs, 0); case USER_FAULT: case GMAP_FAULT: if (faulthandler_disabled() || !mm) - goto out; + return handle_fault_error_nolock(regs, 0); break; } - perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address); flags = FAULT_FLAG_DEFAULT; if (user_mode(regs)) @@ -419,125 +332,120 @@ static inline vm_fault_t do_exception(struct pt_regs *regs, int access) vma_end_read(vma); if (!(fault & VM_FAULT_RETRY)) { count_vm_vma_lock_event(VMA_LOCK_SUCCESS); - if (likely(!(fault & VM_FAULT_ERROR))) - fault = 0; - goto out; + if (unlikely(fault & VM_FAULT_ERROR)) + goto error; + return; } count_vm_vma_lock_event(VMA_LOCK_RETRY); + if (fault & VM_FAULT_MAJOR) + flags |= FAULT_FLAG_TRIED; + /* Quick path to respond to signals */ if (fault_signal_pending(fault, regs)) { - fault = VM_FAULT_SIGNAL; - goto out; + if (!user_mode(regs)) + handle_fault_error_nolock(regs, 0); + return; } lock_mmap: mmap_read_lock(mm); - gmap = NULL; if (IS_ENABLED(CONFIG_PGSTE) && type == GMAP_FAULT) { - gmap = (struct gmap *) S390_lowcore.gmap; + gmap = (struct gmap *)S390_lowcore.gmap; current->thread.gmap_addr = address; current->thread.gmap_write_flag = !!(flags & FAULT_FLAG_WRITE); current->thread.gmap_int_code = regs->int_code & 0xffff; address = __gmap_translate(gmap, address); - if (address == -EFAULT) { - fault = VM_FAULT_BADMAP; - goto out_up; - } + if (address == -EFAULT) + return handle_fault_error(regs, SEGV_MAPERR); if (gmap->pfault_enabled) flags |= FAULT_FLAG_RETRY_NOWAIT; } - retry: - fault = VM_FAULT_BADMAP; vma = find_vma(mm, address); if (!vma) - goto out_up; - + return handle_fault_error(regs, SEGV_MAPERR); if (unlikely(vma->vm_start > address)) { if (!(vma->vm_flags & VM_GROWSDOWN)) - goto out_up; + return 
handle_fault_error(regs, SEGV_MAPERR); vma = expand_stack(mm, address); if (!vma) - goto out; + return handle_fault_error_nolock(regs, SEGV_MAPERR); } - - /* - * Ok, we have a good vm_area for this memory access, so - * we can handle it.. - */ - fault = VM_FAULT_BADACCESS; if (unlikely(!(vma->vm_flags & access))) - goto out_up; - - /* - * If for any reason at all we couldn't handle the fault, - * make sure we exit gracefully rather than endlessly redo - * the fault. - */ + return handle_fault_error(regs, SEGV_ACCERR); fault = handle_mm_fault(vma, address, flags, regs); if (fault_signal_pending(fault, regs)) { - fault = VM_FAULT_SIGNAL; if (flags & FAULT_FLAG_RETRY_NOWAIT) - goto out_up; - goto out; + mmap_read_unlock(mm); + if (!user_mode(regs)) + handle_fault_error_nolock(regs, 0); + return; } - /* The fault is fully completed (including releasing mmap lock) */ if (fault & VM_FAULT_COMPLETED) { if (gmap) { mmap_read_lock(mm); - goto out_gmap; + goto gmap; } - fault = 0; - goto out; + return; + } + if (unlikely(fault & VM_FAULT_ERROR)) { + mmap_read_unlock(mm); + goto error; } - - if (unlikely(fault & VM_FAULT_ERROR)) - goto out_up; - if (fault & VM_FAULT_RETRY) { - if (IS_ENABLED(CONFIG_PGSTE) && gmap && - (flags & FAULT_FLAG_RETRY_NOWAIT)) { + if (IS_ENABLED(CONFIG_PGSTE) && gmap && (flags & FAULT_FLAG_RETRY_NOWAIT)) { /* - * FAULT_FLAG_RETRY_NOWAIT has been set, mmap_lock has - * not been released + * FAULT_FLAG_RETRY_NOWAIT has been set, + * mmap_lock has not been released */ current->thread.gmap_pfault = 1; - fault = VM_FAULT_PFAULT; - goto out_up; + return handle_fault_error(regs, 0); } flags &= ~FAULT_FLAG_RETRY_NOWAIT; flags |= FAULT_FLAG_TRIED; mmap_read_lock(mm); goto retry; } -out_gmap: +gmap: if (IS_ENABLED(CONFIG_PGSTE) && gmap) { address = __gmap_link(gmap, current->thread.gmap_addr, address); - if (address == -EFAULT) { - fault = VM_FAULT_BADMAP; - goto out_up; - } + if (address == -EFAULT) + return handle_fault_error(regs, SEGV_MAPERR); if (address == -ENOMEM) { fault = VM_FAULT_OOM; - goto out_up; + mmap_read_unlock(mm); + goto error; } } - fault = 0; -out_up: mmap_read_unlock(mm); -out: - return fault; + return; +error: + if (fault & VM_FAULT_OOM) { + if (!user_mode(regs)) + handle_fault_error_nolock(regs, 0); + else + pagefault_out_of_memory(); + } else if (fault & VM_FAULT_SIGSEGV) { + if (!user_mode(regs)) + handle_fault_error_nolock(regs, 0); + else + do_sigsegv(regs, SEGV_MAPERR); + } else if (fault & VM_FAULT_SIGBUS) { + if (!user_mode(regs)) + handle_fault_error_nolock(regs, 0); + else + do_sigbus(regs); + } else { + BUG(); + } } void do_protection_exception(struct pt_regs *regs) { - unsigned long trans_exc_code; - int access; - vm_fault_t fault; + union teid teid = { .val = regs->int_parm_long }; - trans_exc_code = regs->int_parm_long; /* * Protection exceptions are suppressing, decrement psw address. * The exception to this rule are aborted transactions, for these @@ -550,33 +458,28 @@ void do_protection_exception(struct pt_regs *regs) * as a special case because the translation exception code * field is not guaranteed to contain valid data in this case. */ - if (unlikely(!(trans_exc_code & 4))) { - do_low_address(regs); - return; + if (unlikely(!teid.b61)) { + if (user_mode(regs)) { + /* Low-address protection in user mode: cannot happen */ + die(regs, "Low-address protection"); + } + /* + * Low-address protection in kernel mode means + * NULL pointer write access in kernel mode. 
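Stripped of the gmap and s390 specifics, the control flow do_exception() implements above is the stock mm fault retry protocol. A generic skeleton for orientation (a sketch, not a drop-in; assumes flags contains FAULT_FLAG_ALLOW_RETRY, as FAULT_FLAG_DEFAULT does):

```c
#include <linux/mm.h>
#include <linux/ptrace.h>

/* Generic skeleton of the retry protocol used by do_exception() above. */
static vm_fault_t sketch_fault(struct mm_struct *mm, unsigned long address,
			       unsigned int flags, struct pt_regs *regs)
{
	struct vm_area_struct *vma;
	vm_fault_t fault;

	mmap_read_lock(mm);
retry:
	vma = find_vma(mm, address);
	if (!vma || vma->vm_start > address)
		goto bad;                       /* SEGV_MAPERR territory */
	fault = handle_mm_fault(vma, address, flags, regs);
	if (fault & VM_FAULT_COMPLETED)
		return 0;                       /* mmap lock already dropped */
	if (fault & VM_FAULT_RETRY) {
		flags |= FAULT_FLAG_TRIED;      /* lock was dropped for us */
		mmap_read_lock(mm);
		goto retry;
	}
	mmap_read_unlock(mm);
	return (fault & VM_FAULT_ERROR) ? fault : 0;
bad:
	mmap_read_unlock(mm);
	return VM_FAULT_SIGSEGV;
}
```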
+ */ + return handle_fault_error_nolock(regs, 0); } - if (unlikely(MACHINE_HAS_NX && (trans_exc_code & 0x80))) { - regs->int_parm_long = (trans_exc_code & ~PAGE_MASK) | - (regs->psw.addr & PAGE_MASK); - access = VM_EXEC; - fault = VM_FAULT_BADACCESS; - } else { - access = VM_WRITE; - fault = do_exception(regs, access); + if (unlikely(MACHINE_HAS_NX && teid.b56)) { + regs->int_parm_long = (teid.addr * PAGE_SIZE) | (regs->psw.addr & PAGE_MASK); + return handle_fault_error_nolock(regs, SEGV_ACCERR); } - if (unlikely(fault)) - do_fault_error(regs, fault); + do_exception(regs, VM_WRITE); } NOKPROBE_SYMBOL(do_protection_exception); void do_dat_exception(struct pt_regs *regs) { - int access; - vm_fault_t fault; - - access = VM_ACCESS_FLAGS; - fault = do_exception(regs, access); - if (unlikely(fault)) - do_fault_error(regs, fault); + do_exception(regs, VM_ACCESS_FLAGS); } NOKPROBE_SYMBOL(do_dat_exception); @@ -584,7 +487,8 @@ NOKPROBE_SYMBOL(do_dat_exception); void do_secure_storage_access(struct pt_regs *regs) { - unsigned long addr = regs->int_parm_long & __FAIL_ADDR_MASK; + union teid teid = { .val = regs->int_parm_long }; + unsigned long addr = get_fault_address(regs); struct vm_area_struct *vma; struct mm_struct *mm; struct page *page; @@ -592,14 +496,12 @@ void do_secure_storage_access(struct pt_regs *regs) int rc; /* - * bit 61 tells us if the address is valid, if it's not we - * have a major problem and should stop the kernel or send a - * SIGSEGV to the process. Unfortunately bit 61 is not - * reliable without the misc UV feature so we need to check - * for that as well. + * Bit 61 indicates if the address is valid, if it is not the + * kernel should be stopped or SIGSEGV should be sent to the + * process. Bit 61 is not reliable without the misc UV feature, + * therefore this needs to be checked too. */ - if (uv_has_feature(BIT_UV_FEAT_MISC) && - !test_bit_inv(61, ®s->int_parm_long)) { + if (uv_has_feature(BIT_UV_FEAT_MISC) && !teid.b61) { /* * When this happens, userspace did something that it * was not supposed to do, e.g. branching into secure @@ -609,14 +511,12 @@ void do_secure_storage_access(struct pt_regs *regs) send_sig(SIGSEGV, current, 0); return; } - /* - * The kernel should never run into this case and we - * have no way out of this situation. + * The kernel should never run into this case and + * there is no way out of this situation. 
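The rewritten do_protection_exception() above triages on two TEID bits before doing any VMA work. Restated compactly (a sketch; b61/b56 as in the union sketched after the diffstat):

```c
enum prot_action { PROT_LOW_ADDRESS, PROT_EXEC_PROT, PROT_WRITE_PROT };

/* Branch order of do_protection_exception(), restated. */
static enum prot_action classify_protection(bool b61, bool b56, bool has_nx)
{
	if (!b61)               /* TEID unreliable: low-address protection */
		return PROT_LOW_ADDRESS;
	if (has_nx && b56)      /* instruction-fetch (execute) protection */
		return PROT_EXEC_PROT;
	return PROT_WRITE_PROT; /* ordinary write-protection fault */
}
```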
*/ panic("Unexpected PGM 0x3d with TEID bit 61=0"); } - switch (get_fault_type(regs)) { case GMAP_FAULT: mm = current->mm; @@ -624,20 +524,15 @@ void do_secure_storage_access(struct pt_regs *regs) mmap_read_lock(mm); addr = __gmap_translate(gmap, addr); mmap_read_unlock(mm); - if (IS_ERR_VALUE(addr)) { - do_fault_error(regs, VM_FAULT_BADMAP); - break; - } + if (IS_ERR_VALUE(addr)) + return handle_fault_error_nolock(regs, SEGV_MAPERR); fallthrough; case USER_FAULT: mm = current->mm; mmap_read_lock(mm); vma = find_vma(mm, addr); - if (!vma) { - mmap_read_unlock(mm); - do_fault_error(regs, VM_FAULT_BADMAP); - break; - } + if (!vma) + return handle_fault_error(regs, SEGV_MAPERR); page = follow_page(vma, addr, FOLL_WRITE | FOLL_GET); if (IS_ERR_OR_NULL(page)) { mmap_read_unlock(mm); @@ -658,23 +553,18 @@ void do_secure_storage_access(struct pt_regs *regs) BUG(); break; default: - do_fault_error(regs, VM_FAULT_BADMAP); - WARN_ON_ONCE(1); + unreachable(); } } NOKPROBE_SYMBOL(do_secure_storage_access); void do_non_secure_storage_access(struct pt_regs *regs) { - unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK; struct gmap *gmap = (struct gmap *)S390_lowcore.gmap; + unsigned long gaddr = get_fault_address(regs); - if (get_fault_type(regs) != GMAP_FAULT) { - do_fault_error(regs, VM_FAULT_BADMAP); - WARN_ON_ONCE(1); - return; - } - + if (WARN_ON_ONCE(get_fault_type(regs) != GMAP_FAULT)) + return handle_fault_error_nolock(regs, SEGV_MAPERR); if (gmap_convert_to_secure(gmap, gaddr) == -EINVAL) send_sig(SIGSEGV, current, 0); } @@ -682,8 +572,8 @@ NOKPROBE_SYMBOL(do_non_secure_storage_access); void do_secure_storage_violation(struct pt_regs *regs) { - unsigned long gaddr = regs->int_parm_long & __FAIL_ADDR_MASK; struct gmap *gmap = (struct gmap *)S390_lowcore.gmap; + unsigned long gaddr = get_fault_address(regs); /* * If the VM has been rebooted, its address space might still contain @@ -699,9 +589,8 @@ void do_secure_storage_violation(struct pt_regs *regs) * This exception is only triggered when a guest 2 is running * and can therefore never occur in kernel context. 
*/ - printk_ratelimited(KERN_WARNING - "Secure storage violation in task: %s, pid %d\n", - current->comm, current->pid); + pr_warn_ratelimited("Secure storage violation in task: %s, pid %d\n", + current->comm, current->pid); send_sig(SIGSEGV, current, 0); } diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c index 20786f6883..8da39deb56 100644 --- a/arch/s390/mm/gmap.c +++ b/arch/s390/mm/gmap.c @@ -18,7 +18,7 @@ #include <linux/ksm.h> #include <linux/mman.h> #include <linux/pgtable.h> - +#include <asm/page-states.h> #include <asm/pgalloc.h> #include <asm/gmap.h> #include <asm/page.h> @@ -33,7 +33,7 @@ static struct page *gmap_alloc_crst(void) page = alloc_pages(GFP_KERNEL_ACCOUNT, CRST_ALLOC_ORDER); if (!page) return NULL; - arch_set_page_dat(page, CRST_ALLOC_ORDER); + __arch_set_page_dat(page_to_virt(page), 1UL << CRST_ALLOC_ORDER); return page; } @@ -1691,6 +1691,7 @@ struct gmap *gmap_shadow(struct gmap *parent, unsigned long asce, return ERR_PTR(-ENOMEM); new->mm = parent->mm; new->parent = gmap_get(parent); + new->private = parent->private; new->orig_asce = asce; new->edat_level = edat_level; new->initialized = false; diff --git a/arch/s390/mm/init.c b/arch/s390/mm/init.c index 8b94d2212d..43e612bc2b 100644 --- a/arch/s390/mm/init.c +++ b/arch/s390/mm/init.c @@ -35,6 +35,7 @@ #include <asm/processor.h> #include <linux/uaccess.h> #include <asm/pgalloc.h> +#include <asm/ctlreg.h> #include <asm/kfence.h> #include <asm/ptdump.h> #include <asm/dma.h> @@ -42,7 +43,6 @@ #include <asm/tlb.h> #include <asm/tlbflush.h> #include <asm/sections.h> -#include <asm/ctl_reg.h> #include <asm/sclp.h> #include <asm/set_memory.h> #include <asm/kasan.h> @@ -54,7 +54,7 @@ pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".bss..swapper_pg_dir"); pgd_t invalid_pg_dir[PTRS_PER_PGD] __section(".bss..invalid_pg_dir"); -unsigned long __bootdata_preserved(s390_invalid_asce); +struct ctlreg __bootdata_preserved(s390_invalid_asce); unsigned long empty_zero_page, zero_page_mask; EXPORT_SYMBOL(empty_zero_page); @@ -164,14 +164,10 @@ void __init mem_init(void) pv_init(); kfence_split_mapping(); - /* Setup guest page hinting */ - cmma_init(); /* this will put all low memory onto the freelists */ memblock_free_all(); setup_zero_pages(); /* Setup zeroed pages. 
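A conversion that recurs across the gmap.c, pgalloc.c, and vmem.c hunks in this commit: the removed arch_set_page_dat() took a struct page plus an allocation order, while __arch_set_page_dat() takes a virtual address plus a page count, hence the repeated `1UL << order` at call sites. As a sketch:

```c
#include <linux/mm.h>
#include <asm/page-states.h>

/* Hypothetical compatibility wrapper showing the argument conversion:
 * an order-N allocation covers 1UL << N pages. */
static inline void set_page_dat_compat(struct page *page, unsigned int order)
{
	__arch_set_page_dat(page_to_virt(page), 1UL << order);
}
```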
*/ - - cmma_init_nodat(); } void free_initmem(void) diff --git a/arch/s390/mm/maccess.c b/arch/s390/mm/maccess.c index c805b3e259..632c3a55fe 100644 --- a/arch/s390/mm/maccess.c +++ b/arch/s390/mm/maccess.c @@ -15,10 +15,10 @@ #include <linux/uio.h> #include <linux/io.h> #include <asm/asm-extable.h> -#include <asm/ctl_reg.h> #include <asm/abs_lowcore.h> #include <asm/stacktrace.h> #include <asm/maccess.h> +#include <asm/ctlreg.h> unsigned long __bootdata_preserved(__memcpy_real_area); pte_t *__bootdata_preserved(memcpy_real_ptep); diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c index 79a037f49f..01f9b39e65 100644 --- a/arch/s390/mm/page-states.c +++ b/arch/s390/mm/page-states.c @@ -7,205 +7,18 @@ * Author(s): Martin Schwidefsky <schwidefsky@de.ibm.com> */ -#include <linux/kernel.h> -#include <linux/errno.h> -#include <linux/types.h> #include <linux/mm.h> -#include <linux/memblock.h> -#include <linux/gfp.h> -#include <linux/init.h> -#include <asm/asm-extable.h> -#include <asm/facility.h> #include <asm/page-states.h> +#include <asm/sections.h> +#include <asm/page.h> -static int cmma_flag = 1; - -static int __init cmma(char *str) -{ - bool enabled; - - if (!kstrtobool(str, &enabled)) - cmma_flag = enabled; - return 1; -} -__setup("cmma=", cmma); - -static inline int cmma_test_essa(void) -{ - unsigned long tmp = 0; - int rc = -EOPNOTSUPP; - - /* test ESSA_GET_STATE */ - asm volatile( - " .insn rrf,0xb9ab0000,%[tmp],%[tmp],%[cmd],0\n" - "0: la %[rc],0\n" - "1:\n" - EX_TABLE(0b,1b) - : [rc] "+&d" (rc), [tmp] "+&d" (tmp) - : [cmd] "i" (ESSA_GET_STATE)); - return rc; -} - -void __init cmma_init(void) -{ - if (!cmma_flag) - return; - if (cmma_test_essa()) { - cmma_flag = 0; - return; - } - if (test_facility(147)) - cmma_flag = 2; -} - -static inline void set_page_unused(struct page *page, int order) -{ - int i, rc; - - for (i = 0; i < (1 << order); i++) - asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" - : "=&d" (rc) - : "a" (page_to_phys(page + i)), - "i" (ESSA_SET_UNUSED)); -} - -static inline void set_page_stable_dat(struct page *page, int order) -{ - int i, rc; - - for (i = 0; i < (1 << order); i++) - asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" - : "=&d" (rc) - : "a" (page_to_phys(page + i)), - "i" (ESSA_SET_STABLE)); -} - -static inline void set_page_stable_nodat(struct page *page, int order) -{ - int i, rc; - - for (i = 0; i < (1 << order); i++) - asm volatile(".insn rrf,0xb9ab0000,%0,%1,%2,0" - : "=&d" (rc) - : "a" (page_to_phys(page + i)), - "i" (ESSA_SET_STABLE_NODAT)); -} - -static void mark_kernel_pmd(pud_t *pud, unsigned long addr, unsigned long end) -{ - unsigned long next; - struct page *page; - pmd_t *pmd; - - pmd = pmd_offset(pud, addr); - do { - next = pmd_addr_end(addr, end); - if (pmd_none(*pmd) || pmd_large(*pmd)) - continue; - page = phys_to_page(pmd_val(*pmd)); - set_bit(PG_arch_1, &page->flags); - } while (pmd++, addr = next, addr != end); -} - -static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end) -{ - unsigned long next; - struct page *page; - pud_t *pud; - int i; - - pud = pud_offset(p4d, addr); - do { - next = pud_addr_end(addr, end); - if (pud_none(*pud) || pud_large(*pud)) - continue; - if (!pud_folded(*pud)) { - page = phys_to_page(pud_val(*pud)); - for (i = 0; i < 4; i++) - set_bit(PG_arch_1, &page[i].flags); - } - mark_kernel_pmd(pud, addr, next); - } while (pud++, addr = next, addr != end); -} - -static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end) -{ - unsigned long next; - struct page *page; 
- p4d_t *p4d; - int i; - - p4d = p4d_offset(pgd, addr); - do { - next = p4d_addr_end(addr, end); - if (p4d_none(*p4d)) - continue; - if (!p4d_folded(*p4d)) { - page = phys_to_page(p4d_val(*p4d)); - for (i = 0; i < 4; i++) - set_bit(PG_arch_1, &page[i].flags); - } - mark_kernel_pud(p4d, addr, next); - } while (p4d++, addr = next, addr != end); -} - -static void mark_kernel_pgd(void) -{ - unsigned long addr, next; - struct page *page; - pgd_t *pgd; - int i; - - addr = 0; - pgd = pgd_offset_k(addr); - do { - next = pgd_addr_end(addr, MODULES_END); - if (pgd_none(*pgd)) - continue; - if (!pgd_folded(*pgd)) { - page = phys_to_page(pgd_val(*pgd)); - for (i = 0; i < 4; i++) - set_bit(PG_arch_1, &page[i].flags); - } - mark_kernel_p4d(pgd, addr, next); - } while (pgd++, addr = next, addr != MODULES_END); -} - -void __init cmma_init_nodat(void) -{ - struct page *page; - unsigned long start, end, ix; - int i; - - if (cmma_flag < 2) - return; - /* Mark pages used in kernel page tables */ - mark_kernel_pgd(); - page = virt_to_page(&swapper_pg_dir); - for (i = 0; i < 4; i++) - set_bit(PG_arch_1, &page[i].flags); - page = virt_to_page(&invalid_pg_dir); - for (i = 0; i < 4; i++) - set_bit(PG_arch_1, &page[i].flags); - - /* Set all kernel pages not used for page tables to stable/no-dat */ - for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { - page = pfn_to_page(start); - for (ix = start; ix < end; ix++, page++) { - if (__test_and_clear_bit(PG_arch_1, &page->flags)) - continue; /* skip page table pages */ - if (!list_empty(&page->lru)) - continue; /* skip free pages */ - set_page_stable_nodat(page, 0); - } - } -} +int __bootdata_preserved(cmma_flag); void arch_free_page(struct page *page, int order) { if (!cmma_flag) return; - set_page_unused(page, order); + __set_page_unused(page_to_virt(page), 1UL << order); } void arch_alloc_page(struct page *page, int order) @@ -213,14 +26,7 @@ void arch_alloc_page(struct page *page, int order) if (!cmma_flag) return; if (cmma_flag < 2) - set_page_stable_dat(page, order); + __set_page_stable_dat(page_to_virt(page), 1UL << order); else - set_page_stable_nodat(page, order); -} - -void arch_set_page_dat(struct page *page, int order) -{ - if (!cmma_flag) - return; - set_page_stable_dat(page, order); + __set_page_stable_nodat(page_to_virt(page), 1UL << order); } diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c index b87e96c64b..631e3a4ee2 100644 --- a/arch/s390/mm/pageattr.c +++ b/arch/s390/mm/pageattr.c @@ -75,7 +75,7 @@ static void pgt_set(unsigned long *old, unsigned long new, unsigned long addr, break; } table = (unsigned long *)((unsigned long)old & mask); - crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce); + crdte(*old, new, table, dtt, addr, S390_lowcore.kernel_asce.val); } else if (MACHINE_HAS_IDTE) { cspg(old, *old, new); } else { diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c index 6396d6b06a..008e487c94 100644 --- a/arch/s390/mm/pgalloc.c +++ b/arch/s390/mm/pgalloc.c @@ -10,6 +10,7 @@ #include <linux/slab.h> #include <linux/mm.h> #include <asm/mmu_context.h> +#include <asm/page-states.h> #include <asm/pgalloc.h> #include <asm/gmap.h> #include <asm/tlb.h> @@ -30,7 +31,6 @@ static struct ctl_table page_table_sysctl[] = { .extra1 = SYSCTL_ZERO, .extra2 = SYSCTL_ONE, }, - { } }; static int __init page_table_register_sysctl(void) @@ -44,11 +44,13 @@ __initcall(page_table_register_sysctl); unsigned long *crst_table_alloc(struct mm_struct *mm) { struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER); + 
unsigned long *table; if (!ptdesc) return NULL; - arch_set_page_dat(ptdesc_page(ptdesc), CRST_ALLOC_ORDER); - return (unsigned long *) ptdesc_to_virt(ptdesc); + table = ptdesc_to_virt(ptdesc); + __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER); + return table; } void crst_table_free(struct mm_struct *mm, unsigned long *table) @@ -62,8 +64,8 @@ static void __crst_table_upgrade(void *arg) /* change all active ASCEs to avoid the creation of new TLBs */ if (current->active_mm == mm) { - S390_lowcore.user_asce = mm->context.asce; - __ctl_load(S390_lowcore.user_asce, 7, 7); + S390_lowcore.user_asce.val = mm->context.asce; + local_ctl_load(7, &S390_lowcore.user_asce); } __tlb_flush_local(); } @@ -131,11 +133,6 @@ err_p4d: return -ENOMEM; } -static inline unsigned int atomic_xor_bits(atomic_t *v, unsigned int bits) -{ - return atomic_fetch_xor(bits, v) ^ bits; -} - #ifdef CONFIG_PGSTE struct page *page_table_alloc_pgste(struct mm_struct *mm) @@ -146,7 +143,7 @@ struct page *page_table_alloc_pgste(struct mm_struct *mm) ptdesc = pagetable_alloc(GFP_KERNEL, 0); if (ptdesc) { table = (u64 *)ptdesc_to_virt(ptdesc); - arch_set_page_dat(virt_to_page(table), 0); + __arch_set_page_dat(table, 1); memset64(table, _PAGE_INVALID, PTRS_PER_PTE); memset64(table + PTRS_PER_PTE, 0, PTRS_PER_PTE); } @@ -160,125 +157,11 @@ void page_table_free_pgste(struct page *page) #endif /* CONFIG_PGSTE */ -/* - * A 2KB-pgtable is either upper or lower half of a normal page. - * The second half of the page may be unused or used as another - * 2KB-pgtable. - * - * Whenever possible the parent page for a new 2KB-pgtable is picked - * from the list of partially allocated pages mm_context_t::pgtable_list. - * In case the list is empty a new parent page is allocated and added to - * the list. - * - * When a parent page gets fully allocated it contains 2KB-pgtables in both - * upper and lower halves and is removed from mm_context_t::pgtable_list. - * - * When 2KB-pgtable is freed from to fully allocated parent page that - * page turns partially allocated and added to mm_context_t::pgtable_list. - * - * If 2KB-pgtable is freed from the partially allocated parent page that - * page turns unused and gets removed from mm_context_t::pgtable_list. - * Furthermore, the unused parent page is released. - * - * As follows from the above, no unallocated or fully allocated parent - * pages are contained in mm_context_t::pgtable_list. - * - * The upper byte (bits 24-31) of the parent page _refcount is used - * for tracking contained 2KB-pgtables and has the following format: - * - * PP AA - * 01234567 upper byte (bits 24-31) of struct page::_refcount - * || || - * || |+--- upper 2KB-pgtable is allocated - * || +---- lower 2KB-pgtable is allocated - * |+------- upper 2KB-pgtable is pending for removal - * +-------- lower 2KB-pgtable is pending for removal - * - * (See commit 620b4e903179 ("s390: use _refcount for pgtables") on why - * using _refcount is possible). - * - * When 2KB-pgtable is allocated the corresponding AA bit is set to 1. - * The parent page is either: - * - added to mm_context_t::pgtable_list in case the second half of the - * parent page is still unallocated; - * - removed from mm_context_t::pgtable_list in case both hales of the - * parent page are allocated; - * These operations are protected with mm_context_t::lock. - * - * When 2KB-pgtable is deallocated the corresponding AA bit is set to 0 - * and the corresponding PP bit is set to 1 in a single atomic operation. 
- * Thus, PP and AA bits corresponding to the same 2KB-pgtable are mutually - * exclusive and may never be both set to 1! - * The parent page is either: - * - added to mm_context_t::pgtable_list in case the second half of the - * parent page is still allocated; - * - removed from mm_context_t::pgtable_list in case the second half of - * the parent page is unallocated; - * These operations are protected with mm_context_t::lock. - * - * It is important to understand that mm_context_t::lock only protects - * mm_context_t::pgtable_list and AA bits, but not the parent page itself - * and PP bits. - * - * Releasing the parent page happens whenever the PP bit turns from 1 to 0, - * while both AA bits and the second PP bit are already unset. Then the - * parent page does not contain any 2KB-pgtable fragment anymore, and it has - * also been removed from mm_context_t::pgtable_list. It is safe to release - * the page therefore. - * - * PGSTE memory spaces use full 4KB-pgtables and do not need most of the - * logic described above. Both AA bits are set to 1 to denote a 4KB-pgtable - * while the PP bits are never used, nor such a page is added to or removed - * from mm_context_t::pgtable_list. - * - * pte_free_defer() overrides those rules: it takes the page off pgtable_list, - * and prevents both 2K fragments from being reused. pte_free_defer() has to - * guarantee that its pgtable cannot be reused before the RCU grace period - * has elapsed (which page_table_free_rcu() does not actually guarantee). - * But for simplicity, because page->rcu_head overlays page->lru, and because - * the RCU callback might not be called before the mm_context_t has been freed, - * pte_free_defer() in this implementation prevents both fragments from being - * reused, and delays making the call to RCU until both fragments are freed. - */ unsigned long *page_table_alloc(struct mm_struct *mm) { - unsigned long *table; struct ptdesc *ptdesc; - unsigned int mask, bit; - - /* Try to get a fragment of a 4K page as a 2K page table */ - if (!mm_alloc_pgste(mm)) { - table = NULL; - spin_lock_bh(&mm->context.lock); - if (!list_empty(&mm->context.pgtable_list)) { - ptdesc = list_first_entry(&mm->context.pgtable_list, - struct ptdesc, pt_list); - mask = atomic_read(&ptdesc->_refcount) >> 24; - /* - * The pending removal bits must also be checked. - * Failure to do so might lead to an impossible - * value of (i.e 0x13 or 0x23) written to _refcount. - * Such values violate the assumption that pending and - * allocation bits are mutually exclusive, and the rest - * of the code unrails as result. That could lead to - * a whole bunch of races and corruptions. 
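The helper this deleted comment leans on, atomic_xor_bits(), returned the post-update value: atomic_fetch_xor() yields the old value, and xoring the bits in once more reconstructs the new one. A toy user-space illustration of one 2K-fragment free (hypothetical values):

```c
#include <stdatomic.h>
#include <stdio.h>

/* atomic_xor_bits(v, bits) == value after the xor: fetch_xor returns the
 * old value; xor the bits once more to get the new one. */
static unsigned int atomic_xor_bits(atomic_uint *v, unsigned int bits)
{
	return atomic_fetch_xor(v, bits) ^ bits;
}

int main(void)
{
	atomic_uint refcount = 0x01000000; /* AA bit of the lower 2K fragment */

	/* Free the fragment: clear its AA bit, set its PP (pending) bit. */
	unsigned int now = atomic_xor_bits(&refcount, 0x11000000);

	printf("%08x\n", now); /* 10000000: allocated -> pending */
	return 0;
}
```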
- */ - mask = (mask | (mask >> 4)) & 0x03U; - if (mask != 0x03U) { - table = (unsigned long *) ptdesc_to_virt(ptdesc); - bit = mask & 1; /* =1 -> second 2K */ - if (bit) - table += PTRS_PER_PTE; - atomic_xor_bits(&ptdesc->_refcount, - 0x01U << (bit + 24)); - list_del_init(&ptdesc->pt_list); - } - } - spin_unlock_bh(&mm->context.lock); - if (table) - return table; - } - /* Allocate a fresh page */ + unsigned long *table; + ptdesc = pagetable_alloc(GFP_KERNEL, 0); if (!ptdesc) return NULL; @@ -286,177 +169,57 @@ unsigned long *page_table_alloc(struct mm_struct *mm) pagetable_free(ptdesc); return NULL; } - arch_set_page_dat(ptdesc_page(ptdesc), 0); - /* Initialize page table */ - table = (unsigned long *) ptdesc_to_virt(ptdesc); - if (mm_alloc_pgste(mm)) { - /* Return 4K page table with PGSTEs */ - INIT_LIST_HEAD(&ptdesc->pt_list); - atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); - memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); - memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE); - } else { - /* Return the first 2K fragment of the page */ - atomic_xor_bits(&ptdesc->_refcount, 0x01U << 24); - memset64((u64 *)table, _PAGE_INVALID, 2 * PTRS_PER_PTE); - spin_lock_bh(&mm->context.lock); - list_add(&ptdesc->pt_list, &mm->context.pgtable_list); - spin_unlock_bh(&mm->context.lock); - } + table = ptdesc_to_virt(ptdesc); + __arch_set_page_dat(table, 1); + /* pt_list is used by gmap only */ + INIT_LIST_HEAD(&ptdesc->pt_list); + memset64((u64 *)table, _PAGE_INVALID, PTRS_PER_PTE); + memset64((u64 *)table + PTRS_PER_PTE, 0, PTRS_PER_PTE); return table; } -static void page_table_release_check(struct page *page, void *table, - unsigned int half, unsigned int mask) +static void pagetable_pte_dtor_free(struct ptdesc *ptdesc) { - char msg[128]; - - if (!IS_ENABLED(CONFIG_DEBUG_VM)) - return; - if (!mask && list_empty(&page->lru)) - return; - snprintf(msg, sizeof(msg), - "Invalid pgtable %p release half 0x%02x mask 0x%02x", - table, half, mask); - dump_page(page, msg); -} - -static void pte_free_now(struct rcu_head *head) -{ - struct ptdesc *ptdesc; - - ptdesc = container_of(head, struct ptdesc, pt_rcu_head); pagetable_pte_dtor(ptdesc); pagetable_free(ptdesc); } void page_table_free(struct mm_struct *mm, unsigned long *table) { - unsigned int mask, bit, half; struct ptdesc *ptdesc = virt_to_ptdesc(table); - if (!mm_alloc_pgste(mm)) { - /* Free 2K page table fragment of a 4K page */ - bit = ((unsigned long) table & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t)); - spin_lock_bh(&mm->context.lock); - /* - * Mark the page for delayed release. The actual release - * will happen outside of the critical section from this - * function or from __tlb_remove_table() - */ - mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24)); - mask >>= 24; - if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) { - /* - * Other half is allocated, and neither half has had - * its free deferred: add page to head of list, to make - * this freed half available for immediate reuse. - */ - list_add(&ptdesc->pt_list, &mm->context.pgtable_list); - } else { - /* If page is on list, now remove it. 
*/ - list_del_init(&ptdesc->pt_list); - } - spin_unlock_bh(&mm->context.lock); - mask = atomic_xor_bits(&ptdesc->_refcount, 0x10U << (bit + 24)); - mask >>= 24; - if (mask != 0x00U) - return; - half = 0x01U << bit; - } else { - half = 0x03U; - mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); - mask >>= 24; - } - - page_table_release_check(ptdesc_page(ptdesc), table, half, mask); - if (folio_test_clear_active(ptdesc_folio(ptdesc))) - call_rcu(&ptdesc->pt_rcu_head, pte_free_now); - else - pte_free_now(&ptdesc->pt_rcu_head); + pagetable_pte_dtor_free(ptdesc); } -void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table, - unsigned long vmaddr) +void __tlb_remove_table(void *table) { - struct mm_struct *mm; - unsigned int bit, mask; struct ptdesc *ptdesc = virt_to_ptdesc(table); + struct page *page = ptdesc_page(ptdesc); - mm = tlb->mm; - if (mm_alloc_pgste(mm)) { - gmap_unlink(mm, table, vmaddr); - table = (unsigned long *) ((unsigned long)table | 0x03U); - tlb_remove_ptdesc(tlb, table); + if (compound_order(page) == CRST_ALLOC_ORDER) { + /* pmd, pud, or p4d */ + pagetable_free(ptdesc); return; } - bit = ((unsigned long) table & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t)); - spin_lock_bh(&mm->context.lock); - /* - * Mark the page for delayed release. The actual release will happen - * outside of the critical section from __tlb_remove_table() or from - * page_table_free() - */ - mask = atomic_xor_bits(&ptdesc->_refcount, 0x11U << (bit + 24)); - mask >>= 24; - if ((mask & 0x03U) && !folio_test_active(ptdesc_folio(ptdesc))) { - /* - * Other half is allocated, and neither half has had - * its free deferred: add page to end of list, to make - * this freed half available for reuse once its pending - * bit has been cleared by __tlb_remove_table(). - */ - list_add_tail(&ptdesc->pt_list, &mm->context.pgtable_list); - } else { - /* If page is on list, now remove it. 
*/ - list_del_init(&ptdesc->pt_list); - } - spin_unlock_bh(&mm->context.lock); - table = (unsigned long *) ((unsigned long) table | (0x01U << bit)); - tlb_remove_table(tlb, table); + pagetable_pte_dtor_free(ptdesc); } -void __tlb_remove_table(void *_table) +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static void pte_free_now(struct rcu_head *head) { - unsigned int mask = (unsigned long) _table & 0x03U, half = mask; - void *table = (void *)((unsigned long) _table ^ mask); - struct ptdesc *ptdesc = virt_to_ptdesc(table); - - switch (half) { - case 0x00U: /* pmd, pud, or p4d */ - pagetable_free(ptdesc); - return; - case 0x01U: /* lower 2K of a 4K page table */ - case 0x02U: /* higher 2K of a 4K page table */ - mask = atomic_xor_bits(&ptdesc->_refcount, mask << (4 + 24)); - mask >>= 24; - if (mask != 0x00U) - return; - break; - case 0x03U: /* 4K page table with pgstes */ - mask = atomic_xor_bits(&ptdesc->_refcount, 0x03U << 24); - mask >>= 24; - break; - } + struct ptdesc *ptdesc = container_of(head, struct ptdesc, pt_rcu_head); - page_table_release_check(ptdesc_page(ptdesc), table, half, mask); - if (folio_test_clear_active(ptdesc_folio(ptdesc))) - call_rcu(&ptdesc->pt_rcu_head, pte_free_now); - else - pte_free_now(&ptdesc->pt_rcu_head); + pagetable_pte_dtor_free(ptdesc); } -#ifdef CONFIG_TRANSPARENT_HUGEPAGE void pte_free_defer(struct mm_struct *mm, pgtable_t pgtable) { - struct page *page; + struct ptdesc *ptdesc = virt_to_ptdesc(pgtable); - page = virt_to_page(pgtable); - SetPageActive(page); - page_table_free(mm, (unsigned long *)pgtable); + call_rcu(&ptdesc->pt_rcu_head, pte_free_now); /* - * page_table_free() does not do the pgste gmap_unlink() which - * page_table_free_rcu() does: warn us if pgste ever reaches here. + * THPs are not allowed for KVM guests. Warn if pgste ever reaches here. + * Turn to the generic pte_free_defer() version once gmap is removed. 
*/ WARN_ON_ONCE(mm_has_pgste(mm)); } @@ -489,11 +252,10 @@ static unsigned long *base_crst_alloc(unsigned long val) unsigned long *table; struct ptdesc *ptdesc; - ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, CRST_ALLOC_ORDER); + ptdesc = pagetable_alloc(GFP_KERNEL, CRST_ALLOC_ORDER); if (!ptdesc) return NULL; table = ptdesc_address(ptdesc); - crst_table_init(table, val); return table; } diff --git a/arch/s390/mm/vmem.c b/arch/s390/mm/vmem.c index 6d276103c6..186a020857 100644 --- a/arch/s390/mm/vmem.c +++ b/arch/s390/mm/vmem.c @@ -15,6 +15,7 @@ #include <asm/page-states.h> #include <asm/cacheflush.h> #include <asm/nospec-branch.h> +#include <asm/ctlreg.h> #include <asm/pgalloc.h> #include <asm/setup.h> #include <asm/tlbflush.h> @@ -49,8 +50,7 @@ void *vmem_crst_alloc(unsigned long val) if (!table) return NULL; crst_table_init(table, val); - if (slab_is_available()) - arch_set_page_dat(virt_to_page(table), CRST_ALLOC_ORDER); + __arch_set_page_dat(table, 1UL << CRST_ALLOC_ORDER); return table; } @@ -66,6 +66,7 @@ pte_t __ref *vmem_pte_alloc(void) if (!pte) return NULL; memset64((u64 *)pte, _PAGE_INVALID, PTRS_PER_PTE); + __arch_set_page_dat(pte, 1); return pte; } @@ -502,6 +503,8 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, return ret; } +#ifdef CONFIG_MEMORY_HOTPLUG + void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap) { @@ -510,6 +513,8 @@ void vmemmap_free(unsigned long start, unsigned long end, mutex_unlock(&vmem_mutex); } +#endif + void vmem_remove_mapping(unsigned long start, unsigned long size) { mutex_lock(&vmem_mutex); @@ -663,7 +668,7 @@ void __init vmem_map_init(void) __set_memory_4k(__va(0), RELOC_HIDE(__va(0), ident_map_size)); } if (MACHINE_HAS_NX) - ctl_set_bit(0, 20); + system_ctl_set_bit(0, CR0_INSTRUCTION_EXEC_PROTECTION_BIT); pr_info("Write protected kernel read-only data: %luk\n", (unsigned long)(__end_rodata - _stext) >> 10); } |
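One last pattern from this commit: fault.c traded the store_indication variable for a jump label, patched once at early init so every later fault pays only a patched branch instead of a load-and-test. The idiom in isolation (a sketch; feature_probe() is a hypothetical stand-in for the test_facility(75) call used in the diff):

```c
#include <linux/jump_label.h>
#include <linux/init.h>

static DEFINE_STATIC_KEY_FALSE(have_feature);

static bool feature_probe(void)
{
	return true; /* hypothetical hardware probe, cf. test_facility(75) */
}

static int __init feature_init(void)
{
	if (feature_probe())
		static_branch_enable(&have_feature);
	return 0;
}
early_initcall(feature_init);

static bool feature_fast_path(void)
{
	/* Compiles to a patched nop/branch rather than a memory load. */
	return static_branch_likely(&have_feature);
}
```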