author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:05:51 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-27 10:05:51 +0000
commit     5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree       a94efe259b9009378be6d90eb30d2b019d95c194 /arch/arm64/mm
parent     Initial commit. (diff)
    Adding upstream version 5.10.209.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to '')
 -rw-r--r--  arch/arm64/mm/Makefile           |   15
 -rw-r--r--  arch/arm64/mm/cache.S            |  244
 -rw-r--r--  arch/arm64/mm/context.c          |  424
 -rw-r--r--  arch/arm64/mm/copypage.c         |   37
 -rw-r--r--  arch/arm64/mm/dma-mapping.c      |   59
 -rw-r--r--  arch/arm64/mm/extable.c          |   22
 -rw-r--r--  arch/arm64/mm/fault.c            |  855
 -rw-r--r--  arch/arm64/mm/flush.c            |   96
 -rw-r--r--  arch/arm64/mm/hugetlbpage.c      |  518
 -rw-r--r--  arch/arm64/mm/init.c             |  613
 -rw-r--r--  arch/arm64/mm/ioremap.c          |  110
 -rw-r--r--  arch/arm64/mm/kasan_init.c       |  276
 -rw-r--r--  arch/arm64/mm/mmap.c             |   70
 -rw-r--r--  arch/arm64/mm/mmu.c              | 1595
 -rw-r--r--  arch/arm64/mm/mteswap.c          |   83
 -rw-r--r--  arch/arm64/mm/numa.c             |  464
 -rw-r--r--  arch/arm64/mm/pageattr.c         |  234
 -rw-r--r--  arch/arm64/mm/pgd.c              |   56
 -rw-r--r--  arch/arm64/mm/physaddr.c         |   31
 -rw-r--r--  arch/arm64/mm/proc.S             |  506
 -rw-r--r--  arch/arm64/mm/ptdump.c           |  393
 -rw-r--r--  arch/arm64/mm/ptdump_debugfs.c   |   22
 22 files changed, 6723 insertions(+), 0 deletions(-)
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile new file mode 100644 index 000000000..5ead3c3de --- /dev/null +++ b/arch/arm64/mm/Makefile @@ -0,0 +1,15 @@ +# SPDX-License-Identifier: GPL-2.0 +obj-y := dma-mapping.o extable.o fault.o init.o \ + cache.o copypage.o flush.o \ + ioremap.o mmap.o pgd.o mmu.o \ + context.o proc.o pageattr.o +obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o +obj-$(CONFIG_PTDUMP_CORE) += ptdump.o +obj-$(CONFIG_PTDUMP_DEBUGFS) += ptdump_debugfs.o +obj-$(CONFIG_NUMA) += numa.o +obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o +obj-$(CONFIG_ARM64_MTE) += mteswap.o +KASAN_SANITIZE_physaddr.o += n + +obj-$(CONFIG_KASAN) += kasan_init.o +KASAN_SANITIZE_kasan_init.o := n diff --git a/arch/arm64/mm/cache.S b/arch/arm64/mm/cache.S new file mode 100644 index 000000000..7b8158ae3 --- /dev/null +++ b/arch/arm64/mm/cache.S @@ -0,0 +1,244 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Cache maintenance + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/errno.h> +#include <linux/linkage.h> +#include <linux/init.h> +#include <asm/assembler.h> +#include <asm/cpufeature.h> +#include <asm/alternative.h> +#include <asm/asm-uaccess.h> + +/* + * flush_icache_range(start,end) + * + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * - start - virtual start address of region + * - end - virtual end address of region + */ +SYM_FUNC_START(__flush_icache_range) + /* FALLTHROUGH */ + +/* + * __flush_cache_user_range(start,end) + * + * Ensure that the I and D caches are coherent within specified region. + * This is typically used when code has been written to a memory region, + * and will be executed. + * + * - start - virtual start address of region + * - end - virtual end address of region + */ +SYM_FUNC_START(__flush_cache_user_range) + uaccess_ttbr0_enable x2, x3, x4 +alternative_if ARM64_HAS_CACHE_IDC + dsb ishst + b 7f +alternative_else_nop_endif + dcache_line_size x2, x3 + sub x3, x2, #1 + bic x4, x0, x3 +1: +user_alt 9f, "dc cvau, x4", "dc civac, x4", ARM64_WORKAROUND_CLEAN_CACHE + add x4, x4, x2 + cmp x4, x1 + b.lo 1b + dsb ish + +7: +alternative_if ARM64_HAS_CACHE_DIC + isb + b 8f +alternative_else_nop_endif + invalidate_icache_by_line x0, x1, x2, x3, 9f +8: mov x0, #0 +1: + uaccess_ttbr0_disable x1, x2 + ret +9: + mov x0, #-EFAULT + b 1b +SYM_FUNC_END(__flush_icache_range) +SYM_FUNC_END(__flush_cache_user_range) + +/* + * invalidate_icache_range(start,end) + * + * Ensure that the I cache is invalid within specified region. + * + * - start - virtual start address of region + * - end - virtual end address of region + */ +SYM_FUNC_START(invalidate_icache_range) +alternative_if ARM64_HAS_CACHE_DIC + mov x0, xzr + isb + ret +alternative_else_nop_endif + + uaccess_ttbr0_enable x2, x3, x4 + + invalidate_icache_by_line x0, x1, x2, x3, 2f + mov x0, xzr +1: + uaccess_ttbr0_disable x1, x2 + ret +2: + mov x0, #-EFAULT + b 1b +SYM_FUNC_END(invalidate_icache_range) + +/* + * __flush_dcache_area(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned and invalidated to the PoC. 
+ * + * - kaddr - kernel address + * - size - size in question + */ +SYM_FUNC_START_PI(__flush_dcache_area) + dcache_by_line_op civac, sy, x0, x1, x2, x3 + ret +SYM_FUNC_END_PI(__flush_dcache_area) + +/* + * __clean_dcache_area_pou(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned to the PoU. + * + * - kaddr - kernel address + * - size - size in question + */ +SYM_FUNC_START(__clean_dcache_area_pou) +alternative_if ARM64_HAS_CACHE_IDC + dsb ishst + ret +alternative_else_nop_endif + dcache_by_line_op cvau, ish, x0, x1, x2, x3 + ret +SYM_FUNC_END(__clean_dcache_area_pou) + +/* + * __inval_dcache_area(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are invalidated. Any partial lines at the ends of the interval are + * also cleaned to PoC to prevent data loss. + * + * - kaddr - kernel address + * - size - size in question + */ +SYM_FUNC_START_LOCAL(__dma_inv_area) +SYM_FUNC_START_PI(__inval_dcache_area) + /* FALLTHROUGH */ + +/* + * __dma_inv_area(start, size) + * - start - virtual start address of region + * - size - size in question + */ + add x1, x1, x0 + dcache_line_size x2, x3 + sub x3, x2, #1 + tst x1, x3 // end cache line aligned? + bic x1, x1, x3 + b.eq 1f + dc civac, x1 // clean & invalidate D / U line +1: tst x0, x3 // start cache line aligned? + bic x0, x0, x3 + b.eq 2f + dc civac, x0 // clean & invalidate D / U line + b 3f +2: dc ivac, x0 // invalidate D / U line +3: add x0, x0, x2 + cmp x0, x1 + b.lo 2b + dsb sy + ret +SYM_FUNC_END_PI(__inval_dcache_area) +SYM_FUNC_END(__dma_inv_area) + +/* + * __clean_dcache_area_poc(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned to the PoC. + * + * - kaddr - kernel address + * - size - size in question + */ +SYM_FUNC_START_LOCAL(__dma_clean_area) +SYM_FUNC_START_PI(__clean_dcache_area_poc) + /* FALLTHROUGH */ + +/* + * __dma_clean_area(start, size) + * - start - virtual start address of region + * - size - size in question + */ + dcache_by_line_op cvac, sy, x0, x1, x2, x3 + ret +SYM_FUNC_END_PI(__clean_dcache_area_poc) +SYM_FUNC_END(__dma_clean_area) + +/* + * __clean_dcache_area_pop(kaddr, size) + * + * Ensure that any D-cache lines for the interval [kaddr, kaddr+size) + * are cleaned to the PoP. 
+ * + * - kaddr - kernel address + * - size - size in question + */ +SYM_FUNC_START_PI(__clean_dcache_area_pop) + alternative_if_not ARM64_HAS_DCPOP + b __clean_dcache_area_poc + alternative_else_nop_endif + dcache_by_line_op cvap, sy, x0, x1, x2, x3 + ret +SYM_FUNC_END_PI(__clean_dcache_area_pop) + +/* + * __dma_flush_area(start, size) + * + * clean & invalidate D / U line + * + * - start - virtual start address of region + * - size - size in question + */ +SYM_FUNC_START_PI(__dma_flush_area) + dcache_by_line_op civac, sy, x0, x1, x2, x3 + ret +SYM_FUNC_END_PI(__dma_flush_area) + +/* + * __dma_map_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +SYM_FUNC_START_PI(__dma_map_area) + b __dma_clean_area +SYM_FUNC_END_PI(__dma_map_area) + +/* + * __dma_unmap_area(start, size, dir) + * - start - kernel virtual start address + * - size - size of region + * - dir - DMA direction + */ +SYM_FUNC_START_PI(__dma_unmap_area) + cmp w2, #DMA_TO_DEVICE + b.ne __dma_inv_area + ret +SYM_FUNC_END_PI(__dma_unmap_area) diff --git a/arch/arm64/mm/context.c b/arch/arm64/mm/context.c new file mode 100644 index 000000000..001737a8f --- /dev/null +++ b/arch/arm64/mm/context.c @@ -0,0 +1,424 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/context.c + * + * Copyright (C) 2002-2003 Deep Blue Solutions Ltd, all rights reserved. + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/sched.h> +#include <linux/slab.h> +#include <linux/mm.h> + +#include <asm/cpufeature.h> +#include <asm/mmu_context.h> +#include <asm/smp.h> +#include <asm/tlbflush.h> + +static u32 asid_bits; +static DEFINE_RAW_SPINLOCK(cpu_asid_lock); + +static atomic64_t asid_generation; +static unsigned long *asid_map; + +static DEFINE_PER_CPU(atomic64_t, active_asids); +static DEFINE_PER_CPU(u64, reserved_asids); +static cpumask_t tlb_flush_pending; + +static unsigned long max_pinned_asids; +static unsigned long nr_pinned_asids; +static unsigned long *pinned_asid_map; + +#define ASID_MASK (~GENMASK(asid_bits - 1, 0)) +#define ASID_FIRST_VERSION (1UL << asid_bits) + +#define NUM_USER_ASIDS ASID_FIRST_VERSION +#define asid2idx(asid) ((asid) & ~ASID_MASK) +#define idx2asid(idx) asid2idx(idx) + +/* Get the ASIDBits supported by the current CPU */ +static u32 get_cpu_asid_bits(void) +{ + u32 asid; + int fld = cpuid_feature_extract_unsigned_field(read_cpuid(ID_AA64MMFR0_EL1), + ID_AA64MMFR0_ASID_SHIFT); + + switch (fld) { + default: + pr_warn("CPU%d: Unknown ASID size (%d); assuming 8-bit\n", + smp_processor_id(), fld); + fallthrough; + case 0: + asid = 8; + break; + case 2: + asid = 16; + } + + return asid; +} + +/* Check if the current cpu's ASIDBits is compatible with asid_bits */ +void verify_cpu_asid_bits(void) +{ + u32 asid = get_cpu_asid_bits(); + + if (asid < asid_bits) { + /* + * We cannot decrease the ASID size at runtime, so panic if we support + * fewer ASID bits than the boot CPU. + */ + pr_crit("CPU%d: smaller ASID size(%u) than boot CPU (%u)\n", + smp_processor_id(), asid, asid_bits); + cpu_panic_kernel(); + } +} + +static void set_kpti_asid_bits(unsigned long *map) +{ + unsigned int len = BITS_TO_LONGS(NUM_USER_ASIDS) * sizeof(unsigned long); + /* + * In case of KPTI kernel/user ASIDs are allocated in + * pairs, the bottom bit distinguishes the two: if it + * is set, then the ASID will map only userspace. Thus + * mark even as reserved for kernel. 
+ */ + memset(map, 0xaa, len); +} + +static void set_reserved_asid_bits(void) +{ + if (pinned_asid_map) + bitmap_copy(asid_map, pinned_asid_map, NUM_USER_ASIDS); + else if (arm64_kernel_unmapped_at_el0()) + set_kpti_asid_bits(asid_map); + else + bitmap_clear(asid_map, 0, NUM_USER_ASIDS); +} + +#define asid_gen_match(asid) \ + (!(((asid) ^ atomic64_read(&asid_generation)) >> asid_bits)) + +static void flush_context(void) +{ + int i; + u64 asid; + + /* Update the list of reserved ASIDs and the ASID bitmap. */ + set_reserved_asid_bits(); + + for_each_possible_cpu(i) { + asid = atomic64_xchg_relaxed(&per_cpu(active_asids, i), 0); + /* + * If this CPU has already been through a + * rollover, but hasn't run another task in + * the meantime, we must preserve its reserved + * ASID, as this is the only trace we have of + * the process it is still running. + */ + if (asid == 0) + asid = per_cpu(reserved_asids, i); + __set_bit(asid2idx(asid), asid_map); + per_cpu(reserved_asids, i) = asid; + } + + /* + * Queue a TLB invalidation for each CPU to perform on next + * context-switch + */ + cpumask_setall(&tlb_flush_pending); +} + +static bool check_update_reserved_asid(u64 asid, u64 newasid) +{ + int cpu; + bool hit = false; + + /* + * Iterate over the set of reserved ASIDs looking for a match. + * If we find one, then we can update our mm to use newasid + * (i.e. the same ASID in the current generation) but we can't + * exit the loop early, since we need to ensure that all copies + * of the old ASID are updated to reflect the mm. Failure to do + * so could result in us missing the reserved ASID in a future + * generation. + */ + for_each_possible_cpu(cpu) { + if (per_cpu(reserved_asids, cpu) == asid) { + hit = true; + per_cpu(reserved_asids, cpu) = newasid; + } + } + + return hit; +} + +static u64 new_context(struct mm_struct *mm) +{ + static u32 cur_idx = 1; + u64 asid = atomic64_read(&mm->context.id); + u64 generation = atomic64_read(&asid_generation); + + if (asid != 0) { + u64 newasid = generation | (asid & ~ASID_MASK); + + /* + * If our current ASID was active during a rollover, we + * can continue to use it and this was just a false alarm. + */ + if (check_update_reserved_asid(asid, newasid)) + return newasid; + + /* + * If it is pinned, we can keep using it. Note that reserved + * takes priority, because even if it is also pinned, we need to + * update the generation into the reserved_asids. + */ + if (refcount_read(&mm->context.pinned)) + return newasid; + + /* + * We had a valid ASID in a previous life, so try to re-use + * it if possible. + */ + if (!__test_and_set_bit(asid2idx(asid), asid_map)) + return newasid; + } + + /* + * Allocate a free ASID. If we can't find one, take a note of the + * currently active ASIDs and mark the TLBs as requiring flushes. We + * always count from ASID #2 (index 1), as we use ASID #0 when setting + * a reserved TTBR0 for the init_mm and we allocate ASIDs in even/odd + * pairs. 
+ */ + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, cur_idx); + if (asid != NUM_USER_ASIDS) + goto set_asid; + + /* We're out of ASIDs, so increment the global generation count */ + generation = atomic64_add_return_relaxed(ASID_FIRST_VERSION, + &asid_generation); + flush_context(); + + /* We have more ASIDs than CPUs, so this will always succeed */ + asid = find_next_zero_bit(asid_map, NUM_USER_ASIDS, 1); + +set_asid: + __set_bit(asid, asid_map); + cur_idx = asid; + return idx2asid(asid) | generation; +} + +void check_and_switch_context(struct mm_struct *mm) +{ + unsigned long flags; + unsigned int cpu; + u64 asid, old_active_asid; + + if (system_supports_cnp()) + cpu_set_reserved_ttbr0(); + + asid = atomic64_read(&mm->context.id); + + /* + * The memory ordering here is subtle. + * If our active_asids is non-zero and the ASID matches the current + * generation, then we update the active_asids entry with a relaxed + * cmpxchg. Racing with a concurrent rollover means that either: + * + * - We get a zero back from the cmpxchg and end up waiting on the + * lock. Taking the lock synchronises with the rollover and so + * we are forced to see the updated generation. + * + * - We get a valid ASID back from the cmpxchg, which means the + * relaxed xchg in flush_context will treat us as reserved + * because atomic RmWs are totally ordered for a given location. + */ + old_active_asid = atomic64_read(this_cpu_ptr(&active_asids)); + if (old_active_asid && asid_gen_match(asid) && + atomic64_cmpxchg_relaxed(this_cpu_ptr(&active_asids), + old_active_asid, asid)) + goto switch_mm_fastpath; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + /* Check that our ASID belongs to the current generation. */ + asid = atomic64_read(&mm->context.id); + if (!asid_gen_match(asid)) { + asid = new_context(mm); + atomic64_set(&mm->context.id, asid); + } + + cpu = smp_processor_id(); + if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) + local_flush_tlb_all(); + + atomic64_set(this_cpu_ptr(&active_asids), asid); + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + +switch_mm_fastpath: + + arm64_apply_bp_hardening(); + + /* + * Defer TTBR0_EL1 setting for user threads to uaccess_enable() when + * emulating PAN. + */ + if (!system_uses_ttbr0_pan()) + cpu_switch_mm(mm->pgd, mm); +} + +unsigned long arm64_mm_context_get(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid; + + if (!pinned_asid_map) + return 0; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + asid = atomic64_read(&mm->context.id); + + if (refcount_inc_not_zero(&mm->context.pinned)) + goto out_unlock; + + if (nr_pinned_asids >= max_pinned_asids) { + asid = 0; + goto out_unlock; + } + + if (!asid_gen_match(asid)) { + /* + * We went through one or more rollover since that ASID was + * used. Ensure that it is still valid, or generate a new one. 
+ */ + asid = new_context(mm); + atomic64_set(&mm->context.id, asid); + } + + nr_pinned_asids++; + __set_bit(asid2idx(asid), pinned_asid_map); + refcount_set(&mm->context.pinned, 1); + +out_unlock: + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); + + asid &= ~ASID_MASK; + + /* Set the equivalent of USER_ASID_BIT */ + if (asid && arm64_kernel_unmapped_at_el0()) + asid |= 1; + + return asid; +} +EXPORT_SYMBOL_GPL(arm64_mm_context_get); + +void arm64_mm_context_put(struct mm_struct *mm) +{ + unsigned long flags; + u64 asid = atomic64_read(&mm->context.id); + + if (!pinned_asid_map) + return; + + raw_spin_lock_irqsave(&cpu_asid_lock, flags); + + if (refcount_dec_and_test(&mm->context.pinned)) { + __clear_bit(asid2idx(asid), pinned_asid_map); + nr_pinned_asids--; + } + + raw_spin_unlock_irqrestore(&cpu_asid_lock, flags); +} +EXPORT_SYMBOL_GPL(arm64_mm_context_put); + +/* Errata workaround post TTBRx_EL1 update. */ +asmlinkage void post_ttbr_update_workaround(void) +{ + if (!IS_ENABLED(CONFIG_CAVIUM_ERRATUM_27456)) + return; + + asm(ALTERNATIVE("nop; nop; nop", + "ic iallu; dsb nsh; isb", + ARM64_WORKAROUND_CAVIUM_27456)); +} + +void cpu_do_switch_mm(phys_addr_t pgd_phys, struct mm_struct *mm) +{ + unsigned long ttbr1 = read_sysreg(ttbr1_el1); + unsigned long asid = ASID(mm); + unsigned long ttbr0 = phys_to_ttbr(pgd_phys); + + /* Skip CNP for the reserved ASID */ + if (system_supports_cnp() && asid) + ttbr0 |= TTBR_CNP_BIT; + + /* SW PAN needs a copy of the ASID in TTBR0 for entry */ + if (IS_ENABLED(CONFIG_ARM64_SW_TTBR0_PAN)) + ttbr0 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + /* Set ASID in TTBR1 since TCR.A1 is set */ + ttbr1 &= ~TTBR_ASID_MASK; + ttbr1 |= FIELD_PREP(TTBR_ASID_MASK, asid); + + write_sysreg(ttbr1, ttbr1_el1); + isb(); + write_sysreg(ttbr0, ttbr0_el1); + isb(); + post_ttbr_update_workaround(); +} + +static int asids_update_limit(void) +{ + unsigned long num_available_asids = NUM_USER_ASIDS; + + if (arm64_kernel_unmapped_at_el0()) { + num_available_asids /= 2; + if (pinned_asid_map) + set_kpti_asid_bits(pinned_asid_map); + } + /* + * Expect allocation after rollover to fail if we don't have at least + * one more ASID than CPUs. ASID #0 is reserved for init_mm. + */ + WARN_ON(num_available_asids - 1 <= num_possible_cpus()); + pr_info("ASID allocator initialised with %lu entries\n", + num_available_asids); + + /* + * There must always be an ASID available after rollover. Ensure that, + * even if all CPUs have a reserved ASID and the maximum number of ASIDs + * are pinned, there still is at least one empty slot in the ASID map. + */ + max_pinned_asids = num_available_asids - num_possible_cpus() - 2; + return 0; +} +arch_initcall(asids_update_limit); + +static int asids_init(void) +{ + asid_bits = get_cpu_asid_bits(); + atomic64_set(&asid_generation, ASID_FIRST_VERSION); + asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), sizeof(*asid_map), + GFP_KERNEL); + if (!asid_map) + panic("Failed to allocate bitmap for %lu ASIDs\n", + NUM_USER_ASIDS); + + pinned_asid_map = kcalloc(BITS_TO_LONGS(NUM_USER_ASIDS), + sizeof(*pinned_asid_map), GFP_KERNEL); + nr_pinned_asids = 0; + + /* + * We cannot call set_reserved_asid_bits() here because CPU + * caps are not finalized yet, so it is safer to assume KPTI + * and reserve kernel ASID's from beginning. 
+ */ + if (IS_ENABLED(CONFIG_UNMAP_KERNEL_AT_EL0)) + set_kpti_asid_bits(asid_map); + return 0; +} +early_initcall(asids_init); diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c new file mode 100644 index 000000000..24913271e --- /dev/null +++ b/arch/arm64/mm/copypage.c @@ -0,0 +1,37 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/copypage.c + * + * Copyright (C) 2002 Deep Blue Solutions Ltd, All Rights Reserved. + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/bitops.h> +#include <linux/mm.h> + +#include <asm/page.h> +#include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/mte.h> + +void copy_highpage(struct page *to, struct page *from) +{ + void *kto = page_address(to); + void *kfrom = page_address(from); + + copy_page(kto, kfrom); + + if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) { + set_bit(PG_mte_tagged, &to->flags); + mte_copy_page_tags(kto, kfrom); + } +} +EXPORT_SYMBOL(copy_highpage); + +void copy_user_highpage(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + copy_highpage(to, from); + flush_dcache_page(to); +} +EXPORT_SYMBOL_GPL(copy_user_highpage); diff --git a/arch/arm64/mm/dma-mapping.c b/arch/arm64/mm/dma-mapping.c new file mode 100644 index 000000000..93e87b287 --- /dev/null +++ b/arch/arm64/mm/dma-mapping.c @@ -0,0 +1,59 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + */ + +#include <linux/gfp.h> +#include <linux/cache.h> +#include <linux/dma-map-ops.h> +#include <linux/dma-iommu.h> +#include <xen/xen.h> +#include <xen/swiotlb-xen.h> + +#include <asm/cacheflush.h> + +void arch_sync_dma_for_device(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + __dma_map_area(phys_to_virt(paddr), size, dir); +} + +void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size, + enum dma_data_direction dir) +{ + __dma_unmap_area(phys_to_virt(paddr), size, dir); +} + +void arch_dma_prep_coherent(struct page *page, size_t size) +{ + __dma_flush_area(page_address(page), size); +} + +#ifdef CONFIG_IOMMU_DMA +void arch_teardown_dma_ops(struct device *dev) +{ + dev->dma_ops = NULL; +} +#endif + +void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, + const struct iommu_ops *iommu, bool coherent) +{ + int cls = cache_line_size_of_cpu(); + + WARN_TAINT(!coherent && cls > ARCH_DMA_MINALIGN, + TAINT_CPU_OUT_OF_SPEC, + "%s %s: ARCH_DMA_MINALIGN smaller than CTR_EL0.CWG (%d < %d)", + dev_driver_string(dev), dev_name(dev), + ARCH_DMA_MINALIGN, cls); + + dev->dma_coherent = coherent; + if (iommu) + iommu_setup_dma_ops(dev, dma_base, size); + +#ifdef CONFIG_XEN + if (xen_initial_domain()) + dev->dma_ops = &xen_swiotlb_dma_ops; +#endif +} diff --git a/arch/arm64/mm/extable.c b/arch/arm64/mm/extable.c new file mode 100644 index 000000000..aa0060178 --- /dev/null +++ b/arch/arm64/mm/extable.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Based on arch/arm/mm/extable.c + */ + +#include <linux/extable.h> +#include <linux/uaccess.h> + +int fixup_exception(struct pt_regs *regs) +{ + const struct exception_table_entry *fixup; + + fixup = search_exception_tables(instruction_pointer(regs)); + if (!fixup) + return 0; + + if (in_bpf_jit(regs)) + return arm64_bpf_fixup_exception(fixup, regs); + + regs->pc = (unsigned long)&fixup->fixup + fixup->fixup; + return 1; +} diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c new file mode 100644 index 
000000000..d8baedd16 --- /dev/null +++ b/arch/arm64/mm/fault.c @@ -0,0 +1,855 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/fault.c + * + * Copyright (C) 1995 Linus Torvalds + * Copyright (C) 1995-2004 Russell King + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/acpi.h> +#include <linux/bitfield.h> +#include <linux/extable.h> +#include <linux/signal.h> +#include <linux/mm.h> +#include <linux/hardirq.h> +#include <linux/init.h> +#include <linux/kprobes.h> +#include <linux/uaccess.h> +#include <linux/page-flags.h> +#include <linux/sched/signal.h> +#include <linux/sched/debug.h> +#include <linux/highmem.h> +#include <linux/perf_event.h> +#include <linux/preempt.h> +#include <linux/hugetlb.h> + +#include <asm/acpi.h> +#include <asm/bug.h> +#include <asm/cmpxchg.h> +#include <asm/cpufeature.h> +#include <asm/exception.h> +#include <asm/daifflags.h> +#include <asm/debug-monitors.h> +#include <asm/esr.h> +#include <asm/kprobes.h> +#include <asm/processor.h> +#include <asm/sysreg.h> +#include <asm/system_misc.h> +#include <asm/tlbflush.h> +#include <asm/traps.h> + +struct fault_info { + int (*fn)(unsigned long addr, unsigned int esr, + struct pt_regs *regs); + int sig; + int code; + const char *name; +}; + +static const struct fault_info fault_info[]; +static struct fault_info debug_fault_info[]; + +static inline const struct fault_info *esr_to_fault_info(unsigned int esr) +{ + return fault_info + (esr & ESR_ELx_FSC); +} + +static inline const struct fault_info *esr_to_debug_fault_info(unsigned int esr) +{ + return debug_fault_info + DBG_ESR_EVT(esr); +} + +static void data_abort_decode(unsigned int esr) +{ + pr_alert("Data abort info:\n"); + + if (esr & ESR_ELx_ISV) { + pr_alert(" Access size = %u byte(s)\n", + 1U << ((esr & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT)); + pr_alert(" SSE = %lu, SRT = %lu\n", + (esr & ESR_ELx_SSE) >> ESR_ELx_SSE_SHIFT, + (esr & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT); + pr_alert(" SF = %lu, AR = %lu\n", + (esr & ESR_ELx_SF) >> ESR_ELx_SF_SHIFT, + (esr & ESR_ELx_AR) >> ESR_ELx_AR_SHIFT); + } else { + pr_alert(" ISV = 0, ISS = 0x%08lx\n", esr & ESR_ELx_ISS_MASK); + } + + pr_alert(" CM = %lu, WnR = %lu\n", + (esr & ESR_ELx_CM) >> ESR_ELx_CM_SHIFT, + (esr & ESR_ELx_WNR) >> ESR_ELx_WNR_SHIFT); +} + +static void mem_abort_decode(unsigned int esr) +{ + pr_alert("Mem abort info:\n"); + + pr_alert(" ESR = 0x%08x\n", esr); + pr_alert(" EC = 0x%02lx: %s, IL = %u bits\n", + ESR_ELx_EC(esr), esr_get_class_string(esr), + (esr & ESR_ELx_IL) ? 32 : 16); + pr_alert(" SET = %lu, FnV = %lu\n", + (esr & ESR_ELx_SET_MASK) >> ESR_ELx_SET_SHIFT, + (esr & ESR_ELx_FnV) >> ESR_ELx_FnV_SHIFT); + pr_alert(" EA = %lu, S1PTW = %lu\n", + (esr & ESR_ELx_EA) >> ESR_ELx_EA_SHIFT, + (esr & ESR_ELx_S1PTW) >> ESR_ELx_S1PTW_SHIFT); + + if (esr_is_data_abort(esr)) + data_abort_decode(esr); +} + +static inline unsigned long mm_to_pgd_phys(struct mm_struct *mm) +{ + /* Either init_pg_dir or swapper_pg_dir */ + if (mm == &init_mm) + return __pa_symbol(mm->pgd); + + return (unsigned long)virt_to_phys(mm->pgd); +} + +/* + * Dump out the page tables associated with 'addr' in the currently active mm. 
+ */ +static void show_pte(unsigned long addr) +{ + struct mm_struct *mm; + pgd_t *pgdp; + pgd_t pgd; + + if (is_ttbr0_addr(addr)) { + /* TTBR0 */ + mm = current->active_mm; + if (mm == &init_mm) { + pr_alert("[%016lx] user address but active_mm is swapper\n", + addr); + return; + } + } else if (is_ttbr1_addr(addr)) { + /* TTBR1 */ + mm = &init_mm; + } else { + pr_alert("[%016lx] address between user and kernel address ranges\n", + addr); + return; + } + + pr_alert("%s pgtable: %luk pages, %llu-bit VAs, pgdp=%016lx\n", + mm == &init_mm ? "swapper" : "user", PAGE_SIZE / SZ_1K, + vabits_actual, mm_to_pgd_phys(mm)); + pgdp = pgd_offset(mm, addr); + pgd = READ_ONCE(*pgdp); + pr_alert("[%016lx] pgd=%016llx", addr, pgd_val(pgd)); + + do { + p4d_t *p4dp, p4d; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + pte_t *ptep, pte; + + if (pgd_none(pgd) || pgd_bad(pgd)) + break; + + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + pr_cont(", p4d=%016llx", p4d_val(p4d)); + if (p4d_none(p4d) || p4d_bad(p4d)) + break; + + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + pr_cont(", pud=%016llx", pud_val(pud)); + if (pud_none(pud) || pud_bad(pud)) + break; + + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + pr_cont(", pmd=%016llx", pmd_val(pmd)); + if (pmd_none(pmd) || pmd_bad(pmd)) + break; + + ptep = pte_offset_map(pmdp, addr); + pte = READ_ONCE(*ptep); + pr_cont(", pte=%016llx", pte_val(pte)); + pte_unmap(ptep); + } while(0); + + pr_cont("\n"); +} + +/* + * This function sets the access flags (dirty, accessed), as well as write + * permission, and only to a more permissive setting. + * + * It needs to cope with hardware update of the accessed/dirty state by other + * agents in the system and can safely skip the __sync_icache_dcache() call as, + * like set_pte_at(), the PTE is never changed from no-exec to exec here. + * + * Returns whether or not the PTE actually changed. + */ +int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) +{ + pteval_t old_pteval, pteval; + pte_t pte = READ_ONCE(*ptep); + + if (pte_same(pte, entry)) + return 0; + + /* only preserve the access flags and write permission */ + pte_val(entry) &= PTE_RDONLY | PTE_AF | PTE_WRITE | PTE_DIRTY; + + /* + * Setting the flags must be done atomically to avoid racing with the + * hardware update of the access/dirty state. The PTE_RDONLY bit must + * be set to the most permissive (lowest value) of *ptep and entry + * (calculated as: a & b == ~(~a | ~b)). 
+ */ + pte_val(entry) ^= PTE_RDONLY; + pteval = pte_val(pte); + do { + old_pteval = pteval; + pteval ^= PTE_RDONLY; + pteval |= pte_val(entry); + pteval ^= PTE_RDONLY; + pteval = cmpxchg_relaxed(&pte_val(*ptep), old_pteval, pteval); + } while (pteval != old_pteval); + + /* Invalidate a stale read-only entry */ + if (dirty) + flush_tlb_page(vma, address); + return 1; +} + +static bool is_el1_instruction_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_CUR; +} + +static inline bool is_el1_permission_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + unsigned int ec = ESR_ELx_EC(esr); + unsigned int fsc_type = esr & ESR_ELx_FSC_TYPE; + + if (ec != ESR_ELx_EC_DABT_CUR && ec != ESR_ELx_EC_IABT_CUR) + return false; + + if (fsc_type == ESR_ELx_FSC_PERM) + return true; + + if (is_ttbr0_addr(addr) && system_uses_ttbr0_pan()) + return fsc_type == ESR_ELx_FSC_FAULT && + (regs->pstate & PSR_PAN_BIT); + + return false; +} + +static bool __kprobes is_spurious_el1_translation_fault(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + unsigned long flags; + u64 par, dfsc; + + if (ESR_ELx_EC(esr) != ESR_ELx_EC_DABT_CUR || + (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT) + return false; + + local_irq_save(flags); + asm volatile("at s1e1r, %0" :: "r" (addr)); + isb(); + par = read_sysreg_par(); + local_irq_restore(flags); + + /* + * If we now have a valid translation, treat the translation fault as + * spurious. + */ + if (!(par & SYS_PAR_EL1_F)) + return true; + + /* + * If we got a different type of fault from the AT instruction, + * treat the translation fault as spurious. + */ + dfsc = FIELD_GET(SYS_PAR_EL1_FST, par); + return (dfsc & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT; +} + +static void die_kernel_fault(const char *msg, unsigned long addr, + unsigned int esr, struct pt_regs *regs) +{ + bust_spinlocks(1); + + pr_alert("Unable to handle kernel %s at virtual address %016lx\n", msg, + addr); + + mem_abort_decode(esr); + + show_pte(addr); + die("Oops", regs, esr); + bust_spinlocks(0); + make_task_dead(SIGKILL); +} + +static void __do_kernel_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + const char *msg; + + /* + * Are we prepared to handle this kernel fault? + * We are almost certainly not prepared to handle instruction faults. + */ + if (!is_el1_instruction_abort(esr) && fixup_exception(regs)) + return; + + if (WARN_RATELIMIT(is_spurious_el1_translation_fault(addr, esr, regs), + "Ignoring spurious kernel translation fault at virtual address %016lx\n", addr)) + return; + + if (is_el1_permission_fault(addr, esr, regs)) { + if (esr & ESR_ELx_WNR) + msg = "write to read-only memory"; + else if (is_el1_instruction_abort(esr)) + msg = "execute from non-executable memory"; + else + msg = "read from unreadable memory"; + } else if (addr < PAGE_SIZE) { + msg = "NULL pointer dereference"; + } else { + msg = "paging request"; + } + + die_kernel_fault(msg, addr, esr, regs); +} + +static void set_thread_esr(unsigned long address, unsigned int esr) +{ + current->thread.fault_address = address; + + /* + * If the faulting address is in the kernel, we must sanitize the ESR. + * From userspace's point of view, kernel-only mappings don't exist + * at all, so we report them as level 0 translation faults. + * (This is not quite the way that "no mapping there at all" behaves: + * an alignment fault not caused by the memory type would take + * precedence over translation fault for a real access to empty + * space. 
Unfortunately we can't easily distinguish "alignment fault + * not caused by memory type" from "alignment fault caused by memory + * type", so we ignore this wrinkle and just return the translation + * fault.) + */ + if (!is_ttbr0_addr(current->thread.fault_address)) { + switch (ESR_ELx_EC(esr)) { + case ESR_ELx_EC_DABT_LOW: + /* + * These bits provide only information about the + * faulting instruction, which userspace knows already. + * We explicitly clear bits which are architecturally + * RES0 in case they are given meanings in future. + * We always report the ESR as if the fault was taken + * to EL1 and so ISV and the bits in ISS[23:14] are + * clear. (In fact it always will be a fault to EL1.) + */ + esr &= ESR_ELx_EC_MASK | ESR_ELx_IL | + ESR_ELx_CM | ESR_ELx_WNR; + esr |= ESR_ELx_FSC_FAULT; + break; + case ESR_ELx_EC_IABT_LOW: + /* + * Claim a level 0 translation fault. + * All other bits are architecturally RES0 for faults + * reported with that DFSC value, so we clear them. + */ + esr &= ESR_ELx_EC_MASK | ESR_ELx_IL; + esr |= ESR_ELx_FSC_FAULT; + break; + default: + /* + * This should never happen (entry.S only brings us + * into this code for insn and data aborts from a lower + * exception level). Fail safe by not providing an ESR + * context record at all. + */ + WARN(1, "ESR 0x%x is not DABT or IABT from EL0\n", esr); + esr = 0; + break; + } + } + + current->thread.fault_code = esr; +} + +static void do_bad_area(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + /* + * If we are in kernel mode at this point, we have no context to + * handle this fault with. + */ + if (user_mode(regs)) { + const struct fault_info *inf = esr_to_fault_info(esr); + + set_thread_esr(addr, esr); + arm64_force_sig_fault(inf->sig, inf->code, (void __user *)addr, + inf->name); + } else { + __do_kernel_fault(addr, esr, regs); + } +} + +#define VM_FAULT_BADMAP ((__force vm_fault_t)0x010000) +#define VM_FAULT_BADACCESS ((__force vm_fault_t)0x020000) + +static vm_fault_t __do_page_fault(struct mm_struct *mm, unsigned long addr, + unsigned int mm_flags, unsigned long vm_flags, + struct pt_regs *regs) +{ + struct vm_area_struct *vma = find_vma(mm, addr); + + if (unlikely(!vma)) + return VM_FAULT_BADMAP; + + /* + * Ok, we have a good vm_area for this memory access, so we can handle + * it. + */ + if (unlikely(vma->vm_start > addr)) { + if (!(vma->vm_flags & VM_GROWSDOWN)) + return VM_FAULT_BADMAP; + if (expand_stack(vma, addr)) + return VM_FAULT_BADMAP; + } + + /* + * Check that the permissions on the VMA allow for the fault which + * occurred. + */ + if (!(vma->vm_flags & vm_flags)) + return VM_FAULT_BADACCESS; + return handle_mm_fault(vma, addr & PAGE_MASK, mm_flags, regs); +} + +static bool is_el0_instruction_abort(unsigned int esr) +{ + return ESR_ELx_EC(esr) == ESR_ELx_EC_IABT_LOW; +} + +/* + * Note: not valid for EL1 DC IVAC, but we never use that such that it + * should fault. EL0 cannot issue DC IVAC (undef). + */ +static bool is_write_abort(unsigned int esr) +{ + return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM); +} + +static int __kprobes do_page_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + const struct fault_info *inf; + struct mm_struct *mm = current->mm; + vm_fault_t fault; + unsigned long vm_flags = VM_ACCESS_FLAGS; + unsigned int mm_flags = FAULT_FLAG_DEFAULT; + + if (kprobe_page_fault(regs, esr)) + return 0; + + /* + * If we're in an interrupt or have no user context, we must not take + * the fault. 
+ */ + if (faulthandler_disabled() || !mm) + goto no_context; + + if (user_mode(regs)) + mm_flags |= FAULT_FLAG_USER; + + if (is_el0_instruction_abort(esr)) { + vm_flags = VM_EXEC; + mm_flags |= FAULT_FLAG_INSTRUCTION; + } else if (is_write_abort(esr)) { + vm_flags = VM_WRITE; + mm_flags |= FAULT_FLAG_WRITE; + } + + if (is_ttbr0_addr(addr) && is_el1_permission_fault(addr, esr, regs)) { + /* regs->orig_addr_limit may be 0 if we entered from EL0 */ + if (regs->orig_addr_limit == KERNEL_DS) + die_kernel_fault("access to user memory with fs=KERNEL_DS", + addr, esr, regs); + + if (is_el1_instruction_abort(esr)) + die_kernel_fault("execution of user memory", + addr, esr, regs); + + if (!search_exception_tables(regs->pc)) + die_kernel_fault("access to user memory outside uaccess routines", + addr, esr, regs); + } + + perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr); + + /* + * As per x86, we may deadlock here. However, since the kernel only + * validly references user space from well defined areas of the code, + * we can bug out early if this is from code which shouldn't. + */ + if (!mmap_read_trylock(mm)) { + if (!user_mode(regs) && !search_exception_tables(regs->pc)) + goto no_context; +retry: + mmap_read_lock(mm); + } else { + /* + * The above down_read_trylock() might have succeeded in which + * case, we'll have missed the might_sleep() from down_read(). + */ + might_sleep(); +#ifdef CONFIG_DEBUG_VM + if (!user_mode(regs) && !search_exception_tables(regs->pc)) { + mmap_read_unlock(mm); + goto no_context; + } +#endif + } + + fault = __do_page_fault(mm, addr, mm_flags, vm_flags, regs); + + /* Quick path to respond to signals */ + if (fault_signal_pending(fault, regs)) { + if (!user_mode(regs)) + goto no_context; + return 0; + } + + if (fault & VM_FAULT_RETRY) { + if (mm_flags & FAULT_FLAG_ALLOW_RETRY) { + mm_flags |= FAULT_FLAG_TRIED; + goto retry; + } + } + mmap_read_unlock(mm); + + /* + * Handle the "normal" (no error) case first. + */ + if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | + VM_FAULT_BADACCESS)))) + return 0; + + /* + * If we are in kernel mode at this point, we have no context to + * handle this fault with. + */ + if (!user_mode(regs)) + goto no_context; + + if (fault & VM_FAULT_OOM) { + /* + * We ran out of memory, call the OOM killer, and return to + * userspace (which will retry the fault, or kill us if we got + * oom-killed). + */ + pagefault_out_of_memory(); + return 0; + } + + inf = esr_to_fault_info(esr); + set_thread_esr(addr, esr); + if (fault & VM_FAULT_SIGBUS) { + /* + * We had some memory, but were unable to successfully fix up + * this page fault. + */ + arm64_force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)addr, + inf->name); + } else if (fault & (VM_FAULT_HWPOISON_LARGE | VM_FAULT_HWPOISON)) { + unsigned int lsb; + + lsb = PAGE_SHIFT; + if (fault & VM_FAULT_HWPOISON_LARGE) + lsb = hstate_index_to_shift(VM_FAULT_GET_HINDEX(fault)); + + arm64_force_sig_mceerr(BUS_MCEERR_AR, (void __user *)addr, lsb, + inf->name); + } else { + /* + * Something tried to access memory that isn't in our memory + * map. + */ + arm64_force_sig_fault(SIGSEGV, + fault == VM_FAULT_BADACCESS ? 
SEGV_ACCERR : SEGV_MAPERR, + (void __user *)addr, + inf->name); + } + + return 0; + +no_context: + __do_kernel_fault(addr, esr, regs); + return 0; +} + +static int __kprobes do_translation_fault(unsigned long addr, + unsigned int esr, + struct pt_regs *regs) +{ + if (is_ttbr0_addr(addr)) + return do_page_fault(addr, esr, regs); + + do_bad_area(addr, esr, regs); + return 0; +} + +static int do_alignment_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + do_bad_area(addr, esr, regs); + return 0; +} + +static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + return 1; /* "fault" */ +} + +static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + const struct fault_info *inf; + void __user *siaddr; + + inf = esr_to_fault_info(esr); + + if (user_mode(regs) && apei_claim_sea(regs) == 0) { + /* + * APEI claimed this as a firmware-first notification. + * Some processing deferred to task_work before ret_to_user(). + */ + return 0; + } + + if (esr & ESR_ELx_FnV) + siaddr = NULL; + else + siaddr = (void __user *)addr; + arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr); + + return 0; +} + +static int do_tag_check_fault(unsigned long addr, unsigned int esr, + struct pt_regs *regs) +{ + do_bad_area(addr, esr, regs); + return 0; +} + +static const struct fault_info fault_info[] = { + { do_bad, SIGKILL, SI_KERNEL, "ttbr address size fault" }, + { do_bad, SIGKILL, SI_KERNEL, "level 1 address size fault" }, + { do_bad, SIGKILL, SI_KERNEL, "level 2 address size fault" }, + { do_bad, SIGKILL, SI_KERNEL, "level 3 address size fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 0 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 1 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 2 translation fault" }, + { do_translation_fault, SIGSEGV, SEGV_MAPERR, "level 3 translation fault" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 8" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 access flag fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 access flag fault" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 12" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 1 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 2 permission fault" }, + { do_page_fault, SIGSEGV, SEGV_ACCERR, "level 3 permission fault" }, + { do_sea, SIGBUS, BUS_OBJERR, "synchronous external abort" }, + { do_tag_check_fault, SIGSEGV, SEGV_MTESERR, "synchronous tag check fault" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 18" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 19" }, + { do_sea, SIGKILL, SI_KERNEL, "level 0 (translation table walk)" }, + { do_sea, SIGKILL, SI_KERNEL, "level 1 (translation table walk)" }, + { do_sea, SIGKILL, SI_KERNEL, "level 2 (translation table walk)" }, + { do_sea, SIGKILL, SI_KERNEL, "level 3 (translation table walk)" }, + { do_sea, SIGBUS, BUS_OBJERR, "synchronous parity or ECC error" }, // Reserved when RAS is implemented + { do_bad, SIGKILL, SI_KERNEL, "unknown 25" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 26" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 27" }, + { do_sea, SIGKILL, SI_KERNEL, "level 0 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_sea, SIGKILL, SI_KERNEL, "level 1 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_sea, SIGKILL, SI_KERNEL, "level 2 
synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_sea, SIGKILL, SI_KERNEL, "level 3 synchronous parity error (translation table walk)" }, // Reserved when RAS is implemented + { do_bad, SIGKILL, SI_KERNEL, "unknown 32" }, + { do_alignment_fault, SIGBUS, BUS_ADRALN, "alignment fault" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 34" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 35" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 36" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 37" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 38" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 39" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 40" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 41" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 42" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 43" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 44" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 45" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 46" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 47" }, + { do_bad, SIGKILL, SI_KERNEL, "TLB conflict abort" }, + { do_bad, SIGKILL, SI_KERNEL, "Unsupported atomic hardware update fault" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 50" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 51" }, + { do_bad, SIGKILL, SI_KERNEL, "implementation fault (lockdown abort)" }, + { do_bad, SIGBUS, BUS_OBJERR, "implementation fault (unsupported exclusive)" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 54" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 55" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 56" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 57" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 58" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 59" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 60" }, + { do_bad, SIGKILL, SI_KERNEL, "section domain fault" }, + { do_bad, SIGKILL, SI_KERNEL, "page domain fault" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 63" }, +}; + +void do_mem_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + const struct fault_info *inf = esr_to_fault_info(esr); + + if (!inf->fn(addr, esr, regs)) + return; + + if (!user_mode(regs)) { + pr_alert("Unhandled fault at 0x%016lx\n", addr); + mem_abort_decode(esr); + show_pte(addr); + } + + arm64_notify_die(inf->name, regs, + inf->sig, inf->code, (void __user *)addr, esr); +} +NOKPROBE_SYMBOL(do_mem_abort); + +void do_el0_irq_bp_hardening(void) +{ + /* PC has already been checked in entry.S */ + arm64_apply_bp_hardening(); +} +NOKPROBE_SYMBOL(do_el0_irq_bp_hardening); + +void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs) +{ + arm64_notify_die("SP/PC alignment exception", regs, + SIGBUS, BUS_ADRALN, (void __user *)addr, esr); +} +NOKPROBE_SYMBOL(do_sp_pc_abort); + +int __init early_brk64(unsigned long addr, unsigned int esr, + struct pt_regs *regs); + +/* + * __refdata because early_brk64 is __init, but the reference to it is + * clobbered at arch_initcall time. + * See traps.c and debug-monitors.c:debug_traps_init(). 
+ */ +static struct fault_info __refdata debug_fault_info[] = { + { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware breakpoint" }, + { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware single-step" }, + { do_bad, SIGTRAP, TRAP_HWBKPT, "hardware watchpoint" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 3" }, + { do_bad, SIGTRAP, TRAP_BRKPT, "aarch32 BKPT" }, + { do_bad, SIGKILL, SI_KERNEL, "aarch32 vector catch" }, + { early_brk64, SIGTRAP, TRAP_BRKPT, "aarch64 BRK" }, + { do_bad, SIGKILL, SI_KERNEL, "unknown 7" }, +}; + +void __init hook_debug_fault_code(int nr, + int (*fn)(unsigned long, unsigned int, struct pt_regs *), + int sig, int code, const char *name) +{ + BUG_ON(nr < 0 || nr >= ARRAY_SIZE(debug_fault_info)); + + debug_fault_info[nr].fn = fn; + debug_fault_info[nr].sig = sig; + debug_fault_info[nr].code = code; + debug_fault_info[nr].name = name; +} + +/* + * In debug exception context, we explicitly disable preemption despite + * having interrupts disabled. + * This serves two purposes: it makes it much less likely that we would + * accidentally schedule in exception context and it will force a warning + * if we somehow manage to schedule by accident. + */ +static void debug_exception_enter(struct pt_regs *regs) +{ + preempt_disable(); + + /* This code is a bit fragile. Test it. */ + RCU_LOCKDEP_WARN(!rcu_is_watching(), "exception_enter didn't work"); +} +NOKPROBE_SYMBOL(debug_exception_enter); + +static void debug_exception_exit(struct pt_regs *regs) +{ + preempt_enable_no_resched(); +} +NOKPROBE_SYMBOL(debug_exception_exit); + +#ifdef CONFIG_ARM64_ERRATUM_1463225 +DECLARE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa); + +static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + if (user_mode(regs)) + return 0; + + if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa)) + return 0; + + /* + * We've taken a dummy step exception from the kernel to ensure + * that interrupts are re-enabled on the syscall path. Return back + * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions + * masked so that we can safely restore the mdscr and get on with + * handling the syscall. + */ + regs->pstate |= PSR_D_BIT; + return 1; +} +#else +static int cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs) +{ + return 0; +} +#endif /* CONFIG_ARM64_ERRATUM_1463225 */ +NOKPROBE_SYMBOL(cortex_a76_erratum_1463225_debug_handler); + +void do_debug_exception(unsigned long addr_if_watchpoint, unsigned int esr, + struct pt_regs *regs) +{ + const struct fault_info *inf = esr_to_debug_fault_info(esr); + unsigned long pc = instruction_pointer(regs); + + if (cortex_a76_erratum_1463225_debug_handler(regs)) + return; + + debug_exception_enter(regs); + + if (user_mode(regs) && !is_ttbr0_addr(pc)) + arm64_apply_bp_hardening(); + + if (inf->fn(addr_if_watchpoint, esr, regs)) { + arm64_notify_die(inf->name, regs, + inf->sig, inf->code, (void __user *)pc, esr); + } + + debug_exception_exit(regs); +} +NOKPROBE_SYMBOL(do_debug_exception); diff --git a/arch/arm64/mm/flush.c b/arch/arm64/mm/flush.c new file mode 100644 index 000000000..6d44c028d --- /dev/null +++ b/arch/arm64/mm/flush.c @@ -0,0 +1,96 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/flush.c + * + * Copyright (C) 1995-2002 Russell King + * Copyright (C) 2012 ARM Ltd. 
+ */ + +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/pagemap.h> + +#include <asm/cacheflush.h> +#include <asm/cache.h> +#include <asm/tlbflush.h> + +void sync_icache_aliases(void *kaddr, unsigned long len) +{ + unsigned long addr = (unsigned long)kaddr; + + if (icache_is_aliasing()) { + __clean_dcache_area_pou(kaddr, len); + __flush_icache_all(); + } else { + /* + * Don't issue kick_all_cpus_sync() after I-cache invalidation + * for user mappings. + */ + __flush_icache_range(addr, addr + len); + } +} + +static void flush_ptrace_access(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *kaddr, + unsigned long len) +{ + if (vma->vm_flags & VM_EXEC) + sync_icache_aliases(kaddr, len); +} + +/* + * Copy user data from/to a page which is mapped into a different processes + * address space. Really, we want to allow our "user space" model to handle + * this. + */ +void copy_to_user_page(struct vm_area_struct *vma, struct page *page, + unsigned long uaddr, void *dst, const void *src, + unsigned long len) +{ + memcpy(dst, src, len); + flush_ptrace_access(vma, page, uaddr, dst, len); +} + +void __sync_icache_dcache(pte_t pte) +{ + struct page *page = pte_page(pte); + + if (!test_bit(PG_dcache_clean, &page->flags)) { + sync_icache_aliases(page_address(page), page_size(page)); + set_bit(PG_dcache_clean, &page->flags); + } +} +EXPORT_SYMBOL_GPL(__sync_icache_dcache); + +/* + * This function is called when a page has been modified by the kernel. Mark + * it as dirty for later flushing when mapped in user space (if executable, + * see __sync_icache_dcache). + */ +void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} +EXPORT_SYMBOL(flush_dcache_page); + +/* + * Additional functions defined in assembly. + */ +EXPORT_SYMBOL(__flush_icache_range); + +#ifdef CONFIG_ARCH_HAS_PMEM_API +void arch_wb_cache_pmem(void *addr, size_t size) +{ + /* Ensure order against any prior non-cacheable writes */ + dmb(osh); + __clean_dcache_area_pop(addr, size); +} +EXPORT_SYMBOL_GPL(arch_wb_cache_pmem); + +void arch_invalidate_pmem(void *addr, size_t size) +{ + __inval_dcache_area(addr, size); +} +EXPORT_SYMBOL_GPL(arch_invalidate_pmem); +#endif diff --git a/arch/arm64/mm/hugetlbpage.c b/arch/arm64/mm/hugetlbpage.c new file mode 100644 index 000000000..99cd6e718 --- /dev/null +++ b/arch/arm64/mm/hugetlbpage.c @@ -0,0 +1,518 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * arch/arm64/mm/hugetlbpage.c + * + * Copyright (C) 2013 Linaro Ltd. + * + * Based on arch/x86/mm/hugetlbpage.c. + */ + +#include <linux/init.h> +#include <linux/fs.h> +#include <linux/mm.h> +#include <linux/hugetlb.h> +#include <linux/pagemap.h> +#include <linux/err.h> +#include <linux/sysctl.h> +#include <asm/mman.h> +#include <asm/tlb.h> +#include <asm/tlbflush.h> + +/* + * HugeTLB Support Matrix + * + * --------------------------------------------------- + * | Page Size | CONT PTE | PMD | CONT PMD | PUD | + * --------------------------------------------------- + * | 4K | 64K | 2M | 32M | 1G | + * | 16K | 2M | 32M | 1G | | + * | 64K | 2M | 512M | 16G | | + * --------------------------------------------------- + */ + +/* + * Reserve CMA areas for the largest supported gigantic + * huge page when requested. Any other smaller gigantic + * huge pages could still be served from those areas. 
+ */ +#ifdef CONFIG_CMA +void __init arm64_hugetlb_cma_reserve(void) +{ + int order; + +#ifdef CONFIG_ARM64_4K_PAGES + order = PUD_SHIFT - PAGE_SHIFT; +#else + order = CONT_PMD_SHIFT - PAGE_SHIFT; +#endif + /* + * HugeTLB CMA reservation is required for gigantic + * huge pages which could not be allocated via the + * page allocator. Just warn if there is any change + * breaking this assumption. + */ + WARN_ON(order <= MAX_ORDER); + hugetlb_cma_reserve(order); +} +#endif /* CONFIG_CMA */ + +#ifdef CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION +bool arch_hugetlb_migration_supported(struct hstate *h) +{ + size_t pagesize = huge_page_size(h); + + switch (pagesize) { +#ifdef CONFIG_ARM64_4K_PAGES + case PUD_SIZE: +#endif + case PMD_SIZE: + case CONT_PMD_SIZE: + case CONT_PTE_SIZE: + return true; + } + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + return false; +} +#endif + +int pmd_huge(pmd_t pmd) +{ + return pmd_val(pmd) && !(pmd_val(pmd) & PMD_TABLE_BIT); +} + +int pud_huge(pud_t pud) +{ +#ifndef __PAGETABLE_PMD_FOLDED + return pud_val(pud) && !(pud_val(pud) & PUD_TABLE_BIT); +#else + return 0; +#endif +} + +/* + * Select all bits except the pfn + */ +static inline pgprot_t pte_pgprot(pte_t pte) +{ + unsigned long pfn = pte_pfn(pte); + + return __pgprot(pte_val(pfn_pte(pfn, __pgprot(0))) ^ pte_val(pte)); +} + +static int find_num_contig(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, size_t *pgsize) +{ + pgd_t *pgdp = pgd_offset(mm, addr); + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + + *pgsize = PAGE_SIZE; + p4dp = p4d_offset(pgdp, addr); + pudp = pud_offset(p4dp, addr); + pmdp = pmd_offset(pudp, addr); + if ((pte_t *)pmdp == ptep) { + *pgsize = PMD_SIZE; + return CONT_PMDS; + } + return CONT_PTES; +} + +static inline int num_contig_ptes(unsigned long size, size_t *pgsize) +{ + int contig_ptes = 0; + + *pgsize = size; + + switch (size) { +#ifdef CONFIG_ARM64_4K_PAGES + case PUD_SIZE: +#endif + case PMD_SIZE: + contig_ptes = 1; + break; + case CONT_PMD_SIZE: + *pgsize = PMD_SIZE; + contig_ptes = CONT_PMDS; + break; + case CONT_PTE_SIZE: + *pgsize = PAGE_SIZE; + contig_ptes = CONT_PTES; + break; + } + + return contig_ptes; +} + +/* + * Changing some bits of contiguous entries requires us to follow a + * Break-Before-Make approach, breaking the whole contiguous set + * before we can change any entries. See ARM DDI 0487A.k_iss10775, + * "Misprogramming of the Contiguous bit", page D4-1762. + * + * This helper performs the break step. + */ +static pte_t get_clear_flush(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, + unsigned long pgsize, + unsigned long ncontig) +{ + pte_t orig_pte = huge_ptep_get(ptep); + bool valid = pte_valid(orig_pte); + unsigned long i, saddr = addr; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) { + pte_t pte = ptep_get_and_clear(mm, addr, ptep); + + /* + * If HW_AFDBM is enabled, then the HW could turn on + * the dirty or accessed bit for any page in the set, + * so check them all. + */ + if (pte_dirty(pte)) + orig_pte = pte_mkdirty(orig_pte); + + if (pte_young(pte)) + orig_pte = pte_mkyoung(orig_pte); + } + + if (valid) { + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + flush_tlb_range(&vma, saddr, addr); + } + return orig_pte; +} + +/* + * Changing some bits of contiguous entries requires us to follow a + * Break-Before-Make approach, breaking the whole contiguous set + * before we can change any entries. See ARM DDI 0487A.k_iss10775, + * "Misprogramming of the Contiguous bit", page D4-1762. 
+ * + * This helper performs the break step for use cases where the + * original pte is not needed. + */ +static void clear_flush(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, + unsigned long pgsize, + unsigned long ncontig) +{ + struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0); + unsigned long i, saddr = addr; + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); + + flush_tlb_range(&vma, saddr, addr); +} + +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte) +{ + size_t pgsize; + int i; + int ncontig; + unsigned long pfn, dpfn; + pgprot_t hugeprot; + + /* + * Code needs to be expanded to handle huge swap and migration + * entries. Needed for HUGETLB and MEMORY_FAILURE. + */ + WARN_ON(!pte_present(pte)); + + if (!pte_cont(pte)) { + set_pte_at(mm, addr, ptep, pte); + return; + } + + ncontig = find_num_contig(mm, addr, ptep, &pgsize); + pfn = pte_pfn(pte); + dpfn = pgsize >> PAGE_SHIFT; + hugeprot = pte_pgprot(pte); + + clear_flush(mm, addr, ptep, pgsize, ncontig); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +void set_huge_swap_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, ptep++) + set_pte(ptep, pte); +} + +pte_t *huge_pte_alloc(struct mm_struct *mm, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + pte_t *ptep = NULL; + + pgdp = pgd_offset(mm, addr); + p4dp = p4d_offset(pgdp, addr); + pudp = pud_alloc(mm, p4dp, addr); + if (!pudp) + return NULL; + + if (sz == PUD_SIZE) { + ptep = (pte_t *)pudp; + } else if (sz == (CONT_PTE_SIZE)) { + pmdp = pmd_alloc(mm, pudp, addr); + if (!pmdp) + return NULL; + + WARN_ON(addr & (sz - 1)); + /* + * Note that if this code were ever ported to the + * 32-bit arm platform then it will cause trouble in + * the case where CONFIG_HIGHPTE is set, since there + * will be no pte_unmap() to correspond with this + * pte_alloc_map(). + */ + ptep = pte_alloc_map(mm, pmdp, addr); + } else if (sz == PMD_SIZE) { + if (IS_ENABLED(CONFIG_ARCH_WANT_HUGE_PMD_SHARE) && + pud_none(READ_ONCE(*pudp))) + ptep = huge_pmd_share(mm, addr, pudp); + else + ptep = (pte_t *)pmd_alloc(mm, pudp, addr); + } else if (sz == (CONT_PMD_SIZE)) { + pmdp = pmd_alloc(mm, pudp, addr); + WARN_ON(addr & (sz - 1)); + return (pte_t *)pmdp; + } + + return ptep; +} + +pte_t *huge_pte_offset(struct mm_struct *mm, + unsigned long addr, unsigned long sz) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + + pgdp = pgd_offset(mm, addr); + if (!pgd_present(READ_ONCE(*pgdp))) + return NULL; + + p4dp = p4d_offset(pgdp, addr); + if (!p4d_present(READ_ONCE(*p4dp))) + return NULL; + + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (sz != PUD_SIZE && pud_none(pud)) + return NULL; + /* hugepage or swap? 
*/ + if (pud_huge(pud) || !pud_present(pud)) + return (pte_t *)pudp; + /* table; check the next level */ + + if (sz == CONT_PMD_SIZE) + addr &= CONT_PMD_MASK; + + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (!(sz == PMD_SIZE || sz == CONT_PMD_SIZE) && + pmd_none(pmd)) + return NULL; + if (pmd_huge(pmd) || !pmd_present(pmd)) + return (pte_t *)pmdp; + + if (sz == CONT_PTE_SIZE) + return pte_offset_kernel(pmdp, (addr & CONT_PTE_MASK)); + + return NULL; +} + +pte_t arch_make_huge_pte(pte_t entry, struct vm_area_struct *vma, + struct page *page, int writable) +{ + size_t pagesize = huge_page_size(hstate_vma(vma)); + + if (pagesize == CONT_PTE_SIZE) { + entry = pte_mkcont(entry); + } else if (pagesize == CONT_PMD_SIZE) { + entry = pmd_pte(pmd_mkcont(pte_pmd(entry))); + } else if (pagesize != PUD_SIZE && pagesize != PMD_SIZE) { + pr_warn("%s: unrecognized huge page size 0x%lx\n", + __func__, pagesize); + } + return entry; +} + +void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + int i, ncontig; + size_t pgsize; + + ncontig = num_contig_ptes(sz, &pgsize); + + for (i = 0; i < ncontig; i++, addr += pgsize, ptep++) + pte_clear(mm, addr, ptep); +} + +pte_t huge_ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + int ncontig; + size_t pgsize; + pte_t orig_pte = huge_ptep_get(ptep); + + if (!pte_cont(orig_pte)) + return ptep_get_and_clear(mm, addr, ptep); + + ncontig = find_num_contig(mm, addr, ptep, &pgsize); + + return get_clear_flush(mm, addr, ptep, pgsize, ncontig); +} + +/* + * huge_ptep_set_access_flags will update access flags (dirty, accesssed) + * and write permission. + * + * For a contiguous huge pte range we need to check whether or not write + * permission has to change only on the first pte in the set. Then for + * all the contiguous ptes we need to check whether or not there is a + * discrepancy between dirty or young. 
+ */ +static int __cont_access_flags_changed(pte_t *ptep, pte_t pte, int ncontig) +{ + int i; + + if (pte_write(pte) != pte_write(huge_ptep_get(ptep))) + return 1; + + for (i = 0; i < ncontig; i++) { + pte_t orig_pte = huge_ptep_get(ptep + i); + + if (pte_dirty(pte) != pte_dirty(orig_pte)) + return 1; + + if (pte_young(pte) != pte_young(orig_pte)) + return 1; + } + + return 0; +} + +int huge_ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep, + pte_t pte, int dirty) +{ + int ncontig, i; + size_t pgsize = 0; + unsigned long pfn = pte_pfn(pte), dpfn; + pgprot_t hugeprot; + pte_t orig_pte; + + if (!pte_cont(pte)) + return ptep_set_access_flags(vma, addr, ptep, pte, dirty); + + ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize); + dpfn = pgsize >> PAGE_SHIFT; + + if (!__cont_access_flags_changed(ptep, pte, ncontig)) + return 0; + + orig_pte = get_clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); + + /* Make sure we don't lose the dirty or young state */ + if (pte_dirty(orig_pte)) + pte = pte_mkdirty(pte); + + if (pte_young(orig_pte)) + pte = pte_mkyoung(pte); + + hugeprot = pte_pgprot(pte); + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) + set_pte_at(vma->vm_mm, addr, ptep, pfn_pte(pfn, hugeprot)); + + return 1; +} + +void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long pfn, dpfn; + pgprot_t hugeprot; + int ncontig, i; + size_t pgsize; + pte_t pte; + + if (!pte_cont(READ_ONCE(*ptep))) { + ptep_set_wrprotect(mm, addr, ptep); + return; + } + + ncontig = find_num_contig(mm, addr, ptep, &pgsize); + dpfn = pgsize >> PAGE_SHIFT; + + pte = get_clear_flush(mm, addr, ptep, pgsize, ncontig); + pte = pte_wrprotect(pte); + + hugeprot = pte_pgprot(pte); + pfn = pte_pfn(pte); + + for (i = 0; i < ncontig; i++, ptep++, addr += pgsize, pfn += dpfn) + set_pte_at(mm, addr, ptep, pfn_pte(pfn, hugeprot)); +} + +void huge_ptep_clear_flush(struct vm_area_struct *vma, + unsigned long addr, pte_t *ptep) +{ + size_t pgsize; + int ncontig; + + if (!pte_cont(READ_ONCE(*ptep))) { + ptep_clear_flush(vma, addr, ptep); + return; + } + + ncontig = find_num_contig(vma->vm_mm, addr, ptep, &pgsize); + clear_flush(vma->vm_mm, addr, ptep, pgsize, ncontig); +} + +static int __init hugetlbpage_init(void) +{ +#ifdef CONFIG_ARM64_4K_PAGES + hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT); +#endif + hugetlb_add_hstate(CONT_PMD_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(PMD_SHIFT - PAGE_SHIFT); + hugetlb_add_hstate(CONT_PTE_SHIFT - PAGE_SHIFT); + + return 0; +} +arch_initcall(hugetlbpage_init); + +bool __init arch_hugetlb_valid_size(unsigned long size) +{ + switch (size) { +#ifdef CONFIG_ARM64_4K_PAGES + case PUD_SIZE: +#endif + case CONT_PMD_SIZE: + case PMD_SIZE: + case CONT_PTE_SIZE: + return true; + } + + return false; +} diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c new file mode 100644 index 000000000..80cc79760 --- /dev/null +++ b/arch/arm64/mm/init.c @@ -0,0 +1,613 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/init.c + * + * Copyright (C) 1995-2005 Russell King + * Copyright (C) 2012 ARM Ltd. 
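
__cont_access_flags_changed() above is what lets huge_ptep_set_access_flags() skip the break-before-make sequence when nothing would actually change: write permission is compared against the first entry only, while dirty and young are compared against every entry, since hardware may have set them on any page of the set. A self-contained model of that check, with flag bits invented for the demo:

/* Stand-alone model of the access-flags comparison; the flag encoding
 * below is made up and only the shape of the check matters. */
#include <stdio.h>
#include <stdbool.h>

#define F_WRITE	0x1
#define F_DIRTY	0x2
#define F_YOUNG	0x4

static bool cont_access_flags_changed(const unsigned int *set, int n,
				      unsigned int newpte)
{
	int i;

	/* write permission only needs checking against the first entry */
	if ((newpte & F_WRITE) != (set[0] & F_WRITE))
		return true;

	for (i = 0; i < n; i++) {
		if ((newpte & F_DIRTY) != (set[i] & F_DIRTY))
			return true;
		if ((newpte & F_YOUNG) != (set[i] & F_YOUNG))
			return true;
	}
	return false;
}

int main(void)
{
	unsigned int uniform[4] = {
		F_WRITE | F_YOUNG, F_WRITE | F_YOUNG,
		F_WRITE | F_YOUNG, F_WRITE | F_YOUNG,
	};
	unsigned int dirtied[4] = {
		F_WRITE | F_YOUNG,
		F_WRITE | F_YOUNG | F_DIRTY,	/* hardware dirtied this one */
		F_WRITE | F_YOUNG,
		F_WRITE | F_YOUNG,
	};
	unsigned int newpte = F_WRITE | F_YOUNG;

	printf("uniform set: changed=%d\n",
	       cont_access_flags_changed(uniform, 4, newpte));	/* 0 */
	printf("dirtied set: changed=%d\n",
	       cont_access_flags_changed(dirtied, 4, newpte));	/* 1 */
	return 0;
}
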
+ */ + +#include <linux/kernel.h> +#include <linux/export.h> +#include <linux/errno.h> +#include <linux/swap.h> +#include <linux/init.h> +#include <linux/cache.h> +#include <linux/mman.h> +#include <linux/nodemask.h> +#include <linux/initrd.h> +#include <linux/gfp.h> +#include <linux/memblock.h> +#include <linux/sort.h> +#include <linux/of.h> +#include <linux/of_fdt.h> +#include <linux/dma-direct.h> +#include <linux/dma-map-ops.h> +#include <linux/efi.h> +#include <linux/swiotlb.h> +#include <linux/vmalloc.h> +#include <linux/mm.h> +#include <linux/kexec.h> +#include <linux/crash_dump.h> +#include <linux/hugetlb.h> +#include <linux/acpi_iort.h> + +#include <asm/boot.h> +#include <asm/fixmap.h> +#include <asm/kasan.h> +#include <asm/kernel-pgtable.h> +#include <asm/memory.h> +#include <asm/numa.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <linux/sizes.h> +#include <asm/tlb.h> +#include <asm/alternative.h> + +/* + * We need to be able to catch inadvertent references to memstart_addr + * that occur (potentially in generic code) before arm64_memblock_init() + * executes, which assigns it its actual value. So use a default value + * that cannot be mistaken for a real physical address. + */ +s64 memstart_addr __ro_after_init = -1; +EXPORT_SYMBOL(memstart_addr); + +/* + * If the corresponding config options are enabled, we create both ZONE_DMA + * and ZONE_DMA32. By default ZONE_DMA covers the 32-bit addressable memory + * unless restricted on specific platforms (e.g. 30-bit on Raspberry Pi 4). + * In such case, ZONE_DMA32 covers the rest of the 32-bit addressable memory, + * otherwise it is empty. + * + * Memory reservation for crash kernel either done early or deferred + * depending on DMA memory zones configs (ZONE_DMA) -- + * + * In absence of ZONE_DMA configs arm64_dma_phys_limit initialized + * here instead of max_zone_phys(). This lets early reservation of + * crash kernel memory which has a dependency on arm64_dma_phys_limit. + * Reserving memory early for crash kernel allows linear creation of block + * mappings (greater than page-granularity) for all the memory bank rangs. + * In this scheme a comparatively quicker boot is observed. + * + * If ZONE_DMA configs are defined, crash kernel memory reservation + * is delayed until DMA zone memory range size initilazation performed in + * zone_sizes_init(). The defer is necessary to steer clear of DMA zone + * memory range to avoid overlap allocation. So crash kernel memory boundaries + * are not known when mapping all bank memory ranges, which otherwise means + * not possible to exclude crash kernel range from creating block mappings + * so page-granularity mappings are created for the entire memory range. + * Hence a slightly slower boot is observed. + * + * Note: Page-granularity mapppings are necessary for crash kernel memory + * range for shrinking its size via /sys/kernel/kexec_crash_size interface. + */ +#if IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32) +phys_addr_t __ro_after_init arm64_dma_phys_limit; +#else +phys_addr_t __ro_after_init arm64_dma_phys_limit = PHYS_MASK + 1; +#endif + +#ifdef CONFIG_KEXEC_CORE +/* + * reserve_crashkernel() - reserves memory for crash kernel + * + * This function reserves memory area given in "crashkernel=" kernel command + * line parameter. The memory reserved is used by dump capture kernel when + * primary kernel is crashing. 
+ */ +static void __init reserve_crashkernel(void) +{ + unsigned long long crash_base, crash_size; + int ret; + + ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(), + &crash_size, &crash_base); + /* no crashkernel= or invalid value specified */ + if (ret || !crash_size) + return; + + crash_size = PAGE_ALIGN(crash_size); + + if (crash_base == 0) { + /* Current arm64 boot protocol requires 2MB alignment */ + crash_base = memblock_find_in_range(0, arm64_dma_phys_limit, + crash_size, SZ_2M); + if (crash_base == 0) { + pr_warn("cannot allocate crashkernel (size:0x%llx)\n", + crash_size); + return; + } + } else { + /* User specifies base address explicitly. */ + if (!memblock_is_region_memory(crash_base, crash_size)) { + pr_warn("cannot reserve crashkernel: region is not memory\n"); + return; + } + + if (memblock_is_region_reserved(crash_base, crash_size)) { + pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n"); + return; + } + + if (!IS_ALIGNED(crash_base, SZ_2M)) { + pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n"); + return; + } + } + memblock_reserve(crash_base, crash_size); + + pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n", + crash_base, crash_base + crash_size, crash_size >> 20); + + crashk_res.start = crash_base; + crashk_res.end = crash_base + crash_size - 1; +} +#else +static void __init reserve_crashkernel(void) +{ +} +#endif /* CONFIG_KEXEC_CORE */ + +#ifdef CONFIG_CRASH_DUMP +static int __init early_init_dt_scan_elfcorehdr(unsigned long node, + const char *uname, int depth, void *data) +{ + const __be32 *reg; + int len; + + if (depth != 1 || strcmp(uname, "chosen") != 0) + return 0; + + reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len); + if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells))) + return 1; + + elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, ®); + elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, ®); + + return 1; +} + +/* + * reserve_elfcorehdr() - reserves memory for elf core header + * + * This function reserves the memory occupied by an elf core header + * described in the device tree. This region contains all the + * information about primary kernel's core image and is used by a dump + * capture kernel to access the system memory on primary kernel. + */ +static void __init reserve_elfcorehdr(void) +{ + of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL); + + if (!elfcorehdr_size) + return; + + if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) { + pr_warn("elfcorehdr is overlapped\n"); + return; + } + + memblock_reserve(elfcorehdr_addr, elfcorehdr_size); + + pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n", + elfcorehdr_size >> 10, elfcorehdr_addr); +} +#else +static void __init reserve_elfcorehdr(void) +{ +} +#endif /* CONFIG_CRASH_DUMP */ + +/* + * Return the maximum physical address for a zone accessible by the given bits + * limit. If DRAM starts above 32-bit, expand the zone to the maximum + * available memory, otherwise cap it at 32-bit. 
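
reserve_crashkernel() above accepts either a kernel-chosen base (allocated below arm64_dma_phys_limit with 2MB alignment) or a user-supplied one, which it rejects unless it sits on a 2MB boundary; the requested size is always rounded up to a page. A small runnable illustration of just that arithmetic, with sample values:

/* Userspace illustration of the size/base checks; values are examples. */
#include <stdio.h>

#define PAGE_SIZE	(1UL << 12)
#define SZ_2M		(1UL << 21)
#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((unsigned long long)(a) - 1))
#define IS_ALIGNED(x, a) (((x) & ((a) - 1)) == 0)

int main(void)
{
	unsigned long long crash_size = 130000000;	/* "crashkernel=130000000" */
	unsigned long long crash_base = 0x89100000;	/* user-supplied base */

	crash_size = ALIGN_UP(crash_size, PAGE_SIZE);

	printf("size rounded up to 0x%llx (%llu MB)\n",
	       crash_size, crash_size >> 20);
	printf("base 0x%llx is %s2MB aligned\n", crash_base,
	       IS_ALIGNED(crash_base, SZ_2M) ? "" : "NOT ");
	return 0;
}
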
+ */ +static phys_addr_t __init max_zone_phys(unsigned int zone_bits) +{ + phys_addr_t zone_mask = DMA_BIT_MASK(zone_bits); + phys_addr_t phys_start = memblock_start_of_DRAM(); + + if (phys_start > U32_MAX) + zone_mask = PHYS_ADDR_MAX; + else if (phys_start > zone_mask) + zone_mask = U32_MAX; + + return min(zone_mask, memblock_end_of_DRAM() - 1) + 1; +} + +static void __init zone_sizes_init(unsigned long min, unsigned long max) +{ + unsigned long max_zone_pfns[MAX_NR_ZONES] = {0}; + unsigned int __maybe_unused acpi_zone_dma_bits; + unsigned int __maybe_unused dt_zone_dma_bits; + phys_addr_t __maybe_unused dma32_phys_limit = max_zone_phys(32); + +#ifdef CONFIG_ZONE_DMA + acpi_zone_dma_bits = fls64(acpi_iort_dma_get_max_cpu_address()); + dt_zone_dma_bits = fls64(of_dma_get_max_cpu_address(NULL)); + zone_dma_bits = min3(32U, dt_zone_dma_bits, acpi_zone_dma_bits); + arm64_dma_phys_limit = max_zone_phys(zone_dma_bits); + max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit); +#endif +#ifdef CONFIG_ZONE_DMA32 + max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit); + if (!arm64_dma_phys_limit) + arm64_dma_phys_limit = dma32_phys_limit; +#endif + max_zone_pfns[ZONE_NORMAL] = max; + + free_area_init(max_zone_pfns); +} + +int pfn_valid(unsigned long pfn) +{ + phys_addr_t addr = pfn << PAGE_SHIFT; + + if ((addr >> PAGE_SHIFT) != pfn) + return 0; + +#ifdef CONFIG_SPARSEMEM + if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS) + return 0; + + if (!valid_section(__pfn_to_section(pfn))) + return 0; + + /* + * ZONE_DEVICE memory does not have the memblock entries. + * memblock_is_map_memory() check for ZONE_DEVICE based + * addresses will always fail. Even the normal hotplugged + * memory will never have MEMBLOCK_NOMAP flag set in their + * memblock entries. Skip memblock search for all non early + * memory sections covering all of hotplug memory including + * both normal and ZONE_DEVICE based. + */ + if (!early_section(__pfn_to_section(pfn))) + return pfn_section_valid(__pfn_to_section(pfn), pfn); +#endif + return memblock_is_map_memory(addr); +} +EXPORT_SYMBOL(pfn_valid); + +static phys_addr_t memory_limit = PHYS_ADDR_MAX; + +/* + * Limit the memory size that was specified via FDT. 
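
max_zone_phys() above picks the upper bound for ZONE_DMA/ZONE_DMA32 from the requested bit limit, but widens it when DRAM itself starts above that limit. The sketch below (userspace, with invented DRAM layouts) exercises the three cases:

/* Stand-alone sketch of the zone-limit decision; DRAM layouts are made up. */
#include <stdio.h>
#include <stdint.h>

#define DMA_BIT_MASK(n)	((1ULL << (n)) - 1)
#define PHYS_ADDR_MAX	(~0ULL)
#define U32_MAX		0xffffffffULL

static uint64_t max_zone_phys_demo(unsigned int zone_bits,
				   uint64_t dram_start, uint64_t dram_end)
{
	uint64_t zone_mask = DMA_BIT_MASK(zone_bits);

	if (dram_start > U32_MAX)
		zone_mask = PHYS_ADDR_MAX;	/* DRAM entirely above 4GB */
	else if (dram_start > zone_mask)
		zone_mask = U32_MAX;		/* widen to 32-bit */

	return (zone_mask < dram_end - 1 ? zone_mask : dram_end - 1) + 1;
}

int main(void)
{
	/* Raspberry Pi 4 style: DRAM at 0, 30-bit ZONE_DMA -> 1GB limit */
	printf("0x%llx\n", (unsigned long long)
	       max_zone_phys_demo(30, 0x0, 0x100000000ULL));
	/* DRAM starting at 2GB with a 30-bit request -> capped at 4GB */
	printf("0x%llx\n", (unsigned long long)
	       max_zone_phys_demo(30, 0x80000000ULL, 0x200000000ULL));
	/* DRAM entirely above 4GB -> zone covers everything present */
	printf("0x%llx\n", (unsigned long long)
	       max_zone_phys_demo(32, 0x8000000000ULL, 0x8100000000ULL));
	return 0;
}
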
+ */ +static int __init early_mem(char *p) +{ + if (!p) + return 1; + + memory_limit = memparse(p, &p) & PAGE_MASK; + pr_notice("Memory limited to %lldMB\n", memory_limit >> 20); + + return 0; +} +early_param("mem", early_mem); + +static int __init early_init_dt_scan_usablemem(unsigned long node, + const char *uname, int depth, void *data) +{ + struct memblock_region *usablemem = data; + const __be32 *reg; + int len; + + if (depth != 1 || strcmp(uname, "chosen") != 0) + return 0; + + reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len); + if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells))) + return 1; + + usablemem->base = dt_mem_next_cell(dt_root_addr_cells, ®); + usablemem->size = dt_mem_next_cell(dt_root_size_cells, ®); + + return 1; +} + +static void __init fdt_enforce_memory_region(void) +{ + struct memblock_region reg = { + .size = 0, + }; + + of_scan_flat_dt(early_init_dt_scan_usablemem, ®); + + if (reg.size) + memblock_cap_memory_range(reg.base, reg.size); +} + +void __init arm64_memblock_init(void) +{ + const s64 linear_region_size = BIT(vabits_actual - 1); + + /* Handle linux,usable-memory-range property */ + fdt_enforce_memory_region(); + + /* Remove memory above our supported physical address size */ + memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX); + + /* + * Select a suitable value for the base of physical memory. + */ + memstart_addr = round_down(memblock_start_of_DRAM(), + ARM64_MEMSTART_ALIGN); + + /* + * Remove the memory that we will not be able to cover with the + * linear mapping. Take care not to clip the kernel which may be + * high in memory. + */ + memblock_remove(max_t(u64, memstart_addr + linear_region_size, + __pa_symbol(_end)), ULLONG_MAX); + if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) { + /* ensure that memstart_addr remains sufficiently aligned */ + memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size, + ARM64_MEMSTART_ALIGN); + memblock_remove(0, memstart_addr); + } + + /* + * If we are running with a 52-bit kernel VA config on a system that + * does not support it, we have to place the available physical + * memory in the 48-bit addressable part of the linear region, i.e., + * we have to move it upward. Since memstart_addr represents the + * physical address of PAGE_OFFSET, we have to *subtract* from it. + */ + if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) + memstart_addr -= _PAGE_OFFSET(48) - _PAGE_OFFSET(52); + + /* + * Apply the memory limit if it was set. Since the kernel may be loaded + * high up in memory, add back the kernel region that must be accessible + * via the linear mapping. + */ + if (memory_limit != PHYS_ADDR_MAX) { + memblock_mem_limit_remove_map(memory_limit); + memblock_add(__pa_symbol(_text), (u64)(_end - _text)); + } + + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) { + /* + * Add back the memory we just removed if it results in the + * initrd to become inaccessible via the linear mapping. + * Otherwise, this is a no-op + */ + u64 base = phys_initrd_start & PAGE_MASK; + u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base; + + /* + * We can only add back the initrd memory if we don't end up + * with more memory than we can address via the linear mapping. + * It is up to the bootloader to position the kernel and the + * initrd reasonably close to each other (i.e., within 32 GB of + * each other) so that all granule/#levels combinations can + * always access both. 
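
The 52-bit VA fallback in arm64_memblock_init() above subtracts _PAGE_OFFSET(48) - _PAGE_OFFSET(52) from memstart_addr. Assuming the arm64 definition _PAGE_OFFSET(va) = -(1 << va), that delta is 2^52 - 2^48, and lowering memstart_addr by it pushes every linear-map address up into the 48-bit addressable range. A worked example:

/* Worked example, userspace only; the starting memstart value is arbitrary. */
#include <stdio.h>
#include <stdint.h>

#define _PAGE_OFFSET(va)	(-(UINT64_C(1) << (va)))

int main(void)
{
	int64_t memstart_addr = 0x80000000;	/* example: DRAM at 2GB */
	int64_t delta = (int64_t)(_PAGE_OFFSET(48) - _PAGE_OFFSET(52));

	printf("delta        = 0x%016llx\n", (unsigned long long)delta);
	memstart_addr -= delta;
	/* prints as a two's-complement (negative) value, which is expected:
	 * a lower memstart_addr makes phys - memstart_addr, and hence the
	 * resulting linear address, larger. */
	printf("new memstart = 0x%016llx\n", (unsigned long long)memstart_addr);
	return 0;
}
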
+ */ + if (WARN(base < memblock_start_of_DRAM() || + base + size > memblock_start_of_DRAM() + + linear_region_size, + "initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) { + phys_initrd_size = 0; + } else { + memblock_remove(base, size); /* clear MEMBLOCK_ flags */ + memblock_add(base, size); + memblock_reserve(base, size); + } + } + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + extern u16 memstart_offset_seed; + u64 range = linear_region_size - + (memblock_end_of_DRAM() - memblock_start_of_DRAM()); + + /* + * If the size of the linear region exceeds, by a sufficient + * margin, the size of the region that the available physical + * memory spans, randomize the linear region as well. + */ + if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) { + range /= ARM64_MEMSTART_ALIGN; + memstart_addr -= ARM64_MEMSTART_ALIGN * + ((range * memstart_offset_seed) >> 16); + } + } + + /* + * Register the kernel text, kernel data, initrd, and initial + * pagetables with memblock. + */ + memblock_reserve(__pa_symbol(_text), _end - _text); + if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) { + /* the generic initrd code expects virtual addresses */ + initrd_start = __phys_to_virt(phys_initrd_start); + initrd_end = initrd_start + phys_initrd_size; + } + + early_init_fdt_scan_reserved_mem(); + + reserve_elfcorehdr(); + + if (!IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) + reserve_crashkernel(); + + high_memory = __va(memblock_end_of_DRAM() - 1) + 1; +} + +void __init bootmem_init(void) +{ + unsigned long min, max; + + min = PFN_UP(memblock_start_of_DRAM()); + max = PFN_DOWN(memblock_end_of_DRAM()); + + early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT); + + max_pfn = max_low_pfn = max; + min_low_pfn = min; + + arm64_numa_init(); + + /* + * must be done after arm64_numa_init() which calls numa_init() to + * initialize node_online_map that gets used in hugetlb_cma_reserve() + * while allocating required CMA size across online nodes. + */ +#if defined(CONFIG_HUGETLB_PAGE) && defined(CONFIG_CMA) + arm64_hugetlb_cma_reserve(); +#endif + + dma_pernuma_cma_reserve(); + + /* + * sparse_init() tries to allocate memory from memblock, so must be + * done after the fixed reservations + */ + sparse_init(); + zone_sizes_init(min, max); + + /* + * Reserve the CMA area after arm64_dma_phys_limit was initialised. + */ + dma_contiguous_reserve(arm64_dma_phys_limit); + + /* + * request_standard_resources() depends on crashkernel's memory being + * reserved, so do it here. + */ + if (IS_ENABLED(CONFIG_ZONE_DMA) || IS_ENABLED(CONFIG_ZONE_DMA32)) + reserve_crashkernel(); + + memblock_dump_all(); +} + +#ifndef CONFIG_SPARSEMEM_VMEMMAP +static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn) +{ + struct page *start_pg, *end_pg; + unsigned long pg, pgend; + + /* + * Convert start_pfn/end_pfn to a struct page pointer. + */ + start_pg = pfn_to_page(start_pfn - 1) + 1; + end_pg = pfn_to_page(end_pfn - 1) + 1; + + /* + * Convert to physical addresses, and round start upwards and end + * downwards. + */ + pg = (unsigned long)PAGE_ALIGN(__pa(start_pg)); + pgend = (unsigned long)__pa(end_pg) & PAGE_MASK; + + /* + * If there are free pages between these, free the section of the + * memmap array. + */ + if (pg < pgend) + memblock_free(pg, pgend - pg); +} + +/* + * The mem_map array can get very big. Free the unused area of the memory map. 
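
The KASLR branch above randomizes where the linear map starts: the slack between the linear region and the populated physical span is divided into ARM64_MEMSTART_ALIGN slots and the 16-bit seed scales into one of them. The following sketch reproduces that arithmetic with sample numbers (the 1GB alignment and the 48-bit VA span are assumptions of the demo):

/* Illustrative reconstruction of the memstart randomization arithmetic. */
#include <stdio.h>
#include <stdint.h>

#define ARM64_MEMSTART_ALIGN	(UINT64_C(1) << 30)	/* assumed: 1GB slots */

int main(void)
{
	uint64_t linear_region_size = UINT64_C(1) << 47;	/* half of a 48-bit VA span */
	uint64_t dram_span = UINT64_C(16) << 30;		/* 16GB of populated RAM */
	uint16_t seed = 0xc3a7;					/* example KASLR seed */

	uint64_t range = linear_region_size - dram_span;
	uint64_t offset = 0;

	if (seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
		range /= ARM64_MEMSTART_ALIGN;			/* candidate slots */
		offset = ARM64_MEMSTART_ALIGN * ((range * seed) >> 16);
	}

	printf("slots=%llu, chosen offset=%llu GB\n",
	       (unsigned long long)range, (unsigned long long)(offset >> 30));
	return 0;
}
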
+ */ +static void __init free_unused_memmap(void) +{ + unsigned long start, end, prev_end = 0; + int i; + + for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) { +#ifdef CONFIG_SPARSEMEM + /* + * Take care not to free memmap entries that don't exist due + * to SPARSEMEM sections which aren't present. + */ + start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)); +#endif + /* + * If we had a previous bank, and there is a space between the + * current bank and the previous, free it. + */ + if (prev_end && prev_end < start) + free_memmap(prev_end, start); + + /* + * Align up here since the VM subsystem insists that the + * memmap entries are valid from the bank end aligned to + * MAX_ORDER_NR_PAGES. + */ + prev_end = ALIGN(end, MAX_ORDER_NR_PAGES); + } + +#ifdef CONFIG_SPARSEMEM + if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION)) + free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION)); +#endif +} +#endif /* !CONFIG_SPARSEMEM_VMEMMAP */ + +/* + * mem_init() marks the free areas in the mem_map and tells us how much memory + * is free. This is done after various parts of the system have claimed their + * memory after the kernel image. + */ +void __init mem_init(void) +{ + if (swiotlb_force == SWIOTLB_FORCE || + max_pfn > PFN_DOWN(arm64_dma_phys_limit)) + swiotlb_init(1); + else + swiotlb_force = SWIOTLB_NO_FORCE; + + set_max_mapnr(max_pfn - PHYS_PFN_OFFSET); + +#ifndef CONFIG_SPARSEMEM_VMEMMAP + free_unused_memmap(); +#endif + /* this will put all unused low memory onto the freelists */ + memblock_free_all(); + + mem_init_print_info(NULL); + + /* + * Check boundaries twice: Some fundamental inconsistencies can be + * detected at build time already. + */ +#ifdef CONFIG_COMPAT + BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64); +#endif + + if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) { + extern int sysctl_overcommit_memory; + /* + * On a machine this small we won't get anywhere without + * overcommit, so turn it on by default. + */ + sysctl_overcommit_memory = OVERCOMMIT_ALWAYS; + } +} + +void free_initmem(void) +{ + free_reserved_area(lm_alias(__init_begin), + lm_alias(__init_end), + POISON_FREE_INITMEM, "unused kernel"); + /* + * Unmap the __init region but leave the VM area in place. This + * prevents the region from being reused for kernel modules, which + * is not supported by kallsyms. + */ + unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin)); +} + +void dump_mem_limit(void) +{ + if (memory_limit != PHYS_ADDR_MAX) { + pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20); + } else { + pr_emerg("Memory Limit: none\n"); + } +} diff --git a/arch/arm64/mm/ioremap.c b/arch/arm64/mm/ioremap.c new file mode 100644 index 000000000..f173a01a0 --- /dev/null +++ b/arch/arm64/mm/ioremap.c @@ -0,0 +1,110 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/ioremap.c + * + * (C) Copyright 1995 1996 Linus Torvalds + * Hacked for ARM by Phil Blundell <philb@gnu.org> + * Hacked to allow all architectures to build, and various cleanups + * by Russell King + * Copyright (C) 2012 ARM Ltd. 
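
free_unused_memmap() above only releases struct page backing store that no allocator path can reach: the previous bank's end is rounded up to MAX_ORDER_NR_PAGES, and under SPARSEMEM the freed region is additionally capped at the next section boundary. A simplified stand-alone model (the constants are demo values, not the real arm64 ones):

/* Simplified model of the gap computation; bank layout is invented. */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES	(1UL << 10)	/* demo value */
#define PAGES_PER_SECTION	(1UL << 15)	/* demo value */
#define ALIGN_UP(x, a)		(((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	/* two discontiguous banks, as page frame numbers */
	unsigned long bank0_end   = 0x43d27;
	unsigned long bank1_start = 0x80000;

	unsigned long prev_end = ALIGN_UP(bank0_end, MAX_ORDER_NR_PAGES);
	unsigned long start    = bank1_start;
	unsigned long cap      = ALIGN_UP(prev_end, PAGES_PER_SECTION);

	if (start > cap)	/* mirrors: start = min(start, ALIGN(prev_end, PAGES_PER_SECTION)) */
		start = cap;

	if (prev_end < start)
		printf("memmap for pfns [0x%lx, 0x%lx) can be freed\n",
		       prev_end, start);
	return 0;
}
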
+ */ + +#include <linux/export.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> +#include <linux/io.h> +#include <linux/memblock.h> + +#include <asm/fixmap.h> +#include <asm/tlbflush.h> + +static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size, + pgprot_t prot, void *caller) +{ + unsigned long last_addr; + unsigned long offset = phys_addr & ~PAGE_MASK; + int err; + unsigned long addr; + struct vm_struct *area; + + /* + * Page align the mapping address and size, taking account of any + * offset. + */ + phys_addr &= PAGE_MASK; + size = PAGE_ALIGN(size + offset); + + /* + * Don't allow wraparound, zero size or outside PHYS_MASK. + */ + last_addr = phys_addr + size - 1; + if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK)) + return NULL; + + /* + * Don't allow RAM to be mapped. + */ + if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr)))) + return NULL; + + area = get_vm_area_caller(size, VM_IOREMAP, caller); + if (!area) + return NULL; + addr = (unsigned long)area->addr; + area->phys_addr = phys_addr; + + err = ioremap_page_range(addr, addr + size, phys_addr, prot); + if (err) { + vunmap((void *)addr); + return NULL; + } + + return (void __iomem *)(offset + addr); +} + +void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot) +{ + return __ioremap_caller(phys_addr, size, prot, + __builtin_return_address(0)); +} +EXPORT_SYMBOL(__ioremap); + +void iounmap(volatile void __iomem *io_addr) +{ + unsigned long addr = (unsigned long)io_addr & PAGE_MASK; + + /* + * We could get an address outside vmalloc range in case + * of ioremap_cache() reusing a RAM mapping. + */ + if (is_vmalloc_addr((void *)addr)) + vunmap((void *)addr); +} +EXPORT_SYMBOL(iounmap); + +void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size) +{ + /* For normal memory we already have a cacheable mapping. */ + if (pfn_valid(__phys_to_pfn(phys_addr))) + return (void __iomem *)__phys_to_virt(phys_addr); + + return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL), + __builtin_return_address(0)); +} +EXPORT_SYMBOL(ioremap_cache); + +/* + * Must be called after early_fixmap_init + */ +void __init early_ioremap_init(void) +{ + early_ioremap_setup(); +} + +bool arch_memremap_can_ram_remap(resource_size_t offset, size_t size, + unsigned long flags) +{ + unsigned long pfn = PHYS_PFN(offset); + + return memblock_is_map_memory(pfn); +} diff --git a/arch/arm64/mm/kasan_init.c b/arch/arm64/mm/kasan_init.c new file mode 100644 index 000000000..b24e43d20 --- /dev/null +++ b/arch/arm64/mm/kasan_init.c @@ -0,0 +1,276 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * This file contains kasan initialization code for ARM64. + * + * Copyright (c) 2015 Samsung Electronics Co., Ltd. + * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com> + */ + +#define pr_fmt(fmt) "kasan: " fmt +#include <linux/kasan.h> +#include <linux/kernel.h> +#include <linux/sched/task.h> +#include <linux/memblock.h> +#include <linux/start_kernel.h> +#include <linux/mm.h> + +#include <asm/mmu_context.h> +#include <asm/kernel-pgtable.h> +#include <asm/page.h> +#include <asm/pgalloc.h> +#include <asm/sections.h> +#include <asm/tlbflush.h> + +static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE); + +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). All the early functions are called too + * early to use lm_alias so __p*d_populate functions must be used to populate + * with the physical address from __pa_symbol. 
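
__ioremap_caller() above peels the sub-page offset off the physical address, maps whole pages, and then hands the caller a pointer with the offset added back. The address arithmetic in isolation, with arbitrary sample values:

/* Userspace illustration of the page-alignment bookkeeping only. */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

int main(void)
{
	unsigned long phys_addr = 0x9000123;	/* device register block */
	unsigned long size = 0x40;

	unsigned long offset = phys_addr & ~PAGE_MASK;

	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	printf("map phys 0x%lx..0x%lx, return vaddr = area->addr + 0x%lx\n",
	       phys_addr, phys_addr + size, offset);
	return 0;
}
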
+ */ + +static phys_addr_t __init kasan_alloc_zeroed_page(int node) +{ + void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE, + __pa(MAX_DMA_ADDRESS), + MEMBLOCK_ALLOC_KASAN, node); + if (!p) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", + __func__, PAGE_SIZE, PAGE_SIZE, node, + __pa(MAX_DMA_ADDRESS)); + + return __pa(p); +} + +static phys_addr_t __init kasan_alloc_raw_page(int node) +{ + void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE, + __pa(MAX_DMA_ADDRESS), + MEMBLOCK_ALLOC_KASAN, node); + if (!p) + panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n", + __func__, PAGE_SIZE, PAGE_SIZE, node, + __pa(MAX_DMA_ADDRESS)); + + return __pa(p); +} + +static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, + bool early) +{ + if (pmd_none(READ_ONCE(*pmdp))) { + phys_addr_t pte_phys = early ? + __pa_symbol(kasan_early_shadow_pte) + : kasan_alloc_zeroed_page(node); + __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); + } + + return early ? pte_offset_kimg(pmdp, addr) + : pte_offset_kernel(pmdp, addr); +} + +static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, + bool early) +{ + if (pud_none(READ_ONCE(*pudp))) { + phys_addr_t pmd_phys = early ? + __pa_symbol(kasan_early_shadow_pmd) + : kasan_alloc_zeroed_page(node); + __pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE); + } + + return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr); +} + +static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, + bool early) +{ + if (p4d_none(READ_ONCE(*p4dp))) { + phys_addr_t pud_phys = early ? + __pa_symbol(kasan_early_shadow_pud) + : kasan_alloc_zeroed_page(node); + __p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE); + } + + return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr); +} + +static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr, + unsigned long end, int node, bool early) +{ + unsigned long next; + pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early); + + do { + phys_addr_t page_phys = early ? 
+ __pa_symbol(kasan_early_shadow_page) + : kasan_alloc_raw_page(node); + if (!early) + memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE); + next = addr + PAGE_SIZE; + set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL)); + } while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep))); +} + +static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr, + unsigned long end, int node, bool early) +{ + unsigned long next; + pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early); + + do { + next = pmd_addr_end(addr, end); + kasan_pte_populate(pmdp, addr, next, node, early); + } while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp))); +} + +static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr, + unsigned long end, int node, bool early) +{ + unsigned long next; + pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early); + + do { + next = pud_addr_end(addr, end); + kasan_pmd_populate(pudp, addr, next, node, early); + } while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp))); +} + +static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr, + unsigned long end, int node, bool early) +{ + unsigned long next; + p4d_t *p4dp = p4d_offset(pgdp, addr); + + do { + next = p4d_addr_end(addr, end); + kasan_pud_populate(p4dp, addr, next, node, early); + } while (p4dp++, addr = next, addr != end); +} + +static void __init kasan_pgd_populate(unsigned long addr, unsigned long end, + int node, bool early) +{ + unsigned long next; + pgd_t *pgdp; + + pgdp = pgd_offset_k(addr); + do { + next = pgd_addr_end(addr, end); + kasan_p4d_populate(pgdp, addr, next, node, early); + } while (pgdp++, addr = next, addr != end); +} + +/* The early shadow maps everything to a single page of zeroes */ +asmlinkage void __init kasan_early_init(void) +{ + BUILD_BUG_ON(KASAN_SHADOW_OFFSET != + KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT))); + BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE)); + BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE)); + kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, + true); +} + +/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */ +static void __init kasan_map_populate(unsigned long start, unsigned long end, + int node) +{ + kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false); +} + +/* + * Copy the current shadow region into a new pgdir. + */ +void __init kasan_copy_shadow(pgd_t *pgdir) +{ + pgd_t *pgdp, *pgdp_new, *pgdp_end; + + pgdp = pgd_offset_k(KASAN_SHADOW_START); + pgdp_end = pgd_offset_k(KASAN_SHADOW_END); + pgdp_new = pgd_offset_pgd(pgdir, KASAN_SHADOW_START); + do { + set_pgd(pgdp_new, READ_ONCE(*pgdp)); + } while (pgdp++, pgdp_new++, pgdp != pgdp_end); +} + +static void __init clear_pgds(unsigned long start, + unsigned long end) +{ + /* + * Remove references to kasan page tables from + * swapper_pg_dir. 
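
All of the population code above exists to back the KASAN shadow region, where one shadow byte tracks an 8-byte granule of kernel address space. A small demo of the mapping itself (the generic KASAN scale of 1/8 is assumed; the offset constant is a placeholder, not a value computed for a real arm64 configuration):

/* Demo of shadow = (addr >> 3) + KASAN_SHADOW_OFFSET; constants are placeholders. */
#include <stdio.h>
#include <stdint.h>

#define KASAN_SHADOW_SCALE_SHIFT	3
#define KASAN_SHADOW_OFFSET		UINT64_C(0xdfffa00000000000)	/* placeholder */

static uint64_t kasan_mem_to_shadow(uint64_t addr)
{
	return (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET;
}

int main(void)
{
	uint64_t obj = UINT64_C(0xffff000010a40000);	/* some kernel VA */

	printf("object  %016llx\n", (unsigned long long)obj);
	printf("shadow  %016llx\n", (unsigned long long)kasan_mem_to_shadow(obj));
	/* 8 bytes of object advance the shadow address by exactly 1 byte */
	printf("shadow+ %016llx\n",
	       (unsigned long long)kasan_mem_to_shadow(obj + 8));
	return 0;
}
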
pgd_clear() can't be used + * here because it's nop on 2,3-level pagetable setups + */ + for (; start < end; start += PGDIR_SIZE) + set_pgd(pgd_offset_k(start), __pgd(0)); +} + +void __init kasan_init(void) +{ + u64 kimg_shadow_start, kimg_shadow_end; + u64 mod_shadow_start, mod_shadow_end; + phys_addr_t pa_start, pa_end; + u64 i; + + kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK; + kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end)); + + mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR); + mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END); + + /* + * We are going to perform proper setup of shadow memory. + * At first we should unmap early shadow (clear_pgds() call below). + * However, instrumented code couldn't execute without shadow memory. + * tmp_pg_dir used to keep early shadow mapped until full shadow + * setup will be finished. + */ + memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir)); + dsb(ishst); + cpu_replace_ttbr1(lm_alias(tmp_pg_dir)); + + clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END); + + kasan_map_populate(kimg_shadow_start, kimg_shadow_end, + early_pfn_to_nid(virt_to_pfn(lm_alias(_text)))); + + kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END), + (void *)mod_shadow_start); + kasan_populate_early_shadow((void *)kimg_shadow_end, + (void *)KASAN_SHADOW_END); + + if (kimg_shadow_start > mod_shadow_end) + kasan_populate_early_shadow((void *)mod_shadow_end, + (void *)kimg_shadow_start); + + for_each_mem_range(i, &pa_start, &pa_end) { + void *start = (void *)__phys_to_virt(pa_start); + void *end = (void *)__phys_to_virt(pa_end); + + if (start >= end) + break; + + kasan_map_populate((unsigned long)kasan_mem_to_shadow(start), + (unsigned long)kasan_mem_to_shadow(end), + early_pfn_to_nid(virt_to_pfn(start))); + } + + /* + * KAsan may reuse the contents of kasan_early_shadow_pte directly, + * so we should make sure that it maps the zero page read-only. + */ + for (i = 0; i < PTRS_PER_PTE; i++) + set_pte(&kasan_early_shadow_pte[i], + pfn_pte(sym_to_pfn(kasan_early_shadow_page), + PAGE_KERNEL_RO)); + + memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE); + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); + + /* At this point kasan is fully initialized. Enable error messages */ + init_task.kasan_depth = 0; + pr_info("KernelAddressSanitizer initialized\n"); +} diff --git a/arch/arm64/mm/mmap.c b/arch/arm64/mm/mmap.c new file mode 100644 index 000000000..3028bacbc --- /dev/null +++ b/arch/arm64/mm/mmap.c @@ -0,0 +1,70 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/mmap.c + * + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/elf.h> +#include <linux/fs.h> +#include <linux/memblock.h> +#include <linux/mm.h> +#include <linux/mman.h> +#include <linux/export.h> +#include <linux/shm.h> +#include <linux/sched/signal.h> +#include <linux/sched/mm.h> +#include <linux/io.h> +#include <linux/personality.h> +#include <linux/random.h> + +#include <asm/cputype.h> + +/* + * You really shouldn't be using read() or write() on /dev/mem. This might go + * away in the future. + */ +int valid_phys_addr_range(phys_addr_t addr, size_t size) +{ + /* + * Check whether addr is covered by a memory region without the + * MEMBLOCK_NOMAP attribute, and whether that region covers the + * entire range. In theory, this could lead to false negatives + * if the range is covered by distinct but adjacent memory regions + * that only differ in other attributes. 
However, few of such + * attributes have been defined, and it is debatable whether it + * follows that /dev/mem read() calls should be able traverse + * such boundaries. + */ + return memblock_is_region_memory(addr, size) && + memblock_is_map_memory(addr); +} + +/* + * Do not allow /dev/mem mappings beyond the supported physical range. + */ +int valid_mmap_phys_addr_range(unsigned long pfn, size_t size) +{ + return !(((pfn << PAGE_SHIFT) + size) & ~PHYS_MASK); +} + +#ifdef CONFIG_STRICT_DEVMEM + +#include <linux/ioport.h> + +/* + * devmem_is_allowed() checks to see if /dev/mem access to a certain address + * is valid. The argument is a physical page number. We mimic x86 here by + * disallowing access to system RAM as well as device-exclusive MMIO regions. + * This effectively disable read()/write() on /dev/mem. + */ +int devmem_is_allowed(unsigned long pfn) +{ + if (iomem_is_exclusive(pfn << PAGE_SHIFT)) + return 0; + if (!page_is_ram(pfn)) + return 1; + return 0; +} + +#endif diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c new file mode 100644 index 000000000..78f9fb638 --- /dev/null +++ b/arch/arm64/mm/mmu.c @@ -0,0 +1,1595 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Based on arch/arm/mm/mmu.c + * + * Copyright (C) 1995-2005 Russell King + * Copyright (C) 2012 ARM Ltd. + */ + +#include <linux/cache.h> +#include <linux/export.h> +#include <linux/kernel.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/ioport.h> +#include <linux/kexec.h> +#include <linux/libfdt.h> +#include <linux/mman.h> +#include <linux/nodemask.h> +#include <linux/memblock.h> +#include <linux/memory.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/mm.h> +#include <linux/vmalloc.h> + +#include <asm/barrier.h> +#include <asm/cputype.h> +#include <asm/fixmap.h> +#include <asm/kasan.h> +#include <asm/kernel-pgtable.h> +#include <asm/sections.h> +#include <asm/setup.h> +#include <linux/sizes.h> +#include <asm/tlb.h> +#include <asm/mmu_context.h> +#include <asm/ptdump.h> +#include <asm/tlbflush.h> +#include <asm/pgalloc.h> + +#define NO_BLOCK_MAPPINGS BIT(0) +#define NO_CONT_MAPPINGS BIT(1) + +u64 idmap_t0sz = TCR_T0SZ(VA_BITS_MIN); +u64 idmap_ptrs_per_pgd = PTRS_PER_PGD; + +u64 __section(".mmuoff.data.write") vabits_actual; +EXPORT_SYMBOL(vabits_actual); + +u64 kimage_voffset __ro_after_init; +EXPORT_SYMBOL(kimage_voffset); + +/* + * Empty_zero_page is a special page that is used for zero-initialized data + * and COW. + */ +unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss; +EXPORT_SYMBOL(empty_zero_page); + +static pte_t bm_pte[PTRS_PER_PTE] __page_aligned_bss; +static pmd_t bm_pmd[PTRS_PER_PMD] __page_aligned_bss __maybe_unused; +static pud_t bm_pud[PTRS_PER_PUD] __page_aligned_bss __maybe_unused; + +static DEFINE_SPINLOCK(swapper_pgdir_lock); +static DEFINE_MUTEX(fixmap_lock); + +void set_swapper_pgd(pgd_t *pgdp, pgd_t pgd) +{ + pgd_t *fixmap_pgdp; + + spin_lock(&swapper_pgdir_lock); + fixmap_pgdp = pgd_set_fixmap(__pa_symbol(pgdp)); + WRITE_ONCE(*fixmap_pgdp, pgd); + /* + * We need dsb(ishst) here to ensure the page-table-walker sees + * our new entry before set_p?d() returns. The fixmap's + * flush_tlb_kernel_range() via clear_fixmap() does this for us. 
+ */ + pgd_clear_fixmap(); + spin_unlock(&swapper_pgdir_lock); +} + +pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot) +{ + if (!pfn_valid(pfn)) + return pgprot_noncached(vma_prot); + else if (file->f_flags & O_SYNC) + return pgprot_writecombine(vma_prot); + return vma_prot; +} +EXPORT_SYMBOL(phys_mem_access_prot); + +static phys_addr_t __init early_pgtable_alloc(int shift) +{ + phys_addr_t phys; + void *ptr; + + phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); + if (!phys) + panic("Failed to allocate page table page\n"); + + /* + * The FIX_{PGD,PUD,PMD} slots may be in active use, but the FIX_PTE + * slot will be free, so we can (ab)use the FIX_PTE slot to initialise + * any level of table. + */ + ptr = pte_set_fixmap(phys); + + memset(ptr, 0, PAGE_SIZE); + + /* + * Implicit barriers also ensure the zeroed page is visible to the page + * table walker + */ + pte_clear_fixmap(); + + return phys; +} + +static bool pgattr_change_is_safe(u64 old, u64 new) +{ + /* + * The following mapping attributes may be updated in live + * kernel mappings without the need for break-before-make. + */ + pteval_t mask = PTE_PXN | PTE_RDONLY | PTE_WRITE | PTE_NG; + + /* creating or taking down mappings is always safe */ + if (old == 0 || new == 0) + return true; + + /* live contiguous mappings may not be manipulated at all */ + if ((old | new) & PTE_CONT) + return false; + + /* Transitioning from Non-Global to Global is unsafe */ + if (old & ~new & PTE_NG) + return false; + + /* + * Changing the memory type between Normal and Normal-Tagged is safe + * since Tagged is considered a permission attribute from the + * mismatched attribute aliases perspective. + */ + if (((old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || + (old & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED)) && + ((new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL) || + (new & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL_TAGGED))) + mask |= PTE_ATTRINDX_MASK; + + return ((old ^ new) & ~mask) == 0; +} + +static void init_pte(pmd_t *pmdp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot) +{ + pte_t *ptep; + + ptep = pte_set_fixmap_offset(pmdp, addr); + do { + pte_t old_pte = READ_ONCE(*ptep); + + set_pte(ptep, pfn_pte(__phys_to_pfn(phys), prot)); + + /* + * After the PTE entry has been populated once, we + * only allow updates to the permission attributes. 
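
pgattr_change_is_safe() above encodes which attribute bits may legally change on a live kernel mapping without a break-before-make sequence: everything outside a small mask must stay identical. A stand-alone sketch of that rule, with bit positions invented for the demo:

/* Sketch of the "only these bits may differ" check; bit layout is made up. */
#include <stdio.h>
#include <stdbool.h>
#include <stdint.h>

#define D_PXN		(1ULL << 53)	/* demo bit positions */
#define D_RDONLY	(1ULL << 7)
#define D_WRITE		(1ULL << 51)
#define D_NG		(1ULL << 11)
#define D_ADDR		(0xffffULL << 16)

static bool change_is_safe(uint64_t old, uint64_t new)
{
	uint64_t mask = D_PXN | D_RDONLY | D_WRITE | D_NG;

	if (old == 0 || new == 0)	/* creating or tearing down is fine */
		return true;

	return ((old ^ new) & ~mask) == 0;
}

int main(void)
{
	uint64_t entry = D_ADDR | D_WRITE;

	/* dropping write permission: differs only inside the mask -> safe */
	printf("make read-only : %d\n",
	       change_is_safe(entry, (entry & ~D_WRITE) | D_RDONLY));
	/* changing the output address: differs outside the mask -> unsafe */
	printf("change address : %d\n",
	       change_is_safe(entry, (entry & ~D_ADDR) | (0x1234ULL << 16)));
	return 0;
}
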
+ */ + BUG_ON(!pgattr_change_is_safe(pte_val(old_pte), + READ_ONCE(pte_val(*ptep)))); + + phys += PAGE_SIZE; + } while (ptep++, addr += PAGE_SIZE, addr != end); + + pte_clear_fixmap(); +} + +static void alloc_init_cont_pte(pmd_t *pmdp, unsigned long addr, + unsigned long end, phys_addr_t phys, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(int), + int flags) +{ + unsigned long next; + pmd_t pmd = READ_ONCE(*pmdp); + + BUG_ON(pmd_sect(pmd)); + if (pmd_none(pmd)) { + phys_addr_t pte_phys; + BUG_ON(!pgtable_alloc); + pte_phys = pgtable_alloc(PAGE_SHIFT); + __pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE); + pmd = READ_ONCE(*pmdp); + } + BUG_ON(pmd_bad(pmd)); + + do { + pgprot_t __prot = prot; + + next = pte_cont_addr_end(addr, end); + + /* use a contiguous mapping if the range is suitably aligned */ + if ((((addr | next | phys) & ~CONT_PTE_MASK) == 0) && + (flags & NO_CONT_MAPPINGS) == 0) + __prot = __pgprot(pgprot_val(prot) | PTE_CONT); + + init_pte(pmdp, addr, next, phys, __prot); + + phys += next - addr; + } while (addr = next, addr != end); +} + +static void init_pmd(pud_t *pudp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(int), int flags) +{ + unsigned long next; + pmd_t *pmdp; + + pmdp = pmd_set_fixmap_offset(pudp, addr); + do { + pmd_t old_pmd = READ_ONCE(*pmdp); + + next = pmd_addr_end(addr, end); + + /* try section mapping first */ + if (((addr | next | phys) & ~SECTION_MASK) == 0 && + (flags & NO_BLOCK_MAPPINGS) == 0) { + pmd_set_huge(pmdp, phys, prot); + + /* + * After the PMD entry has been populated once, we + * only allow updates to the permission attributes. + */ + BUG_ON(!pgattr_change_is_safe(pmd_val(old_pmd), + READ_ONCE(pmd_val(*pmdp)))); + } else { + alloc_init_cont_pte(pmdp, addr, next, phys, prot, + pgtable_alloc, flags); + + BUG_ON(pmd_val(old_pmd) != 0 && + pmd_val(old_pmd) != READ_ONCE(pmd_val(*pmdp))); + } + phys += next - addr; + } while (pmdp++, addr = next, addr != end); + + pmd_clear_fixmap(); +} + +static void alloc_init_cont_pmd(pud_t *pudp, unsigned long addr, + unsigned long end, phys_addr_t phys, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(int), int flags) +{ + unsigned long next; + pud_t pud = READ_ONCE(*pudp); + + /* + * Check for initial section mappings in the pgd/pud. 
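
alloc_init_cont_pte() above only sets PTE_CONT when the virtual start, the end of the chunk and the physical address are all aligned to the contiguous span. A tiny runnable check of that condition, assuming the 64K CONT_PTE span of a 4K granule:

/* Demo of the (addr | next | phys) alignment test; addresses are examples. */
#include <stdio.h>
#include <stdbool.h>

#define CONT_PTE_SIZE	(1UL << 16)		/* 16 x 4K */
#define CONT_PTE_MASK	(~(CONT_PTE_SIZE - 1))

static bool can_use_cont(unsigned long addr, unsigned long next,
			 unsigned long phys)
{
	return ((addr | next | phys) & ~CONT_PTE_MASK) == 0;
}

int main(void)
{
	printf("aligned   : %d\n",
	       can_use_cont(0xffff000012340000UL, 0xffff000012350000UL,
			    0x80210000UL));
	printf("unaligned : %d\n",
	       can_use_cont(0xffff000012344000UL, 0xffff000012350000UL,
			    0x80210000UL));
	return 0;
}
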
+ */ + BUG_ON(pud_sect(pud)); + if (pud_none(pud)) { + phys_addr_t pmd_phys; + BUG_ON(!pgtable_alloc); + pmd_phys = pgtable_alloc(PMD_SHIFT); + __pud_populate(pudp, pmd_phys, PUD_TYPE_TABLE); + pud = READ_ONCE(*pudp); + } + BUG_ON(pud_bad(pud)); + + do { + pgprot_t __prot = prot; + + next = pmd_cont_addr_end(addr, end); + + /* use a contiguous mapping if the range is suitably aligned */ + if ((((addr | next | phys) & ~CONT_PMD_MASK) == 0) && + (flags & NO_CONT_MAPPINGS) == 0) + __prot = __pgprot(pgprot_val(prot) | PTE_CONT); + + init_pmd(pudp, addr, next, phys, __prot, pgtable_alloc, flags); + + phys += next - addr; + } while (addr = next, addr != end); +} + +static inline bool use_1G_block(unsigned long addr, unsigned long next, + unsigned long phys) +{ + if (PAGE_SHIFT != 12) + return false; + + if (((addr | next | phys) & ~PUD_MASK) != 0) + return false; + + return true; +} + +static void alloc_init_pud(pgd_t *pgdp, unsigned long addr, unsigned long end, + phys_addr_t phys, pgprot_t prot, + phys_addr_t (*pgtable_alloc)(int), + int flags) +{ + unsigned long next; + pud_t *pudp; + p4d_t *p4dp = p4d_offset(pgdp, addr); + p4d_t p4d = READ_ONCE(*p4dp); + + if (p4d_none(p4d)) { + phys_addr_t pud_phys; + BUG_ON(!pgtable_alloc); + pud_phys = pgtable_alloc(PUD_SHIFT); + __p4d_populate(p4dp, pud_phys, PUD_TYPE_TABLE); + p4d = READ_ONCE(*p4dp); + } + BUG_ON(p4d_bad(p4d)); + + /* + * No need for locking during early boot. And it doesn't work as + * expected with KASLR enabled. + */ + if (system_state != SYSTEM_BOOTING) + mutex_lock(&fixmap_lock); + pudp = pud_set_fixmap_offset(p4dp, addr); + do { + pud_t old_pud = READ_ONCE(*pudp); + + next = pud_addr_end(addr, end); + + /* + * For 4K granule only, attempt to put down a 1GB block + */ + if (use_1G_block(addr, next, phys) && + (flags & NO_BLOCK_MAPPINGS) == 0) { + pud_set_huge(pudp, phys, prot); + + /* + * After the PUD entry has been populated once, we + * only allow updates to the permission attributes. + */ + BUG_ON(!pgattr_change_is_safe(pud_val(old_pud), + READ_ONCE(pud_val(*pudp)))); + } else { + alloc_init_cont_pmd(pudp, addr, next, phys, prot, + pgtable_alloc, flags); + + BUG_ON(pud_val(old_pud) != 0 && + pud_val(old_pud) != READ_ONCE(pud_val(*pudp))); + } + phys += next - addr; + } while (pudp++, addr = next, addr != end); + + pud_clear_fixmap(); + if (system_state != SYSTEM_BOOTING) + mutex_unlock(&fixmap_lock); +} + +static void __create_pgd_mapping(pgd_t *pgdir, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, + phys_addr_t (*pgtable_alloc)(int), + int flags) +{ + unsigned long addr, end, next; + pgd_t *pgdp = pgd_offset_pgd(pgdir, virt); + + /* + * If the virtual and physical address don't have the same offset + * within a page, we cannot map the region as the caller expects. 
+ */ + if (WARN_ON((phys ^ virt) & ~PAGE_MASK)) + return; + + phys &= PAGE_MASK; + addr = virt & PAGE_MASK; + end = PAGE_ALIGN(virt + size); + + do { + next = pgd_addr_end(addr, end); + alloc_init_pud(pgdp, addr, next, phys, prot, pgtable_alloc, + flags); + phys += next - addr; + } while (pgdp++, addr = next, addr != end); +} + +static phys_addr_t __pgd_pgtable_alloc(int shift) +{ + void *ptr = (void *)__get_free_page(GFP_PGTABLE_KERNEL); + BUG_ON(!ptr); + + /* Ensure the zeroed page is visible to the page table walker */ + dsb(ishst); + return __pa(ptr); +} + +static phys_addr_t pgd_pgtable_alloc(int shift) +{ + phys_addr_t pa = __pgd_pgtable_alloc(shift); + + /* + * Call proper page table ctor in case later we need to + * call core mm functions like apply_to_page_range() on + * this pre-allocated page table. + * + * We don't select ARCH_ENABLE_SPLIT_PMD_PTLOCK if pmd is + * folded, and if so pgtable_pmd_page_ctor() becomes nop. + */ + if (shift == PAGE_SHIFT) + BUG_ON(!pgtable_pte_page_ctor(phys_to_page(pa))); + else if (shift == PMD_SHIFT) + BUG_ON(!pgtable_pmd_page_ctor(phys_to_page(pa))); + + return pa; +} + +/* + * This function can only be used to modify existing table entries, + * without allocating new levels of table. Note that this permits the + * creation of new section or page entries. + */ +static void __init create_mapping_noalloc(phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot) +{ + if (virt < PAGE_OFFSET) { + pr_warn("BUG: not creating mapping for %pa at 0x%016lx - outside kernel range\n", + &phys, virt); + return; + } + __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, + NO_CONT_MAPPINGS); +} + +void __init create_pgd_mapping(struct mm_struct *mm, phys_addr_t phys, + unsigned long virt, phys_addr_t size, + pgprot_t prot, bool page_mappings_only) +{ + int flags = 0; + + BUG_ON(mm == &init_mm); + + if (page_mappings_only) + flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + __create_pgd_mapping(mm->pgd, phys, virt, size, prot, + pgd_pgtable_alloc, flags); +} + +static void update_mapping_prot(phys_addr_t phys, unsigned long virt, + phys_addr_t size, pgprot_t prot) +{ + if (virt < PAGE_OFFSET) { + pr_warn("BUG: not updating mapping for %pa at 0x%016lx - outside kernel range\n", + &phys, virt); + return; + } + + __create_pgd_mapping(init_mm.pgd, phys, virt, size, prot, NULL, + NO_CONT_MAPPINGS); + + /* flush the TLBs after updating live kernel mappings */ + flush_tlb_kernel_range(virt, virt + size); +} + +static void __init __map_memblock(pgd_t *pgdp, phys_addr_t start, + phys_addr_t end, pgprot_t prot, int flags) +{ + __create_pgd_mapping(pgdp, start, __phys_to_virt(start), end - start, + prot, early_pgtable_alloc, flags); +} + +void __init mark_linear_text_alias_ro(void) +{ + /* + * Remove the write permissions from the linear alias of .text/.rodata + */ + update_mapping_prot(__pa_symbol(_text), (unsigned long)lm_alias(_text), + (unsigned long)__init_begin - (unsigned long)_text, + PAGE_KERNEL_RO); +} + +static bool crash_mem_map __initdata; + +static int __init enable_crash_mem_map(char *arg) +{ + /* + * Proper parameter parsing is done by reserve_crashkernel(). We only + * need to know if the linear map has to avoid block mappings so that + * the crashkernel reservations can be unmapped later. 
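
The WARN_ON() at the top of __create_pgd_mapping() above rejects requests where the physical and virtual addresses disagree below page granularity, since a page table entry can only express a page-aligned translation with a common in-page offset. Demonstrated with two sample address pairs:

/* Runnable demo of the phys/virt in-page offset check; addresses are samples. */
#include <stdio.h>

#define PAGE_MASK	(~((1UL << 12) - 1))

int main(void)
{
	unsigned long phys = 0x80200123, virt = 0xffff800010000123;
	unsigned long bad_virt = 0xffff800010000400;

	printf("same in-page offset      -> mappable: %d\n",
	       ((phys ^ virt) & ~PAGE_MASK) == 0);
	printf("different in-page offset -> mappable: %d\n",
	       ((phys ^ bad_virt) & ~PAGE_MASK) == 0);
	return 0;
}
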
+ */ + crash_mem_map = true; + + return 0; +} +early_param("crashkernel", enable_crash_mem_map); + +static void __init map_mem(pgd_t *pgdp) +{ + phys_addr_t kernel_start = __pa_symbol(_text); + phys_addr_t kernel_end = __pa_symbol(__init_begin); + phys_addr_t start, end; + int flags = 0; + u64 i; + + if (rodata_full || debug_pagealloc_enabled()) + flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + /* + * Take care not to create a writable alias for the + * read-only text and rodata sections of the kernel image. + * So temporarily mark them as NOMAP to skip mappings in + * the following for-loop + */ + memblock_mark_nomap(kernel_start, kernel_end - kernel_start); + +#ifdef CONFIG_KEXEC_CORE + if (crash_mem_map) { + if (IS_ENABLED(CONFIG_ZONE_DMA) || + IS_ENABLED(CONFIG_ZONE_DMA32)) + flags |= NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + else if (crashk_res.end) + memblock_mark_nomap(crashk_res.start, + resource_size(&crashk_res)); + } +#endif + + /* map all the memory banks */ + for_each_mem_range(i, &start, &end) { + if (start >= end) + break; + /* + * The linear map must allow allocation tags reading/writing + * if MTE is present. Otherwise, it has the same attributes as + * PAGE_KERNEL. + */ + __map_memblock(pgdp, start, end, pgprot_tagged(PAGE_KERNEL), + flags); + } + + /* + * Map the linear alias of the [_text, __init_begin) interval + * as non-executable now, and remove the write permission in + * mark_linear_text_alias_ro() below (which will be called after + * alternative patching has completed). This makes the contents + * of the region accessible to subsystems such as hibernate, + * but protects it from inadvertent modification or execution. + * Note that contiguous mappings cannot be remapped in this way, + * so we should avoid them here. + */ + __map_memblock(pgdp, kernel_start, kernel_end, + PAGE_KERNEL, NO_CONT_MAPPINGS); + memblock_clear_nomap(kernel_start, kernel_end - kernel_start); + + /* + * Use page-level mappings here so that we can shrink the region + * in page granularity and put back unused memory to buddy system + * through /sys/kernel/kexec_crash_size interface. + */ +#ifdef CONFIG_KEXEC_CORE + if (crash_mem_map && + !IS_ENABLED(CONFIG_ZONE_DMA) && !IS_ENABLED(CONFIG_ZONE_DMA32)) { + if (crashk_res.end) { + __map_memblock(pgdp, crashk_res.start, + crashk_res.end + 1, + PAGE_KERNEL, + NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS); + memblock_clear_nomap(crashk_res.start, + resource_size(&crashk_res)); + } + } +#endif +} + +void mark_rodata_ro(void) +{ + unsigned long section_size; + + /* + * mark .rodata as read only. Use __init_begin rather than __end_rodata + * to cover NOTES and EXCEPTION_TABLE. 
+ */ + section_size = (unsigned long)__init_begin - (unsigned long)__start_rodata; + update_mapping_prot(__pa_symbol(__start_rodata), (unsigned long)__start_rodata, + section_size, PAGE_KERNEL_RO); + + debug_checkwx(); +} + +static void __init map_kernel_segment(pgd_t *pgdp, void *va_start, void *va_end, + pgprot_t prot, struct vm_struct *vma, + int flags, unsigned long vm_flags) +{ + phys_addr_t pa_start = __pa_symbol(va_start); + unsigned long size = va_end - va_start; + + BUG_ON(!PAGE_ALIGNED(pa_start)); + BUG_ON(!PAGE_ALIGNED(size)); + + __create_pgd_mapping(pgdp, pa_start, (unsigned long)va_start, size, prot, + early_pgtable_alloc, flags); + + if (!(vm_flags & VM_NO_GUARD)) + size += PAGE_SIZE; + + vma->addr = va_start; + vma->phys_addr = pa_start; + vma->size = size; + vma->flags = VM_MAP | vm_flags; + vma->caller = __builtin_return_address(0); + + vm_area_add_early(vma); +} + +static int __init parse_rodata(char *arg) +{ + int ret = strtobool(arg, &rodata_enabled); + if (!ret) { + rodata_full = false; + return 0; + } + + /* permit 'full' in addition to boolean options */ + if (strcmp(arg, "full")) + return -EINVAL; + + rodata_enabled = true; + rodata_full = true; + return 0; +} +early_param("rodata", parse_rodata); + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 +static int __init map_entry_trampoline(void) +{ + int i; + + pgprot_t prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + phys_addr_t pa_start = __pa_symbol(__entry_tramp_text_start); + + /* The trampoline is always mapped and can therefore be global */ + pgprot_val(prot) &= ~PTE_NG; + + /* Map only the text into the trampoline page table */ + memset(tramp_pg_dir, 0, PGD_SIZE); + __create_pgd_mapping(tramp_pg_dir, pa_start, TRAMP_VALIAS, + entry_tramp_text_size(), prot, + __pgd_pgtable_alloc, NO_BLOCK_MAPPINGS); + + /* Map both the text and data into the kernel page table */ + for (i = 0; i < DIV_ROUND_UP(entry_tramp_text_size(), PAGE_SIZE); i++) + __set_fixmap(FIX_ENTRY_TRAMP_TEXT1 - i, + pa_start + i * PAGE_SIZE, prot); + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + extern char __entry_tramp_data_start[]; + + __set_fixmap(FIX_ENTRY_TRAMP_DATA, + __pa_symbol(__entry_tramp_data_start), + PAGE_KERNEL_RO); + } + + return 0; +} +core_initcall(map_entry_trampoline); +#endif + +/* + * Open coded check for BTI, only for use to determine configuration + * for early mappings for before the cpufeature code has run. + */ +static bool arm64_early_this_cpu_has_bti(void) +{ + u64 pfr1; + + if (!IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)) + return false; + + pfr1 = read_sysreg_s(SYS_ID_AA64PFR1_EL1); + return cpuid_feature_extract_unsigned_field(pfr1, + ID_AA64PFR1_BT_SHIFT); +} + +/* + * Create fine-grained mappings for the kernel. + */ +static void __init map_kernel(pgd_t *pgdp) +{ + static struct vm_struct vmlinux_text, vmlinux_rodata, vmlinux_inittext, + vmlinux_initdata, vmlinux_data; + + /* + * External debuggers may need to write directly to the text + * mapping to install SW breakpoints. Allow this (only) when + * explicitly requested with rodata=off. + */ + pgprot_t text_prot = rodata_enabled ? PAGE_KERNEL_ROX : PAGE_KERNEL_EXEC; + + /* + * If we have a CPU that supports BTI and a kernel built for + * BTI then mark the kernel executable text as guarded pages + * now so we don't have to rewrite the page tables later. 
+ */ + if (arm64_early_this_cpu_has_bti()) + text_prot = __pgprot_modify(text_prot, PTE_GP, PTE_GP); + + /* + * Only rodata will be remapped with different permissions later on, + * all other segments are allowed to use contiguous mappings. + */ + map_kernel_segment(pgdp, _text, _etext, text_prot, &vmlinux_text, 0, + VM_NO_GUARD); + map_kernel_segment(pgdp, __start_rodata, __inittext_begin, PAGE_KERNEL, + &vmlinux_rodata, NO_CONT_MAPPINGS, VM_NO_GUARD); + map_kernel_segment(pgdp, __inittext_begin, __inittext_end, text_prot, + &vmlinux_inittext, 0, VM_NO_GUARD); + map_kernel_segment(pgdp, __initdata_begin, __initdata_end, PAGE_KERNEL, + &vmlinux_initdata, 0, VM_NO_GUARD); + map_kernel_segment(pgdp, _data, _end, PAGE_KERNEL, &vmlinux_data, 0, 0); + + if (!READ_ONCE(pgd_val(*pgd_offset_pgd(pgdp, FIXADDR_START)))) { + /* + * The fixmap falls in a separate pgd to the kernel, and doesn't + * live in the carveout for the swapper_pg_dir. We can simply + * re-use the existing dir for the fixmap. + */ + set_pgd(pgd_offset_pgd(pgdp, FIXADDR_START), + READ_ONCE(*pgd_offset_k(FIXADDR_START))); + } else if (CONFIG_PGTABLE_LEVELS > 3) { + pgd_t *bm_pgdp; + p4d_t *bm_p4dp; + pud_t *bm_pudp; + /* + * The fixmap shares its top level pgd entry with the kernel + * mapping. This can really only occur when we are running + * with 16k/4 levels, so we can simply reuse the pud level + * entry instead. + */ + BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); + bm_pgdp = pgd_offset_pgd(pgdp, FIXADDR_START); + bm_p4dp = p4d_offset(bm_pgdp, FIXADDR_START); + bm_pudp = pud_set_fixmap_offset(bm_p4dp, FIXADDR_START); + pud_populate(&init_mm, bm_pudp, lm_alias(bm_pmd)); + pud_clear_fixmap(); + } else { + BUG(); + } + + kasan_copy_shadow(pgdp); +} + +void __init paging_init(void) +{ + pgd_t *pgdp = pgd_set_fixmap(__pa_symbol(swapper_pg_dir)); + + map_kernel(pgdp); + map_mem(pgdp); + + pgd_clear_fixmap(); + + cpu_replace_ttbr1(lm_alias(swapper_pg_dir)); + init_mm.pgd = swapper_pg_dir; + + memblock_free(__pa_symbol(init_pg_dir), + __pa_symbol(init_pg_end) - __pa_symbol(init_pg_dir)); + + memblock_allow_resize(); +} + +/* + * Check whether a kernel address is valid (derived from arch/x86/). 
+ */ +int kern_addr_valid(unsigned long addr) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + pte_t *ptep, pte; + + addr = arch_kasan_reset_tag(addr); + if ((((long)addr) >> VA_BITS) != -1UL) + return 0; + + pgdp = pgd_offset_k(addr); + if (pgd_none(READ_ONCE(*pgdp))) + return 0; + + p4dp = p4d_offset(pgdp, addr); + if (p4d_none(READ_ONCE(*p4dp))) + return 0; + + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + return 0; + + if (pud_sect(pud)) + return pfn_valid(pud_pfn(pud)); + + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + return 0; + + if (pmd_sect(pmd)) + return pfn_valid(pmd_pfn(pmd)); + + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + if (pte_none(pte)) + return 0; + + return pfn_valid(pte_pfn(pte)); +} + +#ifdef CONFIG_MEMORY_HOTPLUG +static void free_hotplug_page_range(struct page *page, size_t size, + struct vmem_altmap *altmap) +{ + if (altmap) { + vmem_altmap_free(altmap, size >> PAGE_SHIFT); + } else { + WARN_ON(PageReserved(page)); + free_pages((unsigned long)page_address(page), get_order(size)); + } +} + +static void free_hotplug_pgtable_page(struct page *page) +{ + free_hotplug_page_range(page, PAGE_SIZE, NULL); +} + +static bool pgtable_range_aligned(unsigned long start, unsigned long end, + unsigned long floor, unsigned long ceiling, + unsigned long mask) +{ + start &= mask; + if (start < floor) + return false; + + if (ceiling) { + ceiling &= mask; + if (!ceiling) + return false; + } + + if (end - 1 > ceiling - 1) + return false; + return true; +} + +static void unmap_hotplug_pte_range(pmd_t *pmdp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + pte_t *ptep, pte; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + if (pte_none(pte)) + continue; + + WARN_ON(!pte_present(pte)); + pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pte_page(pte), + PAGE_SIZE, altmap); + } while (addr += PAGE_SIZE, addr < end); +} + +static void unmap_hotplug_pmd_range(pud_t *pudp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next; + pmd_t *pmdp, pmd; + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd)); + if (pmd_sect(pmd)) { + pmd_clear(pmdp); + + /* + * One TLBI should be sufficient here as the PMD_SIZE + * range is mapped with a single block entry. + */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pmd_page(pmd), + PMD_SIZE, altmap); + continue; + } + WARN_ON(!pmd_table(pmd)); + unmap_hotplug_pte_range(pmdp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); +} + +static void unmap_hotplug_pud_range(p4d_t *p4dp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next; + pud_t *pudp, pud; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud)); + if (pud_sect(pud)) { + pud_clear(pudp); + + /* + * One TLBI should be sufficient here as the PUD_SIZE + * range is mapped with a single block entry. 
+ */ + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + if (free_mapped) + free_hotplug_page_range(pud_page(pud), + PUD_SIZE, altmap); + continue; + } + WARN_ON(!pud_table(pud)); + unmap_hotplug_pmd_range(pudp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); +} + +static void unmap_hotplug_p4d_range(pgd_t *pgdp, unsigned long addr, + unsigned long end, bool free_mapped, + struct vmem_altmap *altmap) +{ + unsigned long next; + p4d_t *p4dp, p4d; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + unmap_hotplug_pud_range(p4dp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); +} + +static void unmap_hotplug_range(unsigned long addr, unsigned long end, + bool free_mapped, struct vmem_altmap *altmap) +{ + unsigned long next; + pgd_t *pgdp, pgd; + + /* + * altmap can only be used as vmemmap mapping backing memory. + * In case the backing memory itself is not being freed, then + * altmap is irrelevant. Warn about this inconsistency when + * encountered. + */ + WARN_ON(!free_mapped && altmap); + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + unmap_hotplug_p4d_range(pgdp, addr, next, free_mapped, altmap); + } while (addr = next, addr < end); +} + +static void free_empty_pte_table(pmd_t *pmdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pte_t *ptep, pte; + unsigned long i, start = addr; + + do { + ptep = pte_offset_kernel(pmdp, addr); + pte = READ_ONCE(*ptep); + + /* + * This is just a sanity check here which verifies that + * pte clearing has been done by earlier unmap loops. + */ + WARN_ON(!pte_none(pte)); + } while (addr += PAGE_SIZE, addr < end); + + if (!pgtable_range_aligned(start, end, floor, ceiling, PMD_MASK)) + return; + + /* + * Check whether we can free the pte page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. + */ + ptep = pte_offset_kernel(pmdp, 0UL); + for (i = 0; i < PTRS_PER_PTE; i++) { + if (!pte_none(READ_ONCE(ptep[i]))) + return; + } + + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(ptep)); +} + +static void free_empty_pmd_table(pud_t *pudp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pmd_t *pmdp, pmd; + unsigned long i, next, start = addr; + + do { + next = pmd_addr_end(addr, end); + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + continue; + + WARN_ON(!pmd_present(pmd) || !pmd_table(pmd) || pmd_sect(pmd)); + free_empty_pte_table(pmdp, addr, next, floor, ceiling); + } while (addr = next, addr < end); + + if (CONFIG_PGTABLE_LEVELS <= 2) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PUD_MASK)) + return; + + /* + * Check whether we can free the pmd page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. 
+ */ + pmdp = pmd_offset(pudp, 0UL); + for (i = 0; i < PTRS_PER_PMD; i++) { + if (!pmd_none(READ_ONCE(pmdp[i]))) + return; + } + + pud_clear(pudp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(pmdp)); +} + +static void free_empty_pud_table(p4d_t *p4dp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + pud_t *pudp, pud; + unsigned long i, next, start = addr; + + do { + next = pud_addr_end(addr, end); + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + continue; + + WARN_ON(!pud_present(pud) || !pud_table(pud) || pud_sect(pud)); + free_empty_pmd_table(pudp, addr, next, floor, ceiling); + } while (addr = next, addr < end); + + if (CONFIG_PGTABLE_LEVELS <= 3) + return; + + if (!pgtable_range_aligned(start, end, floor, ceiling, PGDIR_MASK)) + return; + + /* + * Check whether we can free the pud page if the rest of the + * entries are empty. Overlap with other regions have been + * handled by the floor/ceiling check. + */ + pudp = pud_offset(p4dp, 0UL); + for (i = 0; i < PTRS_PER_PUD; i++) { + if (!pud_none(READ_ONCE(pudp[i]))) + return; + } + + p4d_clear(p4dp); + __flush_tlb_kernel_pgtable(start); + free_hotplug_pgtable_page(virt_to_page(pudp)); +} + +static void free_empty_p4d_table(pgd_t *pgdp, unsigned long addr, + unsigned long end, unsigned long floor, + unsigned long ceiling) +{ + unsigned long next; + p4d_t *p4dp, p4d; + + do { + next = p4d_addr_end(addr, end); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (p4d_none(p4d)) + continue; + + WARN_ON(!p4d_present(p4d)); + free_empty_pud_table(p4dp, addr, next, floor, ceiling); + } while (addr = next, addr < end); +} + +static void free_empty_tables(unsigned long addr, unsigned long end, + unsigned long floor, unsigned long ceiling) +{ + unsigned long next; + pgd_t *pgdp, pgd; + + do { + next = pgd_addr_end(addr, end); + pgdp = pgd_offset_k(addr); + pgd = READ_ONCE(*pgdp); + if (pgd_none(pgd)) + continue; + + WARN_ON(!pgd_present(pgd)); + free_empty_p4d_table(pgdp, addr, next, floor, ceiling); + } while (addr = next, addr < end); +} +#endif + +#ifdef CONFIG_SPARSEMEM_VMEMMAP +#if !ARM64_SWAPPER_USES_SECTION_MAPS +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + return vmemmap_populate_basepages(start, end, node, altmap); +} +#else /* !ARM64_SWAPPER_USES_SECTION_MAPS */ +int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, + struct vmem_altmap *altmap) +{ + unsigned long addr = start; + unsigned long next; + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp; + pmd_t *pmdp; + + do { + next = pmd_addr_end(addr, end); + + pgdp = vmemmap_pgd_populate(addr, node); + if (!pgdp) + return -ENOMEM; + + p4dp = vmemmap_p4d_populate(pgdp, addr, node); + if (!p4dp) + return -ENOMEM; + + pudp = vmemmap_pud_populate(p4dp, addr, node); + if (!pudp) + return -ENOMEM; + + pmdp = pmd_offset(pudp, addr); + if (pmd_none(READ_ONCE(*pmdp))) { + void *p = NULL; + + p = vmemmap_alloc_block_buf(PMD_SIZE, node, altmap); + if (!p) + return -ENOMEM; + + pmd_set_huge(pmdp, __pa(p), __pgprot(PROT_SECT_NORMAL)); + } else + vmemmap_verify((pte_t *)pmdp, node, addr, next); + } while (addr = next, addr != end); + + return 0; +} +#endif /* !ARM64_SWAPPER_USES_SECTION_MAPS */ +void vmemmap_free(unsigned long start, unsigned long end, + struct vmem_altmap *altmap) +{ +#ifdef CONFIG_MEMORY_HOTPLUG + WARN_ON((start < VMEMMAP_START) || (end > VMEMMAP_END)); + + 
unmap_hotplug_range(start, end, true, altmap); + free_empty_tables(start, end, VMEMMAP_START, VMEMMAP_END); +#endif +} +#endif /* CONFIG_SPARSEMEM_VMEMMAP */ + +static inline pud_t * fixmap_pud(unsigned long addr) +{ + pgd_t *pgdp = pgd_offset_k(addr); + p4d_t *p4dp = p4d_offset(pgdp, addr); + p4d_t p4d = READ_ONCE(*p4dp); + + BUG_ON(p4d_none(p4d) || p4d_bad(p4d)); + + return pud_offset_kimg(p4dp, addr); +} + +static inline pmd_t * fixmap_pmd(unsigned long addr) +{ + pud_t *pudp = fixmap_pud(addr); + pud_t pud = READ_ONCE(*pudp); + + BUG_ON(pud_none(pud) || pud_bad(pud)); + + return pmd_offset_kimg(pudp, addr); +} + +static inline pte_t * fixmap_pte(unsigned long addr) +{ + return &bm_pte[pte_index(addr)]; +} + +/* + * The p*d_populate functions call virt_to_phys implicitly so they can't be used + * directly on kernel symbols (bm_p*d). This function is called too early to use + * lm_alias so __p*d_populate functions must be used to populate with the + * physical address from __pa_symbol. + */ +void __init early_fixmap_init(void) +{ + pgd_t *pgdp; + p4d_t *p4dp, p4d; + pud_t *pudp; + pmd_t *pmdp; + unsigned long addr = FIXADDR_START; + + pgdp = pgd_offset_k(addr); + p4dp = p4d_offset(pgdp, addr); + p4d = READ_ONCE(*p4dp); + if (CONFIG_PGTABLE_LEVELS > 3 && + !(p4d_none(p4d) || p4d_page_paddr(p4d) == __pa_symbol(bm_pud))) { + /* + * We only end up here if the kernel mapping and the fixmap + * share the top level pgd entry, which should only happen on + * 16k/4 levels configurations. + */ + BUG_ON(!IS_ENABLED(CONFIG_ARM64_16K_PAGES)); + pudp = pud_offset_kimg(p4dp, addr); + } else { + if (p4d_none(p4d)) + __p4d_populate(p4dp, __pa_symbol(bm_pud), PUD_TYPE_TABLE); + pudp = fixmap_pud(addr); + } + if (pud_none(READ_ONCE(*pudp))) + __pud_populate(pudp, __pa_symbol(bm_pmd), PMD_TYPE_TABLE); + pmdp = fixmap_pmd(addr); + __pmd_populate(pmdp, __pa_symbol(bm_pte), PMD_TYPE_TABLE); + + /* + * The boot-ioremap range spans multiple pmds, for which + * we are not prepared: + */ + BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT) + != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT)); + + if ((pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN))) + || pmdp != fixmap_pmd(fix_to_virt(FIX_BTMAP_END))) { + WARN_ON(1); + pr_warn("pmdp %p != %p, %p\n", + pmdp, fixmap_pmd(fix_to_virt(FIX_BTMAP_BEGIN)), + fixmap_pmd(fix_to_virt(FIX_BTMAP_END))); + pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n", + fix_to_virt(FIX_BTMAP_BEGIN)); + pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n", + fix_to_virt(FIX_BTMAP_END)); + + pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END); + pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN); + } +} + +/* + * Unusually, this is also called in IRQ context (ghes_iounmap_irq) so if we + * ever need to use IPIs for TLB broadcasting, then we're in trouble here. + */ +void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t flags) +{ + unsigned long addr = __fix_to_virt(idx); + pte_t *ptep; + + BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses); + + ptep = fixmap_pte(addr); + + if (pgprot_val(flags)) { + set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags)); + } else { + pte_clear(&init_mm, addr, ptep); + flush_tlb_kernel_range(addr, addr+PAGE_SIZE); + } +} + +void *__init fixmap_remap_fdt(phys_addr_t dt_phys, int *size, pgprot_t prot) +{ + const u64 dt_virt_base = __fix_to_virt(FIX_FDT); + int offset; + void *dt_virt; + + /* + * Check whether the physical FDT address is set and meets the minimum + * alignment requirement. 
Since we are relying on MIN_FDT_ALIGN to be + * at least 8 bytes so that we can always access the magic and size + * fields of the FDT header after mapping the first chunk, double check + * here if that is indeed the case. + */ + BUILD_BUG_ON(MIN_FDT_ALIGN < 8); + if (!dt_phys || dt_phys % MIN_FDT_ALIGN) + return NULL; + + /* + * Make sure that the FDT region can be mapped without the need to + * allocate additional translation table pages, so that it is safe + * to call create_mapping_noalloc() this early. + * + * On 64k pages, the FDT will be mapped using PTEs, so we need to + * be in the same PMD as the rest of the fixmap. + * On 4k pages, we'll use section mappings for the FDT so we only + * have to be in the same PUD. + */ + BUILD_BUG_ON(dt_virt_base % SZ_2M); + + BUILD_BUG_ON(__fix_to_virt(FIX_FDT_END) >> SWAPPER_TABLE_SHIFT != + __fix_to_virt(FIX_BTMAP_BEGIN) >> SWAPPER_TABLE_SHIFT); + + offset = dt_phys % SWAPPER_BLOCK_SIZE; + dt_virt = (void *)dt_virt_base + offset; + + /* map the first chunk so we can read the size from the header */ + create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), + dt_virt_base, SWAPPER_BLOCK_SIZE, prot); + + if (fdt_magic(dt_virt) != FDT_MAGIC) + return NULL; + + *size = fdt_totalsize(dt_virt); + if (*size > MAX_FDT_SIZE) + return NULL; + + if (offset + *size > SWAPPER_BLOCK_SIZE) + create_mapping_noalloc(round_down(dt_phys, SWAPPER_BLOCK_SIZE), dt_virt_base, + round_up(offset + *size, SWAPPER_BLOCK_SIZE), prot); + + return dt_virt; +} + +int __init arch_ioremap_p4d_supported(void) +{ + return 0; +} + +int __init arch_ioremap_pud_supported(void) +{ + /* + * Only 4k granule supports level 1 block mappings. + * SW table walks can't handle removal of intermediate entries. + */ + return IS_ENABLED(CONFIG_ARM64_4K_PAGES) && + !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +int __init arch_ioremap_pmd_supported(void) +{ + /* See arch_ioremap_pud_supported() */ + return !IS_ENABLED(CONFIG_PTDUMP_DEBUGFS); +} + +int pud_set_huge(pud_t *pudp, phys_addr_t phys, pgprot_t prot) +{ + pud_t new_pud = pfn_pud(__phys_to_pfn(phys), mk_pud_sect_prot(prot)); + + /* Only allow permission changes for now */ + if (!pgattr_change_is_safe(READ_ONCE(pud_val(*pudp)), + pud_val(new_pud))) + return 0; + + VM_BUG_ON(phys & ~PUD_MASK); + set_pud(pudp, new_pud); + return 1; +} + +int pmd_set_huge(pmd_t *pmdp, phys_addr_t phys, pgprot_t prot) +{ + pmd_t new_pmd = pfn_pmd(__phys_to_pfn(phys), mk_pmd_sect_prot(prot)); + + /* Only allow permission changes for now */ + if (!pgattr_change_is_safe(READ_ONCE(pmd_val(*pmdp)), + pmd_val(new_pmd))) + return 0; + + VM_BUG_ON(phys & ~PMD_MASK); + set_pmd(pmdp, new_pmd); + return 1; +} + +int pud_clear_huge(pud_t *pudp) +{ + if (!pud_sect(READ_ONCE(*pudp))) + return 0; + pud_clear(pudp); + return 1; +} + +int pmd_clear_huge(pmd_t *pmdp) +{ + if (!pmd_sect(READ_ONCE(*pmdp))) + return 0; + pmd_clear(pmdp); + return 1; +} + +int pmd_free_pte_page(pmd_t *pmdp, unsigned long addr) +{ + pte_t *table; + pmd_t pmd; + + pmd = READ_ONCE(*pmdp); + + if (!pmd_table(pmd)) { + VM_WARN_ON(1); + return 1; + } + + table = pte_offset_kernel(pmdp, addr); + pmd_clear(pmdp); + __flush_tlb_kernel_pgtable(addr); + pte_free_kernel(NULL, table); + return 1; +} + +int pud_free_pmd_page(pud_t *pudp, unsigned long addr) +{ + pmd_t *table; + pmd_t *pmdp; + pud_t pud; + unsigned long next, end; + + pud = READ_ONCE(*pudp); + + if (!pud_table(pud)) { + VM_WARN_ON(1); + return 1; + } + + table = pmd_offset(pudp, addr); + pmdp = table; + next = addr; + end = addr + PUD_SIZE; + 
do { + pmd_free_pte_page(pmdp, next); + } while (pmdp++, next += PMD_SIZE, next != end); + + pud_clear(pudp); + __flush_tlb_kernel_pgtable(addr); + pmd_free(NULL, table); + return 1; +} + +int p4d_free_pud_page(p4d_t *p4d, unsigned long addr) +{ + return 0; /* Don't attempt a block mapping */ +} + +#ifdef CONFIG_MEMORY_HOTPLUG +static void __remove_pgd_mapping(pgd_t *pgdir, unsigned long start, u64 size) +{ + unsigned long end = start + size; + + WARN_ON(pgdir != init_mm.pgd); + WARN_ON((start < PAGE_OFFSET) || (end > PAGE_END)); + + unmap_hotplug_range(start, end, false, NULL); + free_empty_tables(start, end, PAGE_OFFSET, PAGE_END); +} + +static bool inside_linear_region(u64 start, u64 size) +{ + u64 start_linear_pa = __pa(_PAGE_OFFSET(vabits_actual)); + u64 end_linear_pa = __pa(PAGE_END - 1); + + if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) { + /* + * Check for a wrap, it is possible because of randomized linear + * mapping the start physical address is actually bigger than + * the end physical address. In this case set start to zero + * because [0, end_linear_pa] range must still be able to cover + * all addressable physical addresses. + */ + if (start_linear_pa > end_linear_pa) + start_linear_pa = 0; + } + + WARN_ON(start_linear_pa > end_linear_pa); + + /* + * Linear mapping region is the range [PAGE_OFFSET..(PAGE_END - 1)] + * accommodating both its ends but excluding PAGE_END. Max physical + * range which can be mapped inside this linear mapping range, must + * also be derived from its end points. + */ + return start >= start_linear_pa && (start + size - 1) <= end_linear_pa; +} + +int arch_add_memory(int nid, u64 start, u64 size, + struct mhp_params *params) +{ + int ret, flags = 0; + + if (!inside_linear_region(start, size)) { + pr_err("[%llx %llx] is outside linear mapping region\n", start, start + size); + return -EINVAL; + } + + if (rodata_full || debug_pagealloc_enabled()) + flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS; + + __create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start), + size, params->pgprot, __pgd_pgtable_alloc, + flags); + + memblock_clear_nomap(start, size); + + ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, + params); + if (ret) + __remove_pgd_mapping(swapper_pg_dir, + __phys_to_virt(start), size); + else { + max_pfn = PFN_UP(start + size); + max_low_pfn = max_pfn; + } + + return ret; +} + +void arch_remove_memory(int nid, u64 start, u64 size, + struct vmem_altmap *altmap) +{ + unsigned long start_pfn = start >> PAGE_SHIFT; + unsigned long nr_pages = size >> PAGE_SHIFT; + + __remove_pages(start_pfn, nr_pages, altmap); + __remove_pgd_mapping(swapper_pg_dir, __phys_to_virt(start), size); +} + +/* + * This memory hotplug notifier helps prevent boot memory from being + * inadvertently removed as it blocks pfn range offlining process in + * __offline_pages(). Hence this prevents both offlining as well as + * removal process for boot memory which is initially always online. + * In future if and when boot memory could be removed, this notifier + * should be dropped and free_hotplug_page_range() should handle any + * reserved pages allocated during boot. 
+ */ +static int prevent_bootmem_remove_notifier(struct notifier_block *nb, + unsigned long action, void *data) +{ + struct mem_section *ms; + struct memory_notify *arg = data; + unsigned long end_pfn = arg->start_pfn + arg->nr_pages; + unsigned long pfn = arg->start_pfn; + + if (action != MEM_GOING_OFFLINE) + return NOTIFY_OK; + + for (; pfn < end_pfn; pfn += PAGES_PER_SECTION) { + ms = __pfn_to_section(pfn); + if (early_section(ms)) + return NOTIFY_BAD; + } + return NOTIFY_OK; +} + +static struct notifier_block prevent_bootmem_remove_nb = { + .notifier_call = prevent_bootmem_remove_notifier, +}; + +static int __init prevent_bootmem_remove_init(void) +{ + return register_memory_notifier(&prevent_bootmem_remove_nb); +} +device_initcall(prevent_bootmem_remove_init); +#endif diff --git a/arch/arm64/mm/mteswap.c b/arch/arm64/mm/mteswap.c new file mode 100644 index 000000000..c52c18470 --- /dev/null +++ b/arch/arm64/mm/mteswap.c @@ -0,0 +1,83 @@ +// SPDX-License-Identifier: GPL-2.0-only + +#include <linux/pagemap.h> +#include <linux/xarray.h> +#include <linux/slab.h> +#include <linux/swap.h> +#include <linux/swapops.h> +#include <asm/mte.h> + +static DEFINE_XARRAY(mte_pages); + +void *mte_allocate_tag_storage(void) +{ + /* tags granule is 16 bytes, 2 tags stored per byte */ + return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL); +} + +void mte_free_tag_storage(char *storage) +{ + kfree(storage); +} + +int mte_save_tags(struct page *page) +{ + void *tag_storage, *ret; + + if (!test_bit(PG_mte_tagged, &page->flags)) + return 0; + + tag_storage = mte_allocate_tag_storage(); + if (!tag_storage) + return -ENOMEM; + + mte_save_page_tags(page_address(page), tag_storage); + + /* page_private contains the swap entry.val set in do_swap_page */ + ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL); + if (WARN(xa_is_err(ret), "Failed to store MTE tags")) { + mte_free_tag_storage(tag_storage); + return xa_err(ret); + } else if (ret) { + /* Entry is being replaced, free the old entry */ + mte_free_tag_storage(ret); + } + + return 0; +} + +bool mte_restore_tags(swp_entry_t entry, struct page *page) +{ + void *tags = xa_load(&mte_pages, entry.val); + + if (!tags) + return false; + + mte_restore_page_tags(page_address(page), tags); + + return true; +} + +void mte_invalidate_tags(int type, pgoff_t offset) +{ + swp_entry_t entry = swp_entry(type, offset); + void *tags = xa_erase(&mte_pages, entry.val); + + mte_free_tag_storage(tags); +} + +void mte_invalidate_tags_area(int type) +{ + swp_entry_t entry = swp_entry(type, 0); + swp_entry_t last_entry = swp_entry(type + 1, 0); + void *tags; + + XA_STATE(xa_state, &mte_pages, entry.val); + + xa_lock(&mte_pages); + xas_for_each(&xa_state, tags, last_entry.val - 1) { + __xa_erase(&mte_pages, xa_state.xa_index); + mte_free_tag_storage(tags); + } + xa_unlock(&mte_pages); +} diff --git a/arch/arm64/mm/numa.c b/arch/arm64/mm/numa.c new file mode 100644 index 000000000..a8303bc6b --- /dev/null +++ b/arch/arm64/mm/numa.c @@ -0,0 +1,464 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * NUMA support, based on the x86 implementation. + * + * Copyright (C) 2015 Cavium Inc. 
+ * Author: Ganapatrao Kulkarni <gkulkarni@cavium.com> + */ + +#define pr_fmt(fmt) "NUMA: " fmt + +#include <linux/acpi.h> +#include <linux/memblock.h> +#include <linux/module.h> +#include <linux/of.h> + +#include <asm/acpi.h> +#include <asm/sections.h> + +struct pglist_data *node_data[MAX_NUMNODES] __read_mostly; +EXPORT_SYMBOL(node_data); +nodemask_t numa_nodes_parsed __initdata; +static int cpu_to_node_map[NR_CPUS] = { [0 ... NR_CPUS-1] = NUMA_NO_NODE }; + +static int numa_distance_cnt; +static u8 *numa_distance; +bool numa_off; + +static __init int numa_parse_early_param(char *opt) +{ + if (!opt) + return -EINVAL; + if (str_has_prefix(opt, "off")) + numa_off = true; + + return 0; +} +early_param("numa", numa_parse_early_param); + +cpumask_var_t node_to_cpumask_map[MAX_NUMNODES]; +EXPORT_SYMBOL(node_to_cpumask_map); + +#ifdef CONFIG_DEBUG_PER_CPU_MAPS + +/* + * Returns a pointer to the bitmask of CPUs on Node 'node'. + */ +const struct cpumask *cpumask_of_node(int node) +{ + + if (node == NUMA_NO_NODE) + return cpu_all_mask; + + if (WARN_ON(node < 0 || node >= nr_node_ids)) + return cpu_none_mask; + + if (WARN_ON(node_to_cpumask_map[node] == NULL)) + return cpu_online_mask; + + return node_to_cpumask_map[node]; +} +EXPORT_SYMBOL(cpumask_of_node); + +#endif + +static void numa_update_cpu(unsigned int cpu, bool remove) +{ + int nid = cpu_to_node(cpu); + + if (nid == NUMA_NO_NODE) + return; + + if (remove) + cpumask_clear_cpu(cpu, node_to_cpumask_map[nid]); + else + cpumask_set_cpu(cpu, node_to_cpumask_map[nid]); +} + +void numa_add_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, false); +} + +void numa_remove_cpu(unsigned int cpu) +{ + numa_update_cpu(cpu, true); +} + +void numa_clear_node(unsigned int cpu) +{ + numa_remove_cpu(cpu); + set_cpu_numa_node(cpu, NUMA_NO_NODE); +} + +/* + * Allocate node_to_cpumask_map based on number of available nodes + * Requires node_possible_map to be valid. + * + * Note: cpumask_of_node() is not valid until after this is done. + * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.) + */ +static void __init setup_node_to_cpumask_map(void) +{ + int node; + + /* setup nr_node_ids if not done yet */ + if (nr_node_ids == MAX_NUMNODES) + setup_nr_node_ids(); + + /* allocate and clear the mapping */ + for (node = 0; node < nr_node_ids; node++) { + alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]); + cpumask_clear(node_to_cpumask_map[node]); + } + + /* cpumask_of_node() will now work */ + pr_debug("Node to cpumask map for %u nodes\n", nr_node_ids); +} + +/* + * Set the cpu to node and mem mapping + */ +void numa_store_cpu_info(unsigned int cpu) +{ + set_cpu_numa_node(cpu, cpu_to_node_map[cpu]); +} + +void __init early_map_cpu_to_node(unsigned int cpu, int nid) +{ + /* fallback to node 0 */ + if (nid < 0 || nid >= MAX_NUMNODES || numa_off) + nid = 0; + + cpu_to_node_map[cpu] = nid; + + /* + * We should set the numa node of cpu0 as soon as possible, because it + * has already been set up online before. cpu_to_node(0) will soon be + * called. 
+ */ + if (!cpu) + set_cpu_numa_node(cpu, nid); +} + +#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA +unsigned long __per_cpu_offset[NR_CPUS] __read_mostly; +EXPORT_SYMBOL(__per_cpu_offset); + +static int __init early_cpu_to_node(int cpu) +{ + return cpu_to_node_map[cpu]; +} + +static int __init pcpu_cpu_distance(unsigned int from, unsigned int to) +{ + return node_distance(early_cpu_to_node(from), early_cpu_to_node(to)); +} + +static void * __init pcpu_fc_alloc(unsigned int cpu, size_t size, + size_t align) +{ + int nid = early_cpu_to_node(cpu); + + return memblock_alloc_try_nid(size, align, + __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid); +} + +static void __init pcpu_fc_free(void *ptr, size_t size) +{ + memblock_free_early(__pa(ptr), size); +} + +void __init setup_per_cpu_areas(void) +{ + unsigned long delta; + unsigned int cpu; + int rc; + + /* + * Always reserve area for module percpu variables. That's + * what the legacy allocator did. + */ + rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE, + PERCPU_DYNAMIC_RESERVE, PAGE_SIZE, + pcpu_cpu_distance, + pcpu_fc_alloc, pcpu_fc_free); + if (rc < 0) + panic("Failed to initialize percpu areas."); + + delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start; + for_each_possible_cpu(cpu) + __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu]; +} +#endif + +/** + * numa_add_memblk() - Set node id to memblk + * @nid: NUMA node ID of the new memblk + * @start: Start address of the new memblk + * @end: End address of the new memblk + * + * RETURNS: + * 0 on success, -errno on failure. + */ +int __init numa_add_memblk(int nid, u64 start, u64 end) +{ + int ret; + + ret = memblock_set_node(start, (end - start), &memblock.memory, nid); + if (ret < 0) { + pr_err("memblock [0x%llx - 0x%llx] failed to add on node %d\n", + start, (end - 1), nid); + return ret; + } + + node_set(nid, numa_nodes_parsed); + return ret; +} + +/* + * Initialize NODE_DATA for a node on the local memory + */ +static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn) +{ + const size_t nd_size = roundup(sizeof(pg_data_t), SMP_CACHE_BYTES); + u64 nd_pa; + void *nd; + int tnid; + + if (start_pfn >= end_pfn) + pr_info("Initmem setup node %d [<memory-less node>]\n", nid); + + nd_pa = memblock_phys_alloc_try_nid(nd_size, SMP_CACHE_BYTES, nid); + if (!nd_pa) + panic("Cannot allocate %zu bytes for node %d data\n", + nd_size, nid); + + nd = __va(nd_pa); + + /* report and initialize */ + pr_info("NODE_DATA [mem %#010Lx-%#010Lx]\n", + nd_pa, nd_pa + nd_size - 1); + tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT); + if (tnid != nid) + pr_info("NODE_DATA(%d) on node %d\n", nid, tnid); + + node_data[nid] = nd; + memset(NODE_DATA(nid), 0, sizeof(pg_data_t)); + NODE_DATA(nid)->node_id = nid; + NODE_DATA(nid)->node_start_pfn = start_pfn; + NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn; +} + +/* + * numa_free_distance + * + * The current table is freed. + */ +void __init numa_free_distance(void) +{ + size_t size; + + if (!numa_distance) + return; + + size = numa_distance_cnt * numa_distance_cnt * + sizeof(numa_distance[0]); + + memblock_free(__pa(numa_distance), size); + numa_distance_cnt = 0; + numa_distance = NULL; +} + +/* + * Create a new NUMA distance table. 
+ */ +static int __init numa_alloc_distance(void) +{ + size_t size; + u64 phys; + int i, j; + + size = nr_node_ids * nr_node_ids * sizeof(numa_distance[0]); + phys = memblock_find_in_range(0, PFN_PHYS(max_pfn), + size, PAGE_SIZE); + if (WARN_ON(!phys)) + return -ENOMEM; + + memblock_reserve(phys, size); + + numa_distance = __va(phys); + numa_distance_cnt = nr_node_ids; + + /* fill with the default distances */ + for (i = 0; i < numa_distance_cnt; i++) + for (j = 0; j < numa_distance_cnt; j++) + numa_distance[i * numa_distance_cnt + j] = i == j ? + LOCAL_DISTANCE : REMOTE_DISTANCE; + + pr_debug("Initialized distance table, cnt=%d\n", numa_distance_cnt); + + return 0; +} + +/** + * numa_set_distance() - Set inter node NUMA distance from node to node. + * @from: the 'from' node to set distance + * @to: the 'to' node to set distance + * @distance: NUMA distance + * + * Set the distance from node @from to @to to @distance. + * If distance table doesn't exist, a warning is printed. + * + * If @from or @to is higher than the highest known node or lower than zero + * or @distance doesn't make sense, the call is ignored. + */ +void __init numa_set_distance(int from, int to, int distance) +{ + if (!numa_distance) { + pr_warn_once("Warning: distance table not allocated yet\n"); + return; + } + + if (from >= numa_distance_cnt || to >= numa_distance_cnt || + from < 0 || to < 0) { + pr_warn_once("Warning: node ids are out of bound, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + if ((u8)distance != distance || + (from == to && distance != LOCAL_DISTANCE)) { + pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n", + from, to, distance); + return; + } + + numa_distance[from * numa_distance_cnt + to] = distance; +} + +/* + * Return NUMA distance @from to @to + */ +int __node_distance(int from, int to) +{ + if (from >= numa_distance_cnt || to >= numa_distance_cnt) + return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE; + return numa_distance[from * numa_distance_cnt + to]; +} +EXPORT_SYMBOL(__node_distance); + +static int __init numa_register_nodes(void) +{ + int nid; + struct memblock_region *mblk; + + /* Check that valid nid is set to memblks */ + for_each_mem_region(mblk) { + int mblk_nid = memblock_get_region_node(mblk); + + if (mblk_nid == NUMA_NO_NODE || mblk_nid >= MAX_NUMNODES) { + pr_warn("Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n", + mblk_nid, mblk->base, + mblk->base + mblk->size - 1); + return -EINVAL; + } + } + + /* Finally register nodes. 
*/ + for_each_node_mask(nid, numa_nodes_parsed) { + unsigned long start_pfn, end_pfn; + + get_pfn_range_for_nid(nid, &start_pfn, &end_pfn); + setup_node_data(nid, start_pfn, end_pfn); + node_set_online(nid); + } + + /* Setup online nodes to actual nodes*/ + node_possible_map = numa_nodes_parsed; + + return 0; +} + +static int __init numa_init(int (*init_func)(void)) +{ + int ret; + + nodes_clear(numa_nodes_parsed); + nodes_clear(node_possible_map); + nodes_clear(node_online_map); + + ret = numa_alloc_distance(); + if (ret < 0) + return ret; + + ret = init_func(); + if (ret < 0) + goto out_free_distance; + + if (nodes_empty(numa_nodes_parsed)) { + pr_info("No NUMA configuration found\n"); + ret = -EINVAL; + goto out_free_distance; + } + + ret = numa_register_nodes(); + if (ret < 0) + goto out_free_distance; + + setup_node_to_cpumask_map(); + + return 0; +out_free_distance: + numa_free_distance(); + return ret; +} + +/** + * dummy_numa_init() - Fallback dummy NUMA init + * + * Used if there's no underlying NUMA architecture, NUMA initialization + * fails, or NUMA is disabled on the command line. + * + * Must online at least one node (node 0) and add memory blocks that cover all + * allowed memory. It is unlikely that this function fails. + * + * Return: 0 on success, -errno on failure. + */ +static int __init dummy_numa_init(void) +{ + phys_addr_t start = memblock_start_of_DRAM(); + phys_addr_t end = memblock_end_of_DRAM(); + int ret; + + if (numa_off) + pr_info("NUMA disabled\n"); /* Forced off on command line. */ + pr_info("Faking a node at [mem %#018Lx-%#018Lx]\n", start, end - 1); + + ret = numa_add_memblk(0, start, end); + if (ret) { + pr_err("NUMA init failed\n"); + return ret; + } + + numa_off = true; + return 0; +} + +/** + * arm64_numa_init() - Initialize NUMA + * + * Try each configured NUMA initialization method until one succeeds. The + * last fallback is dummy single node config encompassing whole memory. + */ +void __init arm64_numa_init(void) +{ + if (!numa_off) { + if (!acpi_disabled && !numa_init(arm64_acpi_numa_init)) + return; + if (acpi_disabled && !numa_init(of_numa_init)) + return; + } + + numa_init(dummy_numa_init); +} diff --git a/arch/arm64/mm/pageattr.c b/arch/arm64/mm/pageattr.c new file mode 100644 index 000000000..1b94f5b82 --- /dev/null +++ b/arch/arm64/mm/pageattr.c @@ -0,0 +1,234 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + */ +#include <linux/kernel.h> +#include <linux/mm.h> +#include <linux/module.h> +#include <linux/sched.h> +#include <linux/vmalloc.h> + +#include <asm/cacheflush.h> +#include <asm/set_memory.h> +#include <asm/tlbflush.h> + +struct page_change_data { + pgprot_t set_mask; + pgprot_t clear_mask; +}; + +bool rodata_full __ro_after_init = IS_ENABLED(CONFIG_RODATA_FULL_DEFAULT_ENABLED); + +static int change_page_range(pte_t *ptep, unsigned long addr, void *data) +{ + struct page_change_data *cdata = data; + pte_t pte = READ_ONCE(*ptep); + + pte = clear_pte_bit(pte, cdata->clear_mask); + pte = set_pte_bit(pte, cdata->set_mask); + + set_pte(ptep, pte); + return 0; +} + +/* + * This function assumes that the range is mapped with PAGE_SIZE pages. 
+ */ +static int __change_memory_common(unsigned long start, unsigned long size, + pgprot_t set_mask, pgprot_t clear_mask) +{ + struct page_change_data data; + int ret; + + data.set_mask = set_mask; + data.clear_mask = clear_mask; + + ret = apply_to_page_range(&init_mm, start, size, change_page_range, + &data); + + flush_tlb_kernel_range(start, start + size); + return ret; +} + +static int change_memory_common(unsigned long addr, int numpages, + pgprot_t set_mask, pgprot_t clear_mask) +{ + unsigned long start = addr; + unsigned long size = PAGE_SIZE * numpages; + unsigned long end = start + size; + struct vm_struct *area; + int i; + + if (!PAGE_ALIGNED(addr)) { + start &= PAGE_MASK; + end = start + size; + WARN_ON_ONCE(1); + } + + /* + * Kernel VA mappings are always live, and splitting live section + * mappings into page mappings may cause TLB conflicts. This means + * we have to ensure that changing the permission bits of the range + * we are operating on does not result in such splitting. + * + * Let's restrict ourselves to mappings created by vmalloc (or vmap). + * Those are guaranteed to consist entirely of page mappings, and + * splitting is never needed. + * + * So check whether the [addr, addr + size) interval is entirely + * covered by precisely one VM area that has the VM_ALLOC flag set. + */ + area = find_vm_area((void *)addr); + if (!area || + end > (unsigned long)area->addr + area->size || + !(area->flags & VM_ALLOC)) + return -EINVAL; + + if (!numpages) + return 0; + + /* + * If we are manipulating read-only permissions, apply the same + * change to the linear mapping of the pages that back this VM area. + */ + if (rodata_full && (pgprot_val(set_mask) == PTE_RDONLY || + pgprot_val(clear_mask) == PTE_RDONLY)) { + for (i = 0; i < area->nr_pages; i++) { + __change_memory_common((u64)page_address(area->pages[i]), + PAGE_SIZE, set_mask, clear_mask); + } + } + + /* + * Get rid of potentially aliasing lazily unmapped vm areas that may + * have permissions set that deviate from the ones we are setting here. 
+ */ + vm_unmap_aliases(); + + return __change_memory_common(start, size, set_mask, clear_mask); +} + +int set_memory_ro(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_RDONLY), + __pgprot(PTE_WRITE)); +} + +int set_memory_rw(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_WRITE), + __pgprot(PTE_RDONLY)); +} + +int set_memory_nx(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_PXN), + __pgprot(PTE_MAYBE_GP)); +} + +int set_memory_x(unsigned long addr, int numpages) +{ + return change_memory_common(addr, numpages, + __pgprot(PTE_MAYBE_GP), + __pgprot(PTE_PXN)); +} + +int set_memory_valid(unsigned long addr, int numpages, int enable) +{ + if (enable) + return __change_memory_common(addr, PAGE_SIZE * numpages, + __pgprot(PTE_VALID), + __pgprot(0)); + else + return __change_memory_common(addr, PAGE_SIZE * numpages, + __pgprot(0), + __pgprot(PTE_VALID)); +} + +int set_direct_map_invalid_noflush(struct page *page) +{ + struct page_change_data data = { + .set_mask = __pgprot(0), + .clear_mask = __pgprot(PTE_VALID), + }; + + if (!rodata_full) + return 0; + + return apply_to_page_range(&init_mm, + (unsigned long)page_address(page), + PAGE_SIZE, change_page_range, &data); +} + +int set_direct_map_default_noflush(struct page *page) +{ + struct page_change_data data = { + .set_mask = __pgprot(PTE_VALID | PTE_WRITE), + .clear_mask = __pgprot(PTE_RDONLY), + }; + + if (!rodata_full) + return 0; + + return apply_to_page_range(&init_mm, + (unsigned long)page_address(page), + PAGE_SIZE, change_page_range, &data); +} + +void __kernel_map_pages(struct page *page, int numpages, int enable) +{ + if (!debug_pagealloc_enabled() && !rodata_full) + return; + + set_memory_valid((unsigned long)page_address(page), numpages, enable); +} + +/* + * This function is used to determine if a linear map page has been marked as + * not-valid. Walk the page table and check the PTE_VALID bit. This is based + * on kern_addr_valid(), which almost does what we need. + * + * Because this is only called on the kernel linear map, p?d_sect() implies + * p?d_present(). When debug_pagealloc is enabled, sections mappings are + * disabled. + */ +bool kernel_page_present(struct page *page) +{ + pgd_t *pgdp; + p4d_t *p4dp; + pud_t *pudp, pud; + pmd_t *pmdp, pmd; + pte_t *ptep; + unsigned long addr = (unsigned long)page_address(page); + + if (!debug_pagealloc_enabled() && !rodata_full) + return true; + + pgdp = pgd_offset_k(addr); + if (pgd_none(READ_ONCE(*pgdp))) + return false; + + p4dp = p4d_offset(pgdp, addr); + if (p4d_none(READ_ONCE(*p4dp))) + return false; + + pudp = pud_offset(p4dp, addr); + pud = READ_ONCE(*pudp); + if (pud_none(pud)) + return false; + if (pud_sect(pud)) + return true; + + pmdp = pmd_offset(pudp, addr); + pmd = READ_ONCE(*pmdp); + if (pmd_none(pmd)) + return false; + if (pmd_sect(pmd)) + return true; + + ptep = pte_offset_kernel(pmdp, addr); + return pte_valid(READ_ONCE(*ptep)); +} diff --git a/arch/arm64/mm/pgd.c b/arch/arm64/mm/pgd.c new file mode 100644 index 000000000..4a64089e5 --- /dev/null +++ b/arch/arm64/mm/pgd.c @@ -0,0 +1,56 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * PGD allocation/freeing + * + * Copyright (C) 2012 ARM Ltd. 
+ * Author: Catalin Marinas <catalin.marinas@arm.com> + */ + +#include <linux/mm.h> +#include <linux/gfp.h> +#include <linux/highmem.h> +#include <linux/slab.h> + +#include <asm/pgalloc.h> +#include <asm/page.h> +#include <asm/tlbflush.h> + +static struct kmem_cache *pgd_cache __ro_after_init; + +pgd_t *pgd_alloc(struct mm_struct *mm) +{ + gfp_t gfp = GFP_PGTABLE_USER; + + if (PGD_SIZE == PAGE_SIZE) + return (pgd_t *)__get_free_page(gfp); + else + return kmem_cache_alloc(pgd_cache, gfp); +} + +void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + if (PGD_SIZE == PAGE_SIZE) + free_page((unsigned long)pgd); + else + kmem_cache_free(pgd_cache, pgd); +} + +void __init pgtable_cache_init(void) +{ + if (PGD_SIZE == PAGE_SIZE) + return; + +#ifdef CONFIG_ARM64_PA_BITS_52 + /* + * With 52-bit physical addresses, the architecture requires the + * top-level table to be aligned to at least 64 bytes. + */ + BUILD_BUG_ON(PGD_SIZE < 64); +#endif + + /* + * Naturally aligned pgds required by the architecture. + */ + pgd_cache = kmem_cache_create("pgd_cache", PGD_SIZE, PGD_SIZE, + SLAB_PANIC, NULL); +} diff --git a/arch/arm64/mm/physaddr.c b/arch/arm64/mm/physaddr.c new file mode 100644 index 000000000..cde44c13d --- /dev/null +++ b/arch/arm64/mm/physaddr.c @@ -0,0 +1,31 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/bug.h> +#include <linux/export.h> +#include <linux/types.h> +#include <linux/mmdebug.h> +#include <linux/mm.h> + +#include <asm/memory.h> + +phys_addr_t __virt_to_phys(unsigned long x) +{ + WARN(!__is_lm_address(__tag_reset(x)), + "virt_to_phys used for non-linear address: %pK (%pS)\n", + (void *)x, + (void *)x); + + return __virt_to_phys_nodebug(x); +} +EXPORT_SYMBOL(__virt_to_phys); + +phys_addr_t __phys_addr_symbol(unsigned long x) +{ + /* + * This is bounds checking against the kernel image only. + * __pa_symbol should only be used on kernel symbol addresses. + */ + VIRTUAL_BUG_ON(x < (unsigned long) KERNEL_START || + x > (unsigned long) KERNEL_END); + return __pa_symbol_nodebug(x); +} +EXPORT_SYMBOL(__phys_addr_symbol); diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S new file mode 100644 index 000000000..aacc7eab9 --- /dev/null +++ b/arch/arm64/mm/proc.S @@ -0,0 +1,506 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Based on arch/arm/mm/proc.S + * + * Copyright (C) 2001 Deep Blue Solutions Ltd. + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + */ + +#include <linux/init.h> +#include <linux/linkage.h> +#include <linux/pgtable.h> +#include <asm/assembler.h> +#include <asm/asm-offsets.h> +#include <asm/asm_pointer_auth.h> +#include <asm/hwcap.h> +#include <asm/pgtable-hwdef.h> +#include <asm/cpufeature.h> +#include <asm/alternative.h> +#include <asm/smp.h> +#include <asm/sysreg.h> + +#ifdef CONFIG_ARM64_64K_PAGES +#define TCR_TG_FLAGS TCR_TG0_64K | TCR_TG1_64K +#elif defined(CONFIG_ARM64_16K_PAGES) +#define TCR_TG_FLAGS TCR_TG0_16K | TCR_TG1_16K +#else /* CONFIG_ARM64_4K_PAGES */ +#define TCR_TG_FLAGS TCR_TG0_4K | TCR_TG1_4K +#endif + +#ifdef CONFIG_RANDOMIZE_BASE +#define TCR_KASLR_FLAGS TCR_NFD1 +#else +#define TCR_KASLR_FLAGS 0 +#endif + +#define TCR_SMP_FLAGS TCR_SHARED + +/* PTWs cacheable, inner/outer WBWA */ +#define TCR_CACHE_FLAGS TCR_IRGN_WBWA | TCR_ORGN_WBWA + +#ifdef CONFIG_KASAN_SW_TAGS +#define TCR_KASAN_FLAGS TCR_TBI1 +#else +#define TCR_KASAN_FLAGS 0 +#endif + +/* + * Default MAIR_EL1. 
MT_NORMAL_TAGGED is initially mapped as Normal memory and + * changed during __cpu_setup to Normal Tagged if the system supports MTE. + */ +#define MAIR_EL1_SET \ + (MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRnE, MT_DEVICE_nGnRnE) | \ + MAIR_ATTRIDX(MAIR_ATTR_DEVICE_nGnRE, MT_DEVICE_nGnRE) | \ + MAIR_ATTRIDX(MAIR_ATTR_DEVICE_GRE, MT_DEVICE_GRE) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_NC, MT_NORMAL_NC) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL_WT, MT_NORMAL_WT) | \ + MAIR_ATTRIDX(MAIR_ATTR_NORMAL, MT_NORMAL_TAGGED)) + +#ifdef CONFIG_CPU_PM +/** + * cpu_do_suspend - save CPU registers context + * + * x0: virtual address of context pointer + * + * This must be kept in sync with struct cpu_suspend_ctx in <asm/suspend.h>. + */ +SYM_FUNC_START(cpu_do_suspend) + mrs x2, tpidr_el0 + mrs x3, tpidrro_el0 + mrs x4, contextidr_el1 + mrs x5, osdlr_el1 + mrs x6, cpacr_el1 + mrs x7, tcr_el1 + mrs x8, vbar_el1 + mrs x9, mdscr_el1 + mrs x10, oslsr_el1 + mrs x11, sctlr_el1 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + mrs x12, tpidr_el1 +alternative_else + mrs x12, tpidr_el2 +alternative_endif + mrs x13, sp_el0 + stp x2, x3, [x0] + stp x4, x5, [x0, #16] + stp x6, x7, [x0, #32] + stp x8, x9, [x0, #48] + stp x10, x11, [x0, #64] + stp x12, x13, [x0, #80] + /* + * Save x18 as it may be used as a platform register, e.g. by shadow + * call stack. + */ + str x18, [x0, #96] + ret +SYM_FUNC_END(cpu_do_suspend) + +/** + * cpu_do_resume - restore CPU register context + * + * x0: Address of context pointer + */ + .pushsection ".idmap.text", "awx" +SYM_FUNC_START(cpu_do_resume) + ldp x2, x3, [x0] + ldp x4, x5, [x0, #16] + ldp x6, x8, [x0, #32] + ldp x9, x10, [x0, #48] + ldp x11, x12, [x0, #64] + ldp x13, x14, [x0, #80] + /* + * Restore x18, as it may be used as a platform register, and clear + * the buffer to minimize the risk of exposure when used for shadow + * call stack. + */ + ldr x18, [x0, #96] + str xzr, [x0, #96] + msr tpidr_el0, x2 + msr tpidrro_el0, x3 + msr contextidr_el1, x4 + msr cpacr_el1, x6 + + /* Don't change t0sz here, mask those bits when restoring */ + mrs x7, tcr_el1 + bfi x8, x7, TCR_T0SZ_OFFSET, TCR_TxSZ_WIDTH + + msr tcr_el1, x8 + msr vbar_el1, x9 + + /* + * __cpu_setup() cleared MDSCR_EL1.MDE and friends, before unmasking + * debug exceptions. By restoring MDSCR_EL1 here, we may take a debug + * exception. Mask them until local_daif_restore() in cpu_suspend() + * resets them. + */ + disable_daif + msr mdscr_el1, x10 + + msr sctlr_el1, x12 +alternative_if_not ARM64_HAS_VIRT_HOST_EXTN + msr tpidr_el1, x13 +alternative_else + msr tpidr_el2, x13 +alternative_endif + msr sp_el0, x14 + /* + * Restore oslsr_el1 by writing oslar_el1 + */ + msr osdlr_el1, x5 + ubfx x11, x11, #1, #1 + msr oslar_el1, x11 + reset_pmuserenr_el0 x0 // Disable PMU access from EL0 + reset_amuserenr_el0 x0 // Disable AMU access from EL0 + +alternative_if ARM64_HAS_RAS_EXTN + msr_s SYS_DISR_EL1, xzr +alternative_else_nop_endif + + ptrauth_keys_install_kernel_nosync x14, x1, x2, x3 + isb + ret +SYM_FUNC_END(cpu_do_resume) + .popsection +#endif + + .pushsection ".idmap.text", "awx" + +.macro __idmap_cpu_set_reserved_ttbr1, tmp1, tmp2 + adrp \tmp1, reserved_pg_dir + phys_to_ttbr \tmp2, \tmp1 + offset_ttbr1 \tmp2, \tmp1 + msr ttbr1_el1, \tmp2 + isb + tlbi vmalle1 + dsb nsh + isb +.endm + +/* + * void idmap_cpu_replace_ttbr1(phys_addr_t ttbr1) + * + * This is the low-level counterpart to cpu_replace_ttbr1, and should not be + * called by anything else. It can only be executed from a TTBR0 mapping. 
+ */ +SYM_FUNC_START(idmap_cpu_replace_ttbr1) + save_and_disable_daif flags=x2 + + __idmap_cpu_set_reserved_ttbr1 x1, x3 + + offset_ttbr1 x0, x3 + msr ttbr1_el1, x0 + isb + + restore_daif x2 + + ret +SYM_FUNC_END(idmap_cpu_replace_ttbr1) + .popsection + +#ifdef CONFIG_UNMAP_KERNEL_AT_EL0 + .pushsection ".idmap.text", "awx" + + .macro __idmap_kpti_get_pgtable_ent, type + dc cvac, cur_\()\type\()p // Ensure any existing dirty + dmb sy // lines are written back before + ldr \type, [cur_\()\type\()p] // loading the entry + tbz \type, #0, skip_\()\type // Skip invalid and + tbnz \type, #11, skip_\()\type // non-global entries + .endm + + .macro __idmap_kpti_put_pgtable_ent_ng, type + orr \type, \type, #PTE_NG // Same bit for blocks and pages + str \type, [cur_\()\type\()p] // Update the entry and ensure + dmb sy // that it is visible to all + dc civac, cur_\()\type\()p // CPUs. + .endm + +/* + * void __kpti_install_ng_mappings(int cpu, int num_cpus, phys_addr_t swapper) + * + * Called exactly once from stop_machine context by each CPU found during boot. + */ +__idmap_kpti_flag: + .long 1 +SYM_FUNC_START(idmap_kpti_install_ng_mappings) + cpu .req w0 + num_cpus .req w1 + swapper_pa .req x2 + swapper_ttb .req x3 + flag_ptr .req x4 + cur_pgdp .req x5 + end_pgdp .req x6 + pgd .req x7 + cur_pudp .req x8 + end_pudp .req x9 + pud .req x10 + cur_pmdp .req x11 + end_pmdp .req x12 + pmd .req x13 + cur_ptep .req x14 + end_ptep .req x15 + pte .req x16 + + mrs swapper_ttb, ttbr1_el1 + restore_ttbr1 swapper_ttb + adr flag_ptr, __idmap_kpti_flag + + cbnz cpu, __idmap_kpti_secondary + + /* We're the boot CPU. Wait for the others to catch up */ + sevl +1: wfe + ldaxr w17, [flag_ptr] + eor w17, w17, num_cpus + cbnz w17, 1b + + /* We need to walk swapper, so turn off the MMU. */ + pre_disable_mmu_workaround + mrs x17, sctlr_el1 + bic x17, x17, #SCTLR_ELx_M + msr sctlr_el1, x17 + isb + + /* Everybody is enjoying the idmap, so we can rewrite swapper. */ + /* PGD */ + mov cur_pgdp, swapper_pa + add end_pgdp, cur_pgdp, #(PTRS_PER_PGD * 8) +do_pgd: __idmap_kpti_get_pgtable_ent pgd + tbnz pgd, #1, walk_puds +next_pgd: + __idmap_kpti_put_pgtable_ent_ng pgd +skip_pgd: + add cur_pgdp, cur_pgdp, #8 + cmp cur_pgdp, end_pgdp + b.ne do_pgd + + /* Publish the updated tables and nuke all the TLBs */ + dsb sy + tlbi vmalle1is + dsb ish + isb + + /* We're done: fire up the MMU again */ + mrs x17, sctlr_el1 + orr x17, x17, #SCTLR_ELx_M + msr sctlr_el1, x17 + isb + + /* + * Invalidate the local I-cache so that any instructions fetched + * speculatively from the PoC are discarded, since they may have + * been dynamically patched at the PoU. 
+ */ + ic iallu + dsb nsh + isb + + /* Set the flag to zero to indicate that we're all done */ + str wzr, [flag_ptr] + ret + + /* PUD */ +walk_puds: + .if CONFIG_PGTABLE_LEVELS > 3 + pte_to_phys cur_pudp, pgd + add end_pudp, cur_pudp, #(PTRS_PER_PUD * 8) +do_pud: __idmap_kpti_get_pgtable_ent pud + tbnz pud, #1, walk_pmds +next_pud: + __idmap_kpti_put_pgtable_ent_ng pud +skip_pud: + add cur_pudp, cur_pudp, 8 + cmp cur_pudp, end_pudp + b.ne do_pud + b next_pgd + .else /* CONFIG_PGTABLE_LEVELS <= 3 */ + mov pud, pgd + b walk_pmds +next_pud: + b next_pgd + .endif + + /* PMD */ +walk_pmds: + .if CONFIG_PGTABLE_LEVELS > 2 + pte_to_phys cur_pmdp, pud + add end_pmdp, cur_pmdp, #(PTRS_PER_PMD * 8) +do_pmd: __idmap_kpti_get_pgtable_ent pmd + tbnz pmd, #1, walk_ptes +next_pmd: + __idmap_kpti_put_pgtable_ent_ng pmd +skip_pmd: + add cur_pmdp, cur_pmdp, #8 + cmp cur_pmdp, end_pmdp + b.ne do_pmd + b next_pud + .else /* CONFIG_PGTABLE_LEVELS <= 2 */ + mov pmd, pud + b walk_ptes +next_pmd: + b next_pud + .endif + + /* PTE */ +walk_ptes: + pte_to_phys cur_ptep, pmd + add end_ptep, cur_ptep, #(PTRS_PER_PTE * 8) +do_pte: __idmap_kpti_get_pgtable_ent pte + __idmap_kpti_put_pgtable_ent_ng pte +skip_pte: + add cur_ptep, cur_ptep, #8 + cmp cur_ptep, end_ptep + b.ne do_pte + b next_pmd + + .unreq cpu + .unreq num_cpus + .unreq swapper_pa + .unreq cur_pgdp + .unreq end_pgdp + .unreq pgd + .unreq cur_pudp + .unreq end_pudp + .unreq pud + .unreq cur_pmdp + .unreq end_pmdp + .unreq pmd + .unreq cur_ptep + .unreq end_ptep + .unreq pte + + /* Secondary CPUs end up here */ +__idmap_kpti_secondary: + /* Uninstall swapper before surgery begins */ + __idmap_cpu_set_reserved_ttbr1 x16, x17 + + /* Increment the flag to let the boot CPU we're ready */ +1: ldxr w16, [flag_ptr] + add w16, w16, #1 + stxr w17, w16, [flag_ptr] + cbnz w17, 1b + + /* Wait for the boot CPU to finish messing around with swapper */ + sevl +1: wfe + ldxr w16, [flag_ptr] + cbnz w16, 1b + + /* All done, act like nothing happened */ + offset_ttbr1 swapper_ttb, x16 + msr ttbr1_el1, swapper_ttb + isb + ret + + .unreq swapper_ttb + .unreq flag_ptr +SYM_FUNC_END(idmap_kpti_install_ng_mappings) + .popsection +#endif + +/* + * __cpu_setup + * + * Initialise the processor for turning the MMU on. + * + * Output: + * Return in x0 the value of the SCTLR_EL1 register. + */ + .pushsection ".idmap.text", "awx" +SYM_FUNC_START(__cpu_setup) + tlbi vmalle1 // Invalidate local TLB + dsb nsh + + mov x1, #3 << 20 + msr cpacr_el1, x1 // Enable FP/ASIMD + mov x1, #1 << 12 // Reset mdscr_el1 and disable + msr mdscr_el1, x1 // access to the DCC from EL0 + isb // Unmask debug exceptions now, + enable_dbg // since this is per-cpu + reset_pmuserenr_el0 x1 // Disable PMU access from EL0 + reset_amuserenr_el0 x1 // Disable AMU access from EL0 + + /* + * Memory region attributes + */ + mov_q x5, MAIR_EL1_SET +#ifdef CONFIG_ARM64_MTE + /* + * Update MAIR_EL1, GCR_EL1 and TFSR*_EL1 if MTE is supported + * (ID_AA64PFR1_EL1[11:8] > 1). + */ + mrs x10, ID_AA64PFR1_EL1 + ubfx x10, x10, #ID_AA64PFR1_MTE_SHIFT, #4 + cmp x10, #ID_AA64PFR1_MTE + b.lt 1f + + /* Normal Tagged memory type at the corresponding MAIR index */ + mov x10, #MAIR_ATTR_NORMAL_TAGGED + bfi x5, x10, #(8 * MT_NORMAL_TAGGED), #8 + + /* initialize GCR_EL1: all non-zero tags excluded by default */ + mov x10, #(SYS_GCR_EL1_RRND | SYS_GCR_EL1_EXCL_MASK) + msr_s SYS_GCR_EL1, x10 + + /* + * If GCR_EL1.RRND=1 is implemented the same way as RRND=0, then + * RGSR_EL1.SEED must be non-zero for IRG to produce + * pseudorandom numbers. 
As RGSR_EL1 is UNKNOWN out of reset, we + * must initialize it. + */ + mrs x10, CNTVCT_EL0 + ands x10, x10, #SYS_RGSR_EL1_SEED_MASK + csinc x10, x10, xzr, ne + lsl x10, x10, #SYS_RGSR_EL1_SEED_SHIFT + msr_s SYS_RGSR_EL1, x10 + + /* clear any pending tag check faults in TFSR*_EL1 */ + msr_s SYS_TFSR_EL1, xzr + msr_s SYS_TFSRE0_EL1, xzr +1: +#endif + msr mair_el1, x5 + /* + * Set/prepare TCR and TTBR. We use 512GB (39-bit) address range for + * both user and kernel. + */ + mov_q x10, TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ + TCR_TG_FLAGS | TCR_KASLR_FLAGS | TCR_ASID16 | \ + TCR_TBI0 | TCR_A1 | TCR_KASAN_FLAGS + tcr_clear_errata_bits x10, x9, x5 + +#ifdef CONFIG_ARM64_VA_BITS_52 + ldr_l x9, vabits_actual + sub x9, xzr, x9 + add x9, x9, #64 + tcr_set_t1sz x10, x9 +#else + ldr_l x9, idmap_t0sz +#endif + tcr_set_t0sz x10, x9 + + /* + * Set the IPS bits in TCR_EL1. + */ + tcr_compute_pa_size x10, #TCR_IPS_SHIFT, x5, x6 +#ifdef CONFIG_ARM64_HW_AFDBM + /* + * Enable hardware update of the Access Flags bit. + * Hardware dirty bit management is enabled later, + * via capabilities. + */ + mrs x9, ID_AA64MMFR1_EL1 + and x9, x9, #0xf + cbz x9, 1f + orr x10, x10, #TCR_HA // hardware Access flag update +1: +#endif /* CONFIG_ARM64_HW_AFDBM */ + msr tcr_el1, x10 + /* + * Prepare SCTLR + */ + mov_q x0, SCTLR_EL1_SET + ret // return to head.S +SYM_FUNC_END(__cpu_setup) diff --git a/arch/arm64/mm/ptdump.c b/arch/arm64/mm/ptdump.c new file mode 100644 index 000000000..807dc634b --- /dev/null +++ b/arch/arm64/mm/ptdump.c @@ -0,0 +1,393 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2014, The Linux Foundation. All rights reserved. + * Debug helper to dump the current kernel pagetables of the system + * so that we can see what the various memory ranges are set to. + * + * Derived from x86 and arm implementation: + * (C) Copyright 2008 Intel Corporation + * + * Author: Arjan van de Ven <arjan@linux.intel.com> + */ +#include <linux/debugfs.h> +#include <linux/errno.h> +#include <linux/fs.h> +#include <linux/io.h> +#include <linux/init.h> +#include <linux/mm.h> +#include <linux/ptdump.h> +#include <linux/sched.h> +#include <linux/seq_file.h> + +#include <asm/fixmap.h> +#include <asm/kasan.h> +#include <asm/memory.h> +#include <asm/pgtable-hwdef.h> +#include <asm/ptdump.h> + + +enum address_markers_idx { + PAGE_OFFSET_NR = 0, + PAGE_END_NR, +#ifdef CONFIG_KASAN + KASAN_START_NR, +#endif +}; + +static struct addr_marker address_markers[] = { + { PAGE_OFFSET, "Linear Mapping start" }, + { 0 /* PAGE_END */, "Linear Mapping end" }, +#ifdef CONFIG_KASAN + { 0 /* KASAN_SHADOW_START */, "Kasan shadow start" }, + { KASAN_SHADOW_END, "Kasan shadow end" }, +#endif + { BPF_JIT_REGION_START, "BPF start" }, + { BPF_JIT_REGION_END, "BPF end" }, + { MODULES_VADDR, "Modules start" }, + { MODULES_END, "Modules end" }, + { VMALLOC_START, "vmalloc() area" }, + { VMALLOC_END, "vmalloc() end" }, + { FIXADDR_START, "Fixmap start" }, + { FIXADDR_TOP, "Fixmap end" }, + { PCI_IO_START, "PCI I/O start" }, + { PCI_IO_END, "PCI I/O end" }, +#ifdef CONFIG_SPARSEMEM_VMEMMAP + { VMEMMAP_START, "vmemmap start" }, + { VMEMMAP_START + VMEMMAP_SIZE, "vmemmap end" }, +#endif + { -1, NULL }, +}; + +#define pt_dump_seq_printf(m, fmt, args...) \ +({ \ + if (m) \ + seq_printf(m, fmt, ##args); \ +}) + +#define pt_dump_seq_puts(m, fmt) \ +({ \ + if (m) \ + seq_printf(m, fmt); \ +}) + +/* + * The page dumper groups page table entries of the same type into a single + * description. 
It uses pg_state to track the range information while + * iterating over the pte entries. When the continuity is broken it then + * dumps out a description of the range. + */ +struct pg_state { + struct ptdump_state ptdump; + struct seq_file *seq; + const struct addr_marker *marker; + unsigned long start_address; + int level; + u64 current_prot; + bool check_wx; + unsigned long wx_pages; + unsigned long uxn_pages; +}; + +struct prot_bits { + u64 mask; + u64 val; + const char *set; + const char *clear; +}; + +static const struct prot_bits pte_bits[] = { + { + .mask = PTE_VALID, + .val = PTE_VALID, + .set = " ", + .clear = "F", + }, { + .mask = PTE_USER, + .val = PTE_USER, + .set = "USR", + .clear = " ", + }, { + .mask = PTE_RDONLY, + .val = PTE_RDONLY, + .set = "ro", + .clear = "RW", + }, { + .mask = PTE_PXN, + .val = PTE_PXN, + .set = "NX", + .clear = "x ", + }, { + .mask = PTE_SHARED, + .val = PTE_SHARED, + .set = "SHD", + .clear = " ", + }, { + .mask = PTE_AF, + .val = PTE_AF, + .set = "AF", + .clear = " ", + }, { + .mask = PTE_NG, + .val = PTE_NG, + .set = "NG", + .clear = " ", + }, { + .mask = PTE_CONT, + .val = PTE_CONT, + .set = "CON", + .clear = " ", + }, { + .mask = PTE_TABLE_BIT, + .val = PTE_TABLE_BIT, + .set = " ", + .clear = "BLK", + }, { + .mask = PTE_UXN, + .val = PTE_UXN, + .set = "UXN", + .clear = " ", + }, { + .mask = PTE_GP, + .val = PTE_GP, + .set = "GP", + .clear = " ", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_nGnRnE), + .set = "DEVICE/nGnRnE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_nGnRE), + .set = "DEVICE/nGnRE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_DEVICE_GRE), + .set = "DEVICE/GRE", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL_NC), + .set = "MEM/NORMAL-NC", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL), + .set = "MEM/NORMAL", + }, { + .mask = PTE_ATTRINDX_MASK, + .val = PTE_ATTRINDX(MT_NORMAL_TAGGED), + .set = "MEM/NORMAL-TAGGED", + } +}; + +struct pg_level { + const struct prot_bits *bits; + const char *name; + size_t num; + u64 mask; +}; + +static struct pg_level pg_level[] = { + { /* pgd */ + .name = "PGD", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* p4d */ + .name = "P4D", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pud */ + .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pmd */ + .name = (CONFIG_PGTABLE_LEVELS > 2) ? 
"PMD" : "PGD", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, { /* pte */ + .name = "PTE", + .bits = pte_bits, + .num = ARRAY_SIZE(pte_bits), + }, +}; + +static void dump_prot(struct pg_state *st, const struct prot_bits *bits, + size_t num) +{ + unsigned i; + + for (i = 0; i < num; i++, bits++) { + const char *s; + + if ((st->current_prot & bits->mask) == bits->val) + s = bits->set; + else + s = bits->clear; + + if (s) + pt_dump_seq_printf(st->seq, " %s", s); + } +} + +static void note_prot_uxn(struct pg_state *st, unsigned long addr) +{ + if (!st->check_wx) + return; + + if ((st->current_prot & PTE_UXN) == PTE_UXN) + return; + + WARN_ONCE(1, "arm64/mm: Found non-UXN mapping at address %p/%pS\n", + (void *)st->start_address, (void *)st->start_address); + + st->uxn_pages += (addr - st->start_address) / PAGE_SIZE; +} + +static void note_prot_wx(struct pg_state *st, unsigned long addr) +{ + if (!st->check_wx) + return; + if ((st->current_prot & PTE_RDONLY) == PTE_RDONLY) + return; + if ((st->current_prot & PTE_PXN) == PTE_PXN) + return; + + WARN_ONCE(1, "arm64/mm: Found insecure W+X mapping at address %p/%pS\n", + (void *)st->start_address, (void *)st->start_address); + + st->wx_pages += (addr - st->start_address) / PAGE_SIZE; +} + +static void note_page(struct ptdump_state *pt_st, unsigned long addr, int level, + u64 val) +{ + struct pg_state *st = container_of(pt_st, struct pg_state, ptdump); + static const char units[] = "KMGTPE"; + u64 prot = 0; + + if (level >= 0) + prot = val & pg_level[level].mask; + + if (st->level == -1) { + st->level = level; + st->current_prot = prot; + st->start_address = addr; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } else if (prot != st->current_prot || level != st->level || + addr >= st->marker[1].start_address) { + const char *unit = units; + unsigned long delta; + + if (st->current_prot) { + note_prot_uxn(st, addr); + note_prot_wx(st, addr); + } + + pt_dump_seq_printf(st->seq, "0x%016lx-0x%016lx ", + st->start_address, addr); + + delta = (addr - st->start_address) >> 10; + while (!(delta & 1023) && unit[1]) { + delta >>= 10; + unit++; + } + pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit, + pg_level[st->level].name); + if (st->current_prot && pg_level[st->level].bits) + dump_prot(st, pg_level[st->level].bits, + pg_level[st->level].num); + pt_dump_seq_puts(st->seq, "\n"); + + if (addr >= st->marker[1].start_address) { + st->marker++; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + + st->start_address = addr; + st->current_prot = prot; + st->level = level; + } + + if (addr >= st->marker[1].start_address) { + st->marker++; + pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name); + } + +} + +void ptdump_walk(struct seq_file *s, struct ptdump_info *info) +{ + unsigned long end = ~0UL; + struct pg_state st; + + if (info->base_addr < TASK_SIZE_64) + end = TASK_SIZE_64; + + st = (struct pg_state){ + .seq = s, + .marker = info->markers, + .ptdump = { + .note_page = note_page, + .range = (struct ptdump_range[]){ + {info->base_addr, end}, + {0, 0} + } + } + }; + + ptdump_walk_pgd(&st.ptdump, info->mm, NULL); +} + +static void ptdump_initialize(void) +{ + unsigned i, j; + + for (i = 0; i < ARRAY_SIZE(pg_level); i++) + if (pg_level[i].bits) + for (j = 0; j < pg_level[i].num; j++) + pg_level[i].mask |= pg_level[i].bits[j].mask; +} + +static struct ptdump_info kernel_ptdump_info = { + .mm = &init_mm, + .markers = address_markers, + .base_addr = PAGE_OFFSET, +}; + +void ptdump_check_wx(void) +{ + 
struct pg_state st = { + .seq = NULL, + .marker = (struct addr_marker[]) { + { 0, NULL}, + { -1, NULL}, + }, + .level = -1, + .check_wx = true, + .ptdump = { + .note_page = note_page, + .range = (struct ptdump_range[]) { + {PAGE_OFFSET, ~0UL}, + {0, 0} + } + } + }; + + ptdump_walk_pgd(&st.ptdump, &init_mm, NULL); + + if (st.wx_pages || st.uxn_pages) + pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found, %lu non-UXN pages found\n", + st.wx_pages, st.uxn_pages); + else + pr_info("Checked W+X mappings: passed, no W+X pages found\n"); +} + +static int ptdump_init(void) +{ + address_markers[PAGE_END_NR].start_address = PAGE_END; +#ifdef CONFIG_KASAN + address_markers[KASAN_START_NR].start_address = KASAN_SHADOW_START; +#endif + ptdump_initialize(); + ptdump_debugfs_register(&kernel_ptdump_info, "kernel_page_tables"); + return 0; +} +device_initcall(ptdump_init); diff --git a/arch/arm64/mm/ptdump_debugfs.c b/arch/arm64/mm/ptdump_debugfs.c new file mode 100644 index 000000000..d29d722ec --- /dev/null +++ b/arch/arm64/mm/ptdump_debugfs.c @@ -0,0 +1,22 @@ +// SPDX-License-Identifier: GPL-2.0 +#include <linux/debugfs.h> +#include <linux/memory_hotplug.h> +#include <linux/seq_file.h> + +#include <asm/ptdump.h> + +static int ptdump_show(struct seq_file *m, void *v) +{ + struct ptdump_info *info = m->private; + + get_online_mems(); + ptdump_walk(m, info); + put_online_mems(); + return 0; +} +DEFINE_SHOW_ATTRIBUTE(ptdump); + +void ptdump_debugfs_register(struct ptdump_info *info, const char *name) +{ + debugfs_create_file(name, 0400, NULL, info, &ptdump_fops); +} |
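
For context, a minimal user-space sketch of how the "kernel_page_tables" file registered above is typically consumed. This is illustrative only and rests on two assumptions not stated in the diff itself: that debugfs is mounted at its conventional location, /sys/kernel/debug, and that the kernel was built with CONFIG_PTDUMP_DEBUGFS=y. Root is required, since ptdump_debugfs_register() creates the file with mode 0400.

/*
 * Illustrative sketch (assumptions: debugfs mounted at
 * /sys/kernel/debug, CONFIG_PTDUMP_DEBUGFS=y, run as root).
 * Prints the dump produced by note_page() above: "---[ marker ]---"
 * section headers followed by collapsed ranges such as
 * "0xffff800010000000-0xffff800010010000 64K PTE RW NX SHD AF ...".
 */
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	FILE *f = fopen("/sys/kernel/debug/kernel_page_tables", "r");
	char line[256];

	if (!f) {
		perror("kernel_page_tables");
		return EXIT_FAILURE;
	}

	/* Stream the seq_file output line by line to stdout. */
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);

	fclose(f);
	return EXIT_SUCCESS;
}

The same walker also runs without a seq_file consumer: ptdump_check_wx() above passes .seq = NULL so note_page() only accounts W+X and non-UXN pages and reports the result through pr_warn()/pr_info() at boot.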