Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/cacheflush.c |  4
-rw-r--r--  arch/riscv/mm/context.c    |  2
-rw-r--r--  arch/riscv/mm/init.c       | 35
-rw-r--r--  arch/riscv/mm/pageattr.c   | 28
-rw-r--r--  arch/riscv/mm/pgtable.c    |  2
-rw-r--r--  arch/riscv/mm/ptdump.c     | 12
6 files changed, 56 insertions(+), 27 deletions(-)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index 55a34f2020..bc61ee5975 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -82,12 +82,12 @@ void flush_icache_mm(struct mm_struct *mm, bool local)
 #endif /* CONFIG_SMP */
 
 #ifdef CONFIG_MMU
-void flush_icache_pte(pte_t pte)
+void flush_icache_pte(struct mm_struct *mm, pte_t pte)
 {
 	struct folio *folio = page_folio(pte_page(pte));
 
 	if (!test_bit(PG_dcache_clean, &folio->flags)) {
-		flush_icache_all();
+		flush_icache_mm(mm, false);
 		set_bit(PG_dcache_clean, &folio->flags);
 	}
 }
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
index 217fd4de61..ba8eb39446 100644
--- a/arch/riscv/mm/context.c
+++ b/arch/riscv/mm/context.c
@@ -323,6 +323,8 @@ void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	if (unlikely(prev == next))
 		return;
 
+	membarrier_arch_switch_mm(prev, next, task);
+
 	/*
 	 * Mark the current MM context as inactive, and the next as
 	 * active. This is at least used by the icache flushing
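
The cacheflush.c hunk above replaces a global flush_icache_all() with flush_icache_mm(mm, false), which confines the immediate fence.i to harts currently running the faulting mm and defers it for all others. A simplified sketch of that deferral pattern (paraphrased from arch/riscv/mm/cacheflush.c; not the verbatim implementation, and error handling is omitted):

/* Sketch of the deferred icache-flush pattern behind flush_icache_mm(). */
static void flush_icache_mm_sketch(struct mm_struct *mm)
{
	cpumask_t others;
	cpumask_t *stale = &mm->context.icache_stale_mask;
	unsigned int cpu = get_cpu();

	/* Every hart must execute fence.i before running this mm again. */
	cpumask_setall(stale);

	/* This hart flushes right now, so it is no longer stale. */
	cpumask_clear_cpu(cpu, stale);
	local_flush_icache_all();

	/*
	 * Only harts concurrently running the mm are flushed eagerly; the
	 * rest pick the flush up lazily on their next switch_mm().
	 */
	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
	if (!cpumask_empty(&others))
		sbi_remote_fence_i(&others);

	put_cpu();
}

This scoping is also why flush_icache_pte() now takes the mm, and why the pgtable.c hunk below threads vma->vm_mm through __set_pte_at().
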
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 0c00efc756..46b4ad418f 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -29,7 +29,6 @@
 #include <asm/io.h>
 #include <asm/numa.h>
 #include <asm/pgtable.h>
-#include <asm/ptdump.h>
 #include <asm/sections.h>
 #include <asm/soc.h>
 #include <asm/tlbflush.h>
@@ -236,18 +235,19 @@ static void __init setup_bootmem(void)
 	kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
 
 	/*
-	 * memblock allocator is not aware of the fact that last 4K bytes of
-	 * the addressable memory can not be mapped because of IS_ERR_VALUE
-	 * macro. Make sure that last 4k bytes are not usable by memblock
-	 * if end of dram is equal to maximum addressable memory. For 64-bit
-	 * kernel, this problem can't happen here as the end of the virtual
-	 * address space is occupied by the kernel mapping then this check must
-	 * be done as soon as the kernel mapping base address is determined.
+	 * Reserve physical address space that would be mapped to virtual
+	 * addresses greater than (void *)(-PAGE_SIZE) because:
+	 *  - This memory would overlap with ERR_PTR
+	 *  - This memory belongs to high memory, which is not supported
+	 *
+	 * This is not applicable to 64-bit kernel, because virtual addresses
+	 * after (void *)(-PAGE_SIZE) are not linearly mapped: they are
+	 * occupied by kernel mapping. Also it is unrealistic for high memory
+	 * to exist on 64-bit platforms.
 	 */
 	if (!IS_ENABLED(CONFIG_64BIT)) {
-		max_mapped_addr = __pa(~(ulong)0);
-		if (max_mapped_addr == (phys_ram_end - 1))
-			memblock_set_current_limit(max_mapped_addr - 4096);
+		max_mapped_addr = __va_to_pa_nodebug(-PAGE_SIZE);
+		memblock_reserve(max_mapped_addr, (phys_addr_t)-max_mapped_addr);
 	}
 
 	min_low_pfn = PFN_UP(phys_ram_base);
@@ -669,6 +669,9 @@ void __init create_pgd_mapping(pgd_t *pgdp,
 static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
 				      phys_addr_t size)
 {
+	if (debug_pagealloc_enabled())
+		return PAGE_SIZE;
+
 	if (pgtable_l5_enabled &&
 	    !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
 		return P4D_SIZE;
@@ -723,8 +726,6 @@ void mark_rodata_ro(void)
 	if (IS_ENABLED(CONFIG_64BIT))
 		set_kernel_memory(lm_alias(__start_rodata), lm_alias(_data),
 				  set_memory_ro);
-
-	debug_checkwx();
 }
 #else
 static __init pgprot_t pgprot_from_va(uintptr_t va)
@@ -767,6 +768,11 @@ static int __init print_no5lvl(char *p)
 }
 early_param("no5lvl", print_no5lvl);
 
+static void __init set_mmap_rnd_bits_max(void)
+{
+	mmap_rnd_bits_max = MMAP_VA_BITS - PAGE_SHIFT - 3;
+}
+
 /*
  * There is a simple way to determine if 4-level is supported by the
  * underlying hardware: establish 1:1 mapping in 4-level page table mode
@@ -1081,6 +1087,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
 
 #if defined(CONFIG_64BIT) && !defined(CONFIG_XIP_KERNEL)
 	set_satp_mode(dtb_pa);
+	set_mmap_rnd_bits_max();
 #endif
 
 	/*
@@ -1358,7 +1365,7 @@ static void __init arch_reserve_crashkernel(void)
 	bool high = false;
 	int ret;
 
-	if (!IS_ENABLED(CONFIG_KEXEC_CORE))
+	if (!IS_ENABLED(CONFIG_CRASH_RESERVE))
 		return;
 
 	ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 410056a50a..271d01a5ba 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -387,17 +387,33 @@ int set_direct_map_default_noflush(struct page *page)
 }
 
 #ifdef CONFIG_DEBUG_PAGEALLOC
+static int debug_pagealloc_set_page(pte_t *pte, unsigned long addr, void *data)
+{
+	int enable = *(int *)data;
+
+	unsigned long val = pte_val(ptep_get(pte));
+
+	if (enable)
+		val |= _PAGE_PRESENT;
+	else
+		val &= ~_PAGE_PRESENT;
+
+	set_pte(pte, __pte(val));
+
+	return 0;
+}
+
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	if (!debug_pagealloc_enabled())
 		return;
 
-	if (enable)
-		__set_memory((unsigned long)page_address(page), numpages,
-			     __pgprot(_PAGE_PRESENT), __pgprot(0));
-	else
-		__set_memory((unsigned long)page_address(page), numpages,
-			     __pgprot(0), __pgprot(_PAGE_PRESENT));
+	unsigned long start = (unsigned long)page_address(page);
+	unsigned long size = PAGE_SIZE * numpages;
+
+	apply_to_existing_page_range(&init_mm, start, size, debug_pagealloc_set_page, &enable);
+
+	flush_tlb_kernel_range(start, start + size);
 }
 #endif
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index ef887efcb6..533ec9055f 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -10,7 +10,7 @@ int ptep_set_access_flags(struct vm_area_struct *vma,
 			  pte_t entry, int dirty)
 {
 	if (!pte_same(ptep_get(ptep), entry))
-		__set_pte_at(ptep, entry);
+		__set_pte_at(vma->vm_mm, ptep, entry);
 	/*
 	 * update_mmu_cache will unconditionally execute, handling both
 	 * the case that the PTE changed and the spurious fault case.
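
The new setup_bootmem() comment in the init.c hunk above can be made concrete. IS_ERR_VALUE() in include/linux/err.h classifies the top MAX_ERRNO (4095) values of the address space as encoded errnos, so on a 32-bit kernel no valid pointer may ever land in the last page of the linear map. A small userspace model of the collision (hypothetical demo code, using 32-bit arithmetic to stand in for an rv32 kernel pointer):

#include <stdint.h>
#include <stdio.h>

#define MAX_ERRNO 4095

/* 32-bit rendition of IS_ERR_VALUE() from include/linux/err.h */
static int is_err_value32(uint32_t x)
{
	return x >= (uint32_t)-MAX_ERRNO;	/* i.e. x >= 0xfffff001 */
}

int main(void)
{
	/* (void *)(-PAGE_SIZE) on rv32: start of the last 4K page */
	uint32_t last_page = 0xfffff000u;

	/* A pointer into that page is indistinguishable from ERR_PTR(-4): */
	printf("%d\n", is_err_value32(last_page + 0xffc));	/* prints 1 */
	return 0;
}

Hence the switch from the old memblock_set_current_limit() special case to an unconditional memblock_reserve() of everything past __va_to_pa_nodebug(-PAGE_SIZE): the old check only fired when DRAM ended exactly at the addressable limit, whereas any 32-bit layout whose linear map reaches the last page has the problem.
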
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index 657c27bc07..1289cc6d37 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -9,7 +9,6 @@
 #include <linux/seq_file.h>
 #include <linux/ptdump.h>
 
-#include <asm/ptdump.h>
 #include <linux/pgtable.h>
 #include <asm/kasan.h>
 
@@ -336,7 +335,7 @@ static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
 	ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
 }
 
-void ptdump_check_wx(void)
+bool ptdump_check_wx(void)
 {
 	struct pg_state st = {
 		.seq = NULL,
@@ -357,11 +356,16 @@ void ptdump_check_wx(void)
 
 	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
 
-	if (st.wx_pages)
+	if (st.wx_pages) {
 		pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
 			st.wx_pages);
-	else
+
+		return false;
+	} else {
 		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+
+		return true;
+	}
 }
 
 static int ptdump_show(struct seq_file *m, void *v)
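
The dropped debug_checkwx() call in mark_rodata_ro() and the void-to-bool change in ptdump_check_wx() pair with generic-mm changes outside this arch/riscv/mm diffstat: the W+X check is now driven from common init code after the architecture's mark_rodata_ro() work is done, and the boolean verdict lets callers report pass/fail rather than only logging. Roughly, as a sketch of the generic counterpart (simplified from include/linux/ptdump.h and init/main.c of the same era; not part of this diff, details may differ):

/* include/linux/ptdump.h (sketch): the arch hook now reports a verdict. */
bool ptdump_check_wx(void);

static inline void debug_checkwx(void)
{
	if (IS_ENABLED(CONFIG_DEBUG_WX))
		ptdump_check_wx();
}

/* init/main.c (sketch): one call site after the arch's mark_rodata_ro(). */
static void mark_readonly(void)
{
	if (rodata_enabled) {
		rcu_barrier();		/* flush pending W+X module cleanups */
		mark_rodata_ro();
		debug_checkwx();
		rodata_test();
	}
}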