From 94ac2ab3fff96814d7460a27a0e9d004abbd4128 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Wed, 19 Jun 2024 23:00:37 +0200
Subject: Merging upstream version 6.9.2.

Signed-off-by: Daniel Baumann
---
 arch/powerpc/mm/book3s32/mmu.c           |  7 +++--
 arch/powerpc/mm/book3s64/hash_hugepage.c | 15 ++++------
 arch/powerpc/mm/book3s64/hash_utils.c    |  3 +-
 arch/powerpc/mm/book3s64/pgtable.c       |  2 +-
 arch/powerpc/mm/book3s64/radix_pgtable.c | 30 +++++--------------
 arch/powerpc/mm/drmem.c                  | 10 +++----
 arch/powerpc/mm/hugetlbpage.c            |  6 ++--
 arch/powerpc/mm/mmu_decl.h               | 16 ++++------
 arch/powerpc/mm/nohash/8xx.c             | 33 +++++++++++++--------
 arch/powerpc/mm/nohash/e500.c            | 10 +++++--
 arch/powerpc/mm/nohash/kaslr_booke.c     |  4 +--
 arch/powerpc/mm/numa.c                   |  6 ++--
 arch/powerpc/mm/pageattr.c               | 28 ++++++++++++++++++
 arch/powerpc/mm/pgtable.c                | 11 +++----
 arch/powerpc/mm/pgtable_32.c             | 51 ++++++++++++++++----------------
 arch/powerpc/mm/pgtable_64.c             | 11 +++----
 arch/powerpc/mm/ptdump/ptdump.c          | 21 ++++++++-----
 17 files changed, 144 insertions(+), 120 deletions(-)

diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
index 5445587bfe..100f999871 100644
--- a/arch/powerpc/mm/book3s32/mmu.c
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -193,7 +193,7 @@ static bool is_module_segment(unsigned long addr)
 	return true;
 }
 
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
 {
 	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
 	int i;
@@ -230,9 +230,10 @@ void mmu_mark_initmem_nx(void)
 
 		mtsr(mfsr(i << 28) | 0x10000000, i << 28);
 	}
+	return 0;
 }
 
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
 {
 	int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
 	int i;
@@ -245,6 +246,8 @@ void mmu_mark_rodata_ro(void)
 	}
 
 	update_bats();
+
+	return 0;
 }
 
 /*
diff --git a/arch/powerpc/mm/book3s64/hash_hugepage.c b/arch/powerpc/mm/book3s64/hash_hugepage.c
index c0fabe6c5a..15d6f3ea71 100644
--- a/arch/powerpc/mm/book3s64/hash_hugepage.c
+++ b/arch/powerpc/mm/book3s64/hash_hugepage.c
@@ -59,16 +59,13 @@ int __hash_page_thp(unsigned long ea, unsigned long access, unsigned long vsid,
 
 	rflags = htab_convert_pte_flags(new_pmd, flags);
 
-#if 0
-	if (!cpu_has_feature(CPU_FTR_COHERENT_ICACHE)) {
+	/*
+	 * THPs are only supported on platforms that can do mixed page size
+	 * segments (MPSS) and all such platforms have coherent icache. Hence we
+	 * don't need to do lazy icache flush (hash_page_do_lazy_icache()) on
+	 * noexecute fault.
+	 */
 
-		/*
-		 * No CPU has hugepages but lacks no execute, so we
-		 * don't need to worry about that case
-		 */
-		rflags = hash_page_do_lazy_icache(rflags, __pte(old_pte), trap);
-	}
-#endif
 	/*
 	 * Find the slot index details for this ea, using base page size.
 	 */
diff --git a/arch/powerpc/mm/book3s64/hash_utils.c b/arch/powerpc/mm/book3s64/hash_utils.c
index 0626a25b0d..01c3b4b652 100644
--- a/arch/powerpc/mm/book3s64/hash_utils.c
+++ b/arch/powerpc/mm/book3s64/hash_utils.c
@@ -2172,7 +2172,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
 				     mmu_kernel_ssize, 0);
 }
 
-void hash__kernel_map_pages(struct page *page, int numpages, int enable)
+int hash__kernel_map_pages(struct page *page, int numpages, int enable)
 {
 	unsigned long flags, vaddr, lmi;
 	int i;
@@ -2189,6 +2189,7 @@ void hash__kernel_map_pages(struct page *page, int numpages, int enable)
 			kernel_unmap_linear_page(vaddr, lmi);
 	}
 	local_irq_restore(flags);
+	return 0;
 }
 #endif /* CONFIG_DEBUG_PAGEALLOC || CONFIG_KFENCE */
 
diff --git a/arch/powerpc/mm/book3s64/pgtable.c b/arch/powerpc/mm/book3s64/pgtable.c
index d975fb5d7c..83823db348 100644
--- a/arch/powerpc/mm/book3s64/pgtable.c
+++ b/arch/powerpc/mm/book3s64/pgtable.c
@@ -113,7 +113,7 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 
 	WARN_ON(pte_hw_valid(pmd_pte(*pmdp)) && !pte_protnone(pmd_pte(*pmdp)));
 	assert_spin_locked(pmd_lockptr(mm, pmdp));
-	WARN_ON(!(pmd_large(pmd)));
+	WARN_ON(!(pmd_leaf(pmd)));
 #endif
 	trace_hugepage_set_pmd(addr, pmd_val(pmd));
 	return set_pte_at(mm, addr, pmdp_ptep(pmdp), pmd_pte(pmd));
diff --git a/arch/powerpc/mm/book3s64/radix_pgtable.c b/arch/powerpc/mm/book3s64/radix_pgtable.c
index c6a4ac766b..15e88f1439 100644
--- a/arch/powerpc/mm/book3s64/radix_pgtable.c
+++ b/arch/powerpc/mm/book3s64/radix_pgtable.c
@@ -204,14 +204,14 @@ static void radix__change_memory_range(unsigned long start, unsigned long end,
 		pudp = pud_alloc(&init_mm, p4dp, idx);
 		if (!pudp)
 			continue;
-		if (pud_is_leaf(*pudp)) {
+		if (pud_leaf(*pudp)) {
 			ptep = (pte_t *)pudp;
 			goto update_the_pte;
 		}
 		pmdp = pmd_alloc(&init_mm, pudp, idx);
 		if (!pmdp)
 			continue;
-		if (pmd_is_leaf(*pmdp)) {
+		if (pmd_leaf(*pmdp)) {
 			ptep = pmdp_ptep(pmdp);
 			goto update_the_pte;
 		}
@@ -767,7 +767,7 @@ static void __meminit remove_pmd_table(pmd_t *pmd_start, unsigned long addr,
 		if (!pmd_present(*pmd))
 			continue;
 
-		if (pmd_is_leaf(*pmd)) {
+		if (pmd_leaf(*pmd)) {
 			if (IS_ALIGNED(addr, PMD_SIZE) &&
 			    IS_ALIGNED(next, PMD_SIZE)) {
 				if (!direct)
@@ -807,7 +807,7 @@ static void __meminit remove_pud_table(pud_t *pud_start, unsigned long addr,
 		if (!pud_present(*pud))
 			continue;
 
-		if (pud_is_leaf(*pud)) {
+		if (pud_leaf(*pud)) {
 			if (!IS_ALIGNED(addr, PUD_SIZE) ||
 			    !IS_ALIGNED(next, PUD_SIZE)) {
 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -845,7 +845,7 @@ remove_pagetable(unsigned long start, unsigned long end, bool direct,
 		if (!p4d_present(*p4d))
 			continue;
 
-		if (p4d_is_leaf(*p4d)) {
+		if (p4d_leaf(*p4d)) {
 			if (!IS_ALIGNED(addr, P4D_SIZE) ||
 			    !IS_ALIGNED(next, P4D_SIZE)) {
 				WARN_ONCE(1, "%s: unaligned range\n", __func__);
@@ -924,7 +924,7 @@ bool vmemmap_can_optimize(struct vmem_altmap *altmap, struct dev_pagemap *pgmap)
 int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
 				unsigned long addr, unsigned long next)
 {
-	int large = pmd_large(*pmdp);
+	int large = pmd_leaf(*pmdp);
 
 	if (large)
 		vmemmap_verify(pmdp_ptep(pmdp), node, addr, next);
@@ -1339,20 +1339,6 @@ void __ref radix__vmemmap_free(unsigned long start, unsigned long end,
 #endif
 #endif
 
-#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
-void radix__kernel_map_pages(struct page *page, int numpages, int enable)
-{
-	unsigned long addr;
-
-	addr = (unsigned long)page_address(page);
-
-	if (enable)
-		set_memory_p(addr, numpages);
-	else
-		set_memory_np(addr, numpages);
-}
-#endif
-
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 
 unsigned long radix__pmd_hugepage_update(struct mm_struct *mm, unsigned long addr,
@@ -1554,7 +1540,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t addr, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-	if (pud_is_leaf(*pud)) {
+	if (pud_leaf(*pud)) {
 		pud_clear(pud);
 		return 1;
 	}
@@ -1601,7 +1587,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t addr, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (pmd_is_leaf(*pmd)) {
+	if (pmd_leaf(*pmd)) {
 		pmd_clear(pmd);
 		return 1;
 	}
diff --git a/arch/powerpc/mm/drmem.c b/arch/powerpc/mm/drmem.c
index fde7790277..c110ab8fa8 100644
--- a/arch/powerpc/mm/drmem.c
+++ b/arch/powerpc/mm/drmem.c
@@ -393,17 +393,17 @@ static const __be32 *of_get_usable_memory(struct device_node *dn)
 int walk_drmem_lmbs(struct device_node *dn, void *data,
 		    int (*func)(struct drmem_lmb *, const __be32 **, void *))
 {
+	struct device_node *root = of_find_node_by_path("/");
 	const __be32 *prop, *usm;
 	int ret = -ENODEV;
 
-	if (!of_root)
+	if (!root)
 		return ret;
 
 	/* Get the address & size cells */
-	of_node_get(of_root);
-	n_root_addr_cells = of_n_addr_cells(of_root);
-	n_root_size_cells = of_n_size_cells(of_root);
-	of_node_put(of_root);
+	n_root_addr_cells = of_n_addr_cells(root);
+	n_root_size_cells = of_n_size_cells(root);
+	of_node_put(root);
 
 	if (init_drmem_lmb_size(dn))
 		return ret;
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 0a540b37aa..594a4b7b2c 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -226,7 +226,7 @@ static int __init pseries_alloc_bootmem_huge_page(struct hstate *hstate)
 		return 0;
 	m = phys_to_virt(gpage_freearray[--nr_gpages]);
 	gpage_freearray[nr_gpages] = 0;
-	list_add(&m->list, &huge_boot_pages);
+	list_add(&m->list, &huge_boot_pages[0]);
 	m->hstate = hstate;
 	return 1;
 }
@@ -614,8 +614,6 @@ void __init gigantic_hugetlb_cma_reserve(void)
 		 */
 		order = mmu_psize_to_shift(MMU_PAGE_16G) - PAGE_SHIFT;
 
-	if (order) {
-		VM_WARN_ON(order <= MAX_PAGE_ORDER);
+	if (order)
 		hugetlb_cma_reserve(order);
-	}
 }
diff --git a/arch/powerpc/mm/mmu_decl.h b/arch/powerpc/mm/mmu_decl.h
index 72341b9fb5..6949c2c937 100644
--- a/arch/powerpc/mm/mmu_decl.h
+++ b/arch/powerpc/mm/mmu_decl.h
@@ -160,23 +160,17 @@ static inline unsigned long p_block_mapped(phys_addr_t pa) { return 0; }
 #endif
 
 #if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_PPC_8xx) || defined(CONFIG_PPC_E500)
-void mmu_mark_initmem_nx(void);
-void mmu_mark_rodata_ro(void);
+int mmu_mark_initmem_nx(void);
+int mmu_mark_rodata_ro(void);
 #else
-static inline void mmu_mark_initmem_nx(void) { }
-static inline void mmu_mark_rodata_ro(void) { }
+static inline int mmu_mark_initmem_nx(void) { return 0; }
+static inline int mmu_mark_rodata_ro(void) { return 0; }
 #endif
 
 #ifdef CONFIG_PPC_8xx
 void __init mmu_mapin_immr(void);
 #endif
 
-#ifdef CONFIG_DEBUG_WX
-void ptdump_check_wx(void);
-#else
-static inline void ptdump_check_wx(void) { }
-#endif
-
 static inline bool debug_pagealloc_enabled_or_kfence(void)
 {
 	return IS_ENABLED(CONFIG_KFENCE) || debug_pagealloc_enabled();
@@ -186,3 +180,5 @@ static inline bool debug_pagealloc_enabled_or_kfence(void)
 int create_section_mapping(unsigned long start, unsigned long end,
 			   int nid, pgprot_t prot);
 #endif
+
+int hash__kernel_map_pages(struct page *page, int numpages, int enable);
diff --git a/arch/powerpc/mm/nohash/8xx.c b/arch/powerpc/mm/nohash/8xx.c
index 6be6421086..43d4842bb1 100644
--- a/arch/powerpc/mm/nohash/8xx.c
+++ b/arch/powerpc/mm/nohash/8xx.c
@@ -119,23 +119,26 @@ void __init mmu_mapin_immr(void)
 			PAGE_KERNEL_NCG, MMU_PAGE_512K, true);
 }
 
-static void mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
-				pgprot_t prot, bool new)
+static int mmu_mapin_ram_chunk(unsigned long offset, unsigned long top,
+			       pgprot_t prot, bool new)
 {
 	unsigned long v = PAGE_OFFSET + offset;
 	unsigned long p = offset;
+	int err = 0;
 
 	WARN_ON(!IS_ALIGNED(offset, SZ_512K) || !IS_ALIGNED(top, SZ_512K));
 
-	for (; p < ALIGN(p, SZ_8M) && p < top; p += SZ_512K, v += SZ_512K)
-		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
-	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top; p += SZ_8M, v += SZ_8M)
-		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
-	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top; p += SZ_512K, v += SZ_512K)
-		__early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+	for (; p < ALIGN(p, SZ_8M) && p < top && !err; p += SZ_512K, v += SZ_512K)
+		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
+	for (; p < ALIGN_DOWN(top, SZ_8M) && p < top && !err; p += SZ_8M, v += SZ_8M)
+		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_8M, new);
+	for (; p < ALIGN_DOWN(top, SZ_512K) && p < top && !err; p += SZ_512K, v += SZ_512K)
+		err = __early_map_kernel_hugepage(v, p, prot, MMU_PAGE_512K, new);
 
 	if (!new)
 		flush_tlb_kernel_range(PAGE_OFFSET + v, PAGE_OFFSET + top);
+
+	return err;
 }
 
 unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
@@ -166,27 +169,33 @@ unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
 	return top;
 }
 
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
 {
 	unsigned long etext8 = ALIGN(__pa(_etext), SZ_8M);
 	unsigned long sinittext = __pa(_sinittext);
 	unsigned long boundary = strict_kernel_rwx_enabled() ? sinittext : etext8;
 	unsigned long einittext8 = ALIGN(__pa(_einittext), SZ_8M);
+	int err = 0;
 
 	if (!debug_pagealloc_enabled_or_kfence())
-		mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
+		err = mmu_mapin_ram_chunk(boundary, einittext8, PAGE_KERNEL, false);
 
 	mmu_pin_tlb(block_mapped_ram, false);
+
+	return err;
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
 {
 	unsigned long sinittext = __pa(_sinittext);
+	int err;
 
-	mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
+	err = mmu_mapin_ram_chunk(0, sinittext, PAGE_KERNEL_ROX, false);
 	if (IS_ENABLED(CONFIG_PIN_TLB_DATA))
 		mmu_pin_tlb(block_mapped_ram, true);
+
+	return err;
 }
 #endif
diff --git a/arch/powerpc/mm/nohash/e500.c b/arch/powerpc/mm/nohash/e500.c
index 921c3521ec..266fb22131 100644
--- a/arch/powerpc/mm/nohash/e500.c
+++ b/arch/powerpc/mm/nohash/e500.c
@@ -285,19 +285,23 @@ void __init adjust_total_lowmem(void)
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void mmu_mark_rodata_ro(void)
+int mmu_mark_rodata_ro(void)
 {
 	unsigned long remapped;
 
 	remapped = map_mem_in_cams(__max_low_memory, CONFIG_LOWMEM_CAM_NUM, false, false);
 
-	WARN_ON(__max_low_memory != remapped);
+	if (WARN_ON(__max_low_memory != remapped))
+		return -EINVAL;
+
+	return 0;
 }
 #endif
 
-void mmu_mark_initmem_nx(void)
+int mmu_mark_initmem_nx(void)
 {
 	/* Everything is done in mmu_mark_rodata_ro() */
+	return 0;
 }
 
 void setup_initial_memory_limit(phys_addr_t first_memblock_base,
diff --git a/arch/powerpc/mm/nohash/kaslr_booke.c b/arch/powerpc/mm/nohash/kaslr_booke.c
index b4f2786a7d..cdff129abb 100644
--- a/arch/powerpc/mm/nohash/kaslr_booke.c
+++ b/arch/powerpc/mm/nohash/kaslr_booke.c
@@ -13,7 +13,7 @@
 #include <linux/delay.h>
 #include <linux/memblock.h>
 #include <linux/libfdt.h>
-#include <linux/crash_core.h>
+#include <linux/crash_reserve.h>
 #include <linux/of.h>
 #include <linux/of_fdt.h>
 #include <asm/cacheflush.h>
@@ -173,7 +173,7 @@ static __init bool overlaps_region(const void *fdt, u32 start,
 
 static void __init get_crash_kernel(void *fdt, unsigned long size)
 {
-#ifdef CONFIG_CRASH_CORE
+#ifdef CONFIG_CRASH_RESERVE
 	unsigned long long crash_size, crash_base;
 	int ret;
 
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index f6c4ace3b2..a490724e84 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -1111,7 +1111,7 @@ static void __init setup_node_data(int nid, u64 start_pfn, u64 end_pfn)
 
 static void __init find_possible_nodes(void)
 {
-	struct device_node *rtas;
+	struct device_node *rtas, *root;
 	const __be32 *domains = NULL;
 	int prop_length, max_nodes;
 	u32 i;
@@ -1132,10 +1132,12 @@ static void __init find_possible_nodes(void)
 	 * If the LPAR is migratable, new nodes might be activated after a LPM,
 	 * so we should consider the max number in that case.
 	 */
-	if (!of_get_property(of_root, "ibm,migratable-partition", NULL))
+	root = of_find_node_by_path("/");
+	if (!of_get_property(root, "ibm,migratable-partition", NULL))
 		domains = of_get_property(rtas,
 					  "ibm,current-associativity-domains",
 					  &prop_length);
+	of_node_put(root);
 	if (!domains) {
 		domains = of_get_property(rtas, "ibm,max-associativity-domains",
 					  &prop_length);
diff --git a/arch/powerpc/mm/pageattr.c b/arch/powerpc/mm/pageattr.c
index 6163e484bc..ac22bf2808 100644
--- a/arch/powerpc/mm/pageattr.c
+++ b/arch/powerpc/mm/pageattr.c
@@ -14,6 +14,7 @@
 
 #include <asm/page.h>
 #include <asm/pgtable.h>
+#include <mm/mmu_decl.h>
 
 static pte_basic_t pte_update_delta(pte_t *ptep, unsigned long addr,
 				    unsigned long old, unsigned long new)
@@ -38,6 +39,10 @@ static int change_page_attr(pte_t *ptep, unsigned long addr, void *data)
 		/* Don't clear DIRTY bit */
 		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_RO);
 		break;
+	case SET_MEMORY_ROX:
+		/* Don't clear DIRTY bit */
+		pte_update_delta(ptep, addr, _PAGE_KERNEL_RW & ~_PAGE_DIRTY, _PAGE_KERNEL_ROX);
+		break;
 	case SET_MEMORY_RW:
 		pte_update_delta(ptep, addr, _PAGE_KERNEL_RO, _PAGE_KERNEL_RW);
 		break;
@@ -97,3 +102,26 @@ int change_memory_attr(unsigned long addr, int numpages, long action)
 	return apply_to_existing_page_range(&init_mm, start, size,
 					    change_page_attr, (void *)action);
 }
+
+#if defined(CONFIG_DEBUG_PAGEALLOC) || defined(CONFIG_KFENCE)
+#ifdef CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+	int err;
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (PageHighMem(page))
+		return;
+
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !radix_enabled())
+		err = hash__kernel_map_pages(page, numpages, enable);
+	else if (enable)
+		err = set_memory_p(addr, numpages);
+	else
+		err = set_memory_np(addr, numpages);
+
+	if (err)
+		panic("%s: changing memory protections failed\n", __func__);
+}
+#endif
+#endif
diff --git a/arch/powerpc/mm/pgtable.c b/arch/powerpc/mm/pgtable.c
index a04ae4449a..9e7ba9c385 100644
--- a/arch/powerpc/mm/pgtable.c
+++ b/arch/powerpc/mm/pgtable.c
@@ -220,10 +220,7 @@ void set_ptes(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 			break;
 		ptep++;
 		addr += PAGE_SIZE;
-		/*
-		 * increment the pfn.
-		 */
-		pte = pfn_pte(pte_pfn(pte) + 1, pte_pgprot((pte)));
+		pte = pte_next_pfn(pte);
 	}
 }
 
@@ -413,7 +410,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 	if (p4d_none(p4d))
 		return NULL;
 
-	if (p4d_is_leaf(p4d)) {
+	if (p4d_leaf(p4d)) {
 		ret_pte = (pte_t *)p4dp;
 		goto out;
 	}
@@ -435,7 +432,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		if (pud_none(pud))
 			return NULL;
 
-		if (pud_is_leaf(pud)) {
+		if (pud_leaf(pud)) {
 			ret_pte = (pte_t *)pudp;
 			goto out;
 		}
@@ -474,7 +471,7 @@ pte_t *__find_linux_pte(pgd_t *pgdir, unsigned long ea,
 		goto out;
 	}
 
-	if (pmd_is_leaf(pmd)) {
+	if (pmd_leaf(pmd)) {
 		ret_pte = (pte_t *)pmdp;
 		goto out;
 	}
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index 5c02fd08d6..cfd622ebf7 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -130,32 +130,41 @@ void __init mapin_ram(void)
 	}
 }
 
-void mark_initmem_nx(void)
+static int __mark_initmem_nx(void)
 {
 	unsigned long numpages = PFN_UP((unsigned long)_einittext) -
 				 PFN_DOWN((unsigned long)_sinittext);
+	int err;
 
-	mmu_mark_initmem_nx();
+	err = mmu_mark_initmem_nx();
 
 	if (!v_block_mapped((unsigned long)_sinittext)) {
-		set_memory_nx((unsigned long)_sinittext, numpages);
-		set_memory_rw((unsigned long)_sinittext, numpages);
+		err = set_memory_nx((unsigned long)_sinittext, numpages);
+		if (err)
+			return err;
+		err = set_memory_rw((unsigned long)_sinittext, numpages);
 	}
+	return err;
+}
+
+void mark_initmem_nx(void)
+{
+	int err = __mark_initmem_nx();
+
+	if (err)
+		panic("%s() failed, err = %d\n", __func__, err);
 }
 
 #ifdef CONFIG_STRICT_KERNEL_RWX
-void mark_rodata_ro(void)
+static int __mark_rodata_ro(void)
 {
 	unsigned long numpages;
 
 	if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX) && mmu_has_feature(MMU_FTR_HPTE_TABLE))
 		pr_warn("This platform has HASH MMU, STRICT_MODULE_RWX won't work\n");
 
-	if (v_block_mapped((unsigned long)_stext + 1)) {
-		mmu_mark_rodata_ro();
-		ptdump_check_wx();
-		return;
-	}
+	if (v_block_mapped((unsigned long)_stext + 1))
+		return mmu_mark_rodata_ro();
 
 	/*
 	 * mark text and rodata as read only. __end_rodata is set by
@@ -165,24 +174,14 @@ void mark_rodata_ro(void)
 	numpages = PFN_UP((unsigned long)__end_rodata) -
 		   PFN_DOWN((unsigned long)_stext);
 
-	set_memory_ro((unsigned long)_stext, numpages);
-
-	// mark_initmem_nx() should have already run by now
-	ptdump_check_wx();
+	return set_memory_ro((unsigned long)_stext, numpages);
 }
-#endif
 
-#if defined(CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC) && defined(CONFIG_DEBUG_PAGEALLOC)
-void __kernel_map_pages(struct page *page, int numpages, int enable)
+void mark_rodata_ro(void)
 {
-	unsigned long addr = (unsigned long)page_address(page);
+	int err = __mark_rodata_ro();
 
-	if (PageHighMem(page))
-		return;
-
-	if (enable)
-		set_memory_p(addr, numpages);
-	else
-		set_memory_np(addr, numpages);
+	if (err)
+		panic("%s() failed, err = %d\n", __func__, err);
 }
-#endif /* CONFIG_DEBUG_PAGEALLOC */
+#endif
diff --git a/arch/powerpc/mm/pgtable_64.c b/arch/powerpc/mm/pgtable_64.c
index 5ac1fd3034..9b99113cb5 100644
--- a/arch/powerpc/mm/pgtable_64.c
+++ b/arch/powerpc/mm/pgtable_64.c
@@ -100,7 +100,7 @@ EXPORT_SYMBOL(__pte_frag_size_shift);
 /* 4 level page table */
 struct page *p4d_page(p4d_t p4d)
 {
-	if (p4d_is_leaf(p4d)) {
+	if (p4d_leaf(p4d)) {
 		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
 			VM_WARN_ON(!p4d_huge(p4d));
 		return pte_page(p4d_pte(p4d));
@@ -111,7 +111,7 @@ struct page *p4d_page(p4d_t p4d)
 
 struct page *pud_page(pud_t pud)
 {
-	if (pud_is_leaf(pud)) {
+	if (pud_leaf(pud)) {
 		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
 			VM_WARN_ON(!pud_huge(pud));
 		return pte_page(pud_pte(pud));
@@ -125,14 +125,14 @@ struct page *pud_page(pud_t pud)
  */
 struct page *pmd_page(pmd_t pmd)
 {
-	if (pmd_is_leaf(pmd)) {
+	if (pmd_leaf(pmd)) {
 		/*
 		 * vmalloc_to_page may be called on any vmap address (not only
 		 * vmalloc), and it uses pmd_page() etc., when huge vmap is
 		 * enabled so these checks can't be used.
 		 */
 		if (!IS_ENABLED(CONFIG_HAVE_ARCH_HUGE_VMAP))
-			VM_WARN_ON(!(pmd_large(pmd) || pmd_huge(pmd)));
+			VM_WARN_ON(!(pmd_leaf(pmd) || pmd_huge(pmd)));
 		return pte_page(pmd_pte(pmd));
 	}
 	return virt_to_page(pmd_page_vaddr(pmd));
@@ -150,9 +150,6 @@ struct page *pmd_page(pmd_t pmd)
 		radix__mark_rodata_ro();
 	else
 		hash__mark_rodata_ro();
-
-	// mark_initmem_nx() should have already run by now
-	ptdump_check_wx();
 }
 
 void mark_initmem_nx(void)
diff --git a/arch/powerpc/mm/ptdump/ptdump.c b/arch/powerpc/mm/ptdump/ptdump.c
index 2313053fe6..9dc239967b 100644
--- a/arch/powerpc/mm/ptdump/ptdump.c
+++ b/arch/powerpc/mm/ptdump/ptdump.c
@@ -184,13 +184,14 @@ static void note_prot_wx(struct pg_state *st, unsigned long addr)
 {
 	pte_t pte = __pte(st->current_flags);
 
-	if (!IS_ENABLED(CONFIG_DEBUG_WX) || !st->check_wx)
+	if (!st->check_wx)
 		return;
 
 	if (!pte_write(pte) || !pte_exec(pte))
 		return;
 
-	WARN_ONCE(1, "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
+	WARN_ONCE(IS_ENABLED(CONFIG_DEBUG_WX),
+		  "powerpc/mm: Found insecure W+X mapping at address %p/%pS\n",
 		  (void *)st->start_address, (void *)st->start_address);
 
 	st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
@@ -326,8 +327,7 @@ static void __init build_pgtable_complete_mask(void)
 			pg_level[i].mask |= pg_level[i].flag[j].mask;
 }
 
-#ifdef CONFIG_DEBUG_WX
-void ptdump_check_wx(void)
+bool ptdump_check_wx(void)
 {
 	struct pg_state st = {
 		.seq = NULL,
@@ -343,15 +343,22 @@ void ptdump_check_wx(void)
 		}
 	};
 
+	if (IS_ENABLED(CONFIG_PPC_BOOK3S_64) && !mmu_has_feature(MMU_FTR_KERNEL_RO))
+		return true;
+
 	ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
 
-	if (st.wx_pages)
+	if (st.wx_pages) {
 		pr_warn("Checked W+X mappings: FAILED, %lu W+X pages found\n",
 			st.wx_pages);
-	else
+
+		return false;
+	} else {
 		pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+
+		return true;
+	}
 }
-#endif
 
 static int __init ptdump_init(void)
 {
--
cgit v1.2.3