Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--	arch/riscv/mm/Makefile          |  3
-rw-r--r--	arch/riscv/mm/dma-noncoherent.c |  2
-rw-r--r--	arch/riscv/mm/extable.c         | 31
-rw-r--r--	arch/riscv/mm/fault.c           | 16
-rw-r--r--	arch/riscv/mm/hugetlbpage.c     | 12
-rw-r--r--	arch/riscv/mm/init.c            | 23
-rw-r--r--	arch/riscv/mm/kasan_init.c      | 53
-rw-r--r--	arch/riscv/mm/pageattr.c        | 44
-rw-r--r--	arch/riscv/mm/pgtable.c         | 51
-rw-r--r--	arch/riscv/mm/tlbflush.c        | 70
10 files changed, 218 insertions, 87 deletions
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
index 3a4dfc8bab..2c869f8026 100644
--- a/arch/riscv/mm/Makefile
+++ b/arch/riscv/mm/Makefile
@@ -13,10 +13,9 @@ endif
 KCOV_INSTRUMENT_init.o := n
 
 obj-y += init.o
-obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o
+obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o
 obj-y += cacheflush.o
 obj-y += context.o
-obj-y += pgtable.o
 obj-y += pmem.o
 
 ifeq ($(CONFIG_MMU),y)
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index 4e4e469b8d..843107f834 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
 }
 
 void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
-			const struct iommu_ops *iommu, bool coherent)
+			bool coherent)
 {
 	WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN,
 		   TAINT_CPU_OUT_OF_SPEC,
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
index 35484d830f..dd1530af3e 100644
--- a/arch/riscv/mm/extable.c
+++ b/arch/riscv/mm/extable.c
@@ -27,6 +27,14 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex,
 	return true;
 }
 
+static inline unsigned long regs_get_gpr(struct pt_regs *regs, unsigned int offset)
+{
+	if (unlikely(!offset || offset > MAX_REG_OFFSET))
+		return 0;
+
+	return *(unsigned long *)((unsigned long)regs + offset);
+}
+
 static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset,
 				unsigned long val)
 {
@@ -50,6 +58,27 @@ static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
 	return true;
 }
 
+static bool
+ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex,
+				  struct pt_regs *regs)
+{
+	int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data);
+	int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data);
+	unsigned long data, addr, offset;
+
+	addr = regs_get_gpr(regs, reg_addr * sizeof(unsigned long));
+
+	offset = addr & 0x7UL;
+	addr &= ~0x7UL;
+
+	data = *(unsigned long *)addr >> (offset * 8);
+
+	regs_set_gpr(regs, reg_data * sizeof(unsigned long), data);
+
+	regs->epc = get_ex_fixup(ex);
+	return true;
+}
+
 bool fixup_exception(struct pt_regs *regs)
 {
 	const struct exception_table_entry *ex;
@@ -65,6 +94,8 @@ bool fixup_exception(struct pt_regs *regs)
 		return ex_handler_bpf(ex, regs);
 	case EX_TYPE_UACCESS_ERR_ZERO:
 		return ex_handler_uaccess_err_zero(ex, regs);
+	case EX_TYPE_LOAD_UNALIGNED_ZEROPAD:
+		return ex_handler_load_unaligned_zeropad(ex, regs);
 	}
 
 	BUG();
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 081339ddf4..3ba1d4dde5 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -136,24 +136,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 	pgd = (pgd_t *)pfn_to_virt(pfn) + index;
 	pgd_k = init_mm.pgd + index;
 
-	if (!pgd_present(*pgd_k)) {
+	if (!pgd_present(pgdp_get(pgd_k))) {
 		no_context(regs, addr);
 		return;
 	}
-	set_pgd(pgd, *pgd_k);
+	set_pgd(pgd, pgdp_get(pgd_k));
 
 	p4d_k = p4d_offset(pgd_k, addr);
-	if (!p4d_present(*p4d_k)) {
+	if (!p4d_present(p4dp_get(p4d_k))) {
 		no_context(regs, addr);
 		return;
 	}
 
 	pud_k = pud_offset(p4d_k, addr);
-	if (!pud_present(*pud_k)) {
+	if (!pud_present(pudp_get(pud_k))) {
 		no_context(regs, addr);
 		return;
 	}
-	if (pud_leaf(*pud_k))
+	if (pud_leaf(pudp_get(pud_k)))
 		goto flush_tlb;
 
 	/*
@@ -161,11 +161,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 	 * to copy individual PTEs
 	 */
 	pmd_k = pmd_offset(pud_k, addr);
-	if (!pmd_present(*pmd_k)) {
+	if (!pmd_present(pmdp_get(pmd_k))) {
 		no_context(regs, addr);
 		return;
 	}
-	if (pmd_leaf(*pmd_k))
+	if (pmd_leaf(pmdp_get(pmd_k)))
 		goto flush_tlb;
 
 	/*
@@ -175,7 +175,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a
 	 * silently loop forever.
 	 */
 	pte_k = pte_offset_kernel(pmd_k, addr);
-	if (!pte_present(*pte_k)) {
+	if (!pte_present(ptep_get(pte_k))) {
 		no_context(regs, addr);
 		return;
 	}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index fbe9188016..5ef2a68911 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm,
 	}
 
 	if (sz == PMD_SIZE) {
-		if (want_pmd_share(vma, addr) && pud_none(*pud))
+		if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud)))
 			pte = huge_pmd_share(mm, vma, addr, pud);
 		else
 			pte = (pte_t *)pmd_alloc(mm, pud, addr);
@@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 	pmd_t *pmd;
 
 	pgd = pgd_offset(mm, addr);
-	if (!pgd_present(*pgd))
+	if (!pgd_present(pgdp_get(pgd)))
 		return NULL;
 
 	p4d = p4d_offset(pgd, addr);
-	if (!p4d_present(*p4d))
+	if (!p4d_present(p4dp_get(p4d)))
 		return NULL;
 
 	pud = pud_offset(p4d, addr);
@@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		/* must be pud huge, non-present or none */
 		return (pte_t *)pud;
 
-	if (!pud_present(*pud))
+	if (!pud_present(pudp_get(pud)))
 		return NULL;
 
 	pmd = pmd_offset(pud, addr);
@@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
 		/* must be pmd huge, non-present or none */
 		return (pte_t *)pmd;
 
-	if (!pmd_present(*pmd))
+	if (!pmd_present(pmdp_get(pmd)))
 		return NULL;
 
 	for_each_napot_order(order) {
@@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm,
 		    pte_t *ptep,
 		    unsigned long sz)
 {
-	pte_t pte = READ_ONCE(*ptep);
+	pte_t pte = ptep_get(ptep);
 	int i, pte_num;
 
 	if (!pte_napot(pte)) {
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index ee224fe18d..0c00efc756 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -232,7 +232,7 @@ static void __init setup_bootmem(void)
 	 * In 64-bit, any use of __va/__pa before this point is wrong as we
 	 * did not know the start of DRAM before.
	 */
-	if (IS_ENABLED(CONFIG_64BIT))
+	if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU))
 		kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base;
 
 	/*
@@ -1395,10 +1395,29 @@ void __init misc_mem_init(void)
 }
 
 #ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+			       unsigned long addr, unsigned long next)
+{
+	pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL);
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node,
+				unsigned long addr, unsigned long next)
+{
+	vmemmap_verify((pte_t *)pmdp, node, addr, next);
+	return 1;
+}
+
 int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 			       struct vmem_altmap *altmap)
 {
-	return vmemmap_populate_basepages(start, end, node, NULL);
+	/*
+	 * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we
+	 * can't use hugepage mappings for 2-level page table because in case of
+	 * memory hotplug, we are not able to update all the page tables with
+	 * the new PMDs.
+	 */
+	return vmemmap_populate_hugepages(start, end, node, NULL);
 }
 #endif
 
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 5e39dcf23f..c301c8d291 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
 	phys_addr_t phys_addr;
 	pte_t *ptep, *p;
 
-	if (pmd_none(*pmd)) {
+	if (pmd_none(pmdp_get(pmd))) {
 		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
 		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
 	ptep = pte_offset_kernel(pmd, vaddr);
 
 	do {
-		if (pte_none(*ptep)) {
+		if (pte_none(ptep_get(ptep))) {
 			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
 			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
 	pmd_t *pmdp, *p;
 	unsigned long next;
 
-	if (pud_none(*pud)) {
+	if (pud_none(pudp_get(pud))) {
 		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
 		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
 	do {
 		next = pmd_addr_end(vaddr, end);
 
-		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+		if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+		    (next - vaddr) >= PMD_SIZE) {
 			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
 			if (phys_addr) {
 				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
 	pud_t *pudp, *p;
 	unsigned long next;
 
-	if (p4d_none(*p4d)) {
+	if (p4d_none(p4dp_get(p4d))) {
 		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
 		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
 	do {
 		next = pud_addr_end(vaddr, end);
 
-		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		    (next - vaddr) >= PUD_SIZE) {
 			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
 			if (phys_addr) {
 				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 	p4d_t *p4dp, *p;
 	unsigned long next;
 
-	if (pgd_none(*pgd)) {
+	if (pgd_none(pgdp_get(pgd))) {
 		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 	do {
 		next = p4d_addr_end(vaddr, end);
 
-		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		    (next - vaddr) >= P4D_SIZE) {
 			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
 			if (phys_addr) {
 				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
 		    (next - vaddr) >= PGDIR_SIZE) {
 			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
 			if (phys_addr) {
@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
 	if (!pgtable_l4_enabled) {
 		pudp = (pud_t *)p4dp;
 	} else {
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
 		pudp = base_pud + pud_index(vaddr);
 	}
 
@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
 	if (!pgtable_l5_enabled) {
 		p4dp = (p4d_t *)pgdp;
 	} else {
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
 		p4dp = base_p4d + p4d_index(vaddr);
 	}
 
@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
 	if (!pgtable_l4_enabled) {
 		pudp = (pud_t *)p4dp;
 	} else {
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
 		pudp = base_pud + pud_index(vaddr);
 	}
 
 	do {
 		next = pud_addr_end(vaddr, end);
 
-		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
 		    (next - vaddr) >= PUD_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
 			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
 	if (!pgtable_l5_enabled) {
 		p4dp = (p4d_t *)pgdp;
 	} else {
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
 		p4dp = base_p4d + p4d_index(vaddr);
 	}
 
 	do {
 		next = p4d_addr_end(vaddr, end);
 
-		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
 		    (next - vaddr) >= P4D_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
 			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
 		    (next - vaddr) >= PGDIR_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
 			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
 	do {
 		next = pud_addr_end(vaddr, end);
 
-		if (pud_none(*pud_k)) {
+		if (pud_none(pudp_get(pud_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;
@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
 	do {
 		next = p4d_addr_end(vaddr, end);
 
-		if (p4d_none(*p4d_k)) {
+		if (p4d_none(p4dp_get(p4d_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;
@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		if (pgd_none(*pgd_k)) {
+		if (pgd_none(pgdp_get(pgd_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;
@@ -438,6 +441,14 @@ static void __init kasan_shallow_populate(void *start, void *end)
 	kasan_shallow_populate_pgd(vaddr, vend);
 }
 
+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+	kasan_populate(kasan_mem_to_shadow(start),
+		       kasan_mem_to_shadow(start + size));
+}
+#endif
+
 static void __init create_tmp_mapping(void)
 {
 	void *ptr;
@@ -451,7 +462,7 @@ static void __init create_tmp_mapping(void)
 
 	/* Copy the last p4d since it is shared with the kernel mapping. */
 	if (pgtable_l5_enabled) {
-		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
 		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
 		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
 			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@@ -462,7 +473,7 @@ static void __init create_tmp_mapping(void)
 
 	/* Copy the last pud since it is shared with the kernel mapping. */
 	if (pgtable_l4_enabled) {
-		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+		ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
 		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
 		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
 			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 01398fee5c..410056a50a 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
 static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	p4d_t val = READ_ONCE(*p4d);
+	p4d_t val = p4dp_get(p4d);
 
 	if (p4d_leaf(val)) {
 		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@@ -42,7 +42,7 @@ static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
 static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pud_t val = READ_ONCE(*pud);
+	pud_t val = pudp_get(pud);
 
 	if (pud_leaf(val)) {
 		val = __pud(set_pageattr_masks(pud_val(val), walk));
@@ -55,7 +55,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
 static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pmd_t val = READ_ONCE(*pmd);
+	pmd_t val = pmdp_get(pmd);
 
 	if (pmd_leaf(val)) {
 		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@@ -68,7 +68,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
 static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pte_t val = READ_ONCE(*pte);
+	pte_t val = ptep_get(pte);
 
 	val = __pte(set_pageattr_masks(pte_val(val), walk));
 	set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
 		    vaddr <= (vaddr & PMD_MASK) && end >= next)
 			continue;
 
-		if (pmd_leaf(*pmdp)) {
+		if (pmd_leaf(pmdp_get(pmdp))) {
 			struct page *pte_page;
-			unsigned long pfn = _pmd_pfn(*pmdp);
-			pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
 			pte_t *ptep_new;
 			int i;
 
@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
 		    vaddr <= (vaddr & PUD_MASK) && end >= next)
 			continue;
 
-		if (pud_leaf(*pudp)) {
+		if (pud_leaf(pudp_get(pudp))) {
 			struct page *pmd_page;
-			unsigned long pfn = _pud_pfn(*pudp);
-			pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _pud_pfn(pudp_get(pudp));
+			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
 			pmd_t *pmdp_new;
 			int i;
 
@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
 		    vaddr <= (vaddr & P4D_MASK) && end >= next)
 			continue;
 
-		if (p4d_leaf(*p4dp)) {
+		if (p4d_leaf(p4dp_get(p4dp))) {
 			struct page *pud_page;
-			unsigned long pfn = _p4d_pfn(*p4dp);
-			pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
 			pud_t *pudp_new;
 			int i;
 
@@ -411,29 +411,29 @@ bool kernel_page_present(struct page *page)
 	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
-	if (!pgd_present(*pgd))
+	if (!pgd_present(pgdp_get(pgd)))
 		return false;
-	if (pgd_leaf(*pgd))
+	if (pgd_leaf(pgdp_get(pgd)))
 		return true;
 
 	p4d = p4d_offset(pgd, addr);
-	if (!p4d_present(*p4d))
+	if (!p4d_present(p4dp_get(p4d)))
 		return false;
-	if (p4d_leaf(*p4d))
+	if (p4d_leaf(p4dp_get(p4d)))
 		return true;
 
 	pud = pud_offset(p4d, addr);
-	if (!pud_present(*pud))
+	if (!pud_present(pudp_get(pud)))
 		return false;
-	if (pud_leaf(*pud))
+	if (pud_leaf(pudp_get(pud)))
 		return true;
 
 	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd))
+	if (!pmd_present(pmdp_get(pmd)))
 		return false;
-	if (pmd_leaf(*pmd))
+	if (pmd_leaf(pmdp_get(pmd)))
 		return true;
 
 	pte = pte_offset_kernel(pmd, addr);
-	return pte_present(*pte);
+	return pte_present(ptep_get(pte));
 }
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index fef4e7328e..ef887efcb6 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -5,6 +5,47 @@
 #include <linux/kernel.h>
 #include <linux/pgtable.h>
 
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	if (!pte_same(ptep_get(ptep), entry))
+		__set_pte_at(ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep)
+{
+	if (!pte_young(ptep_get(ptep)))
+		return 0;
+	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+	if (pgtable_l4_enabled)
+		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+	return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+	if (pgtable_l5_enabled)
+		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+	return (p4d_t *)pgd;
+}
+#endif
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-	if (!pud_leaf(READ_ONCE(*pud)))
+	if (!pud_leaf(pudp_get(pud)))
 		return 0;
 	pud_clear(pud);
 	return 1;
@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
 
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	pmd_t *pmd = pud_pgtable(*pud);
+	pmd_t *pmd = pud_pgtable(pudp_get(pud));
 	int i;
 
 	pud_clear(pud);
@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (!pmd_leaf(READ_ONCE(*pmd)))
+	if (!pmd_leaf(pmdp_get(pmd)))
 		return 0;
 	pmd_clear(pmd);
 	return 1;
@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
 
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
 
 	pmd_clear(pmd);
 
@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	VM_BUG_ON(pmd_trans_huge(*pmdp));
+	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
 	/*
 	 * When leaf PTE entries (regular pages) are collapsed into a leaf
 	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 1f90721d22..07d743f87b 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -99,29 +99,23 @@ static void __ipi_flush_tlb_range_asid(void *info)
 	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
-			      unsigned long size, unsigned long stride)
+static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+			      unsigned long start, unsigned long size,
+			      unsigned long stride)
 {
 	struct flush_tlb_range_data ftd;
-	const struct cpumask *cmask;
-	unsigned long asid = FLUSH_TLB_NO_ASID;
 	bool broadcast;
 
-	if (mm) {
-		unsigned int cpuid;
+	if (cpumask_empty(cmask))
+		return;
 
-		cmask = mm_cpumask(mm);
-		if (cpumask_empty(cmask))
-			return;
+	if (cmask != cpu_online_mask) {
+		unsigned int cpuid;
 
 		cpuid = get_cpu();
 		/* check if the tlbflush needs to be sent to other CPUs */
 		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-
-		if (static_branch_unlikely(&use_asid_allocator))
-			asid = atomic_long_read(&mm->context.id) & asid_mask;
 	} else {
-		cmask = cpu_online_mask;
 		broadcast = true;
 	}
 
@@ -141,25 +135,34 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 		local_flush_tlb_range_asid(start, size, stride, asid);
 	}
 
-	if (mm)
+	if (cmask != cpu_online_mask)
 		put_cpu();
 }
 
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+	return static_branch_unlikely(&use_asid_allocator) ?
+			atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
 			unsigned long start, unsigned long end,
 			unsigned int page_size)
 {
-	__flush_tlb_range(mm, start, end - start, page_size);
+	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+			  start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+			  addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -191,18 +194,45 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		}
 	}
 
-	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+			  start, end - start, stride_size);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+			  start, end - start, PAGE_SIZE);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			 unsigned long end)
 {
-	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
+	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+			  start, end - start, PMD_SIZE);
 }
 #endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+	return true;
+}
+
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm,
+			       unsigned long uaddr)
+{
+	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+}
+
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
+	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+	cpumask_clear(&batch->cpumask);
+}