author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit    85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree      76267dbc9b9a130337be3640948fe397b04ac629 /arch/riscv/mm
parent    Adding upstream version 6.6.15. (diff)
download  linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.tar.xz
          linux-85c675d0d09a45a135bddd15d7b385f8758c32fb.zip

Adding upstream version 6.7.7. (tag: upstream/6.7.7)

Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/mm')
-rw-r--r--  arch/riscv/mm/cacheflush.c         25
-rw-r--r--  arch/riscv/mm/dma-noncoherent.c     8
-rw-r--r--  arch/riscv/mm/fault.c               2
-rw-r--r--  arch/riscv/mm/hugetlbpage.c        78
-rw-r--r--  arch/riscv/mm/init.c              171
-rw-r--r--  arch/riscv/mm/pmem.c                4
-rw-r--r--  arch/riscv/mm/ptdump.c             53
-rw-r--r--  arch/riscv/mm/tlbflush.c          187
8 files changed, 289 insertions(+), 239 deletions(-)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
index f1387272a..55a34f202 100644
--- a/arch/riscv/mm/cacheflush.c
+++ b/arch/riscv/mm/cacheflush.c
@@ -3,7 +3,9 @@
* Copyright (C) 2017 SiFive
*/
+#include <linux/acpi.h>
#include <linux/of.h>
+#include <asm/acpi.h>
#include <asm/cacheflush.h>
#ifdef CONFIG_SMP
@@ -124,13 +126,24 @@ void __init riscv_init_cbo_blocksizes(void)
unsigned long cbom_hartid, cboz_hartid;
u32 cbom_block_size = 0, cboz_block_size = 0;
struct device_node *node;
+ struct acpi_table_header *rhct;
+ acpi_status status;
+
+ if (acpi_disabled) {
+ for_each_of_cpu_node(node) {
+ /* set block-size for cbom and/or cboz extension if available */
+ cbo_get_block_size(node, "riscv,cbom-block-size",
+ &cbom_block_size, &cbom_hartid);
+ cbo_get_block_size(node, "riscv,cboz-block-size",
+ &cboz_block_size, &cboz_hartid);
+ }
+ } else {
+ status = acpi_get_table(ACPI_SIG_RHCT, 0, &rhct);
+ if (ACPI_FAILURE(status))
+ return;
- for_each_of_cpu_node(node) {
- /* set block-size for cbom and/or cboz extension if available */
- cbo_get_block_size(node, "riscv,cbom-block-size",
- &cbom_block_size, &cbom_hartid);
- cbo_get_block_size(node, "riscv,cboz-block-size",
- &cboz_block_size, &cboz_hartid);
+ acpi_get_cbo_block_size(rhct, &cbom_block_size, &cboz_block_size, NULL);
+ acpi_put_table((struct acpi_table_header *)rhct);
}
if (cbom_block_size)
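
The hunk above teaches riscv_init_cbo_blocksizes() to probe the CBO block sizes from the devicetree only when ACPI is disabled, and from the ACPI RHCT table otherwise. A minimal user-space sketch of that two-source dispatch, with hypothetical fake_* stand-ins for the real DT and RHCT parsers:

    #include <stdbool.h>
    #include <stdio.h>

    static bool acpi_disabled_flag = true;  /* assumption: devicetree boot */

    /* stand-in for cbo_get_block_size() parsing "riscv,cbom-block-size" */
    static int fake_dt_block_size(void)   { return 64; }
    /* stand-in for acpi_get_cbo_block_size() parsing the RHCT CMO node */
    static int fake_rhct_block_size(void) { return 64; }

    int main(void)
    {
            int cbom = acpi_disabled_flag ? fake_dt_block_size()
                                          : fake_rhct_block_size();

            printf("cbom block size: %d bytes\n", cbom);
            return 0;
    }
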
diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
index 341bd6706..4e4e469b8 100644
--- a/arch/riscv/mm/dma-noncoherent.c
+++ b/arch/riscv/mm/dma-noncoherent.c
@@ -25,7 +25,7 @@ static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
return;
}
#endif
- ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(CLEAN, vaddr, size, riscv_cbom_block_size);
}
static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
@@ -39,7 +39,7 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
}
#endif
- ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(INVAL, vaddr, size, riscv_cbom_block_size);
}
static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
@@ -53,7 +53,7 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
}
#endif
- ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(FLUSH, vaddr, size, riscv_cbom_block_size);
}
static inline bool arch_sync_dma_clean_before_fromdevice(void)
@@ -125,7 +125,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
}
#endif
- ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(FLUSH, flush_addr, size, riscv_cbom_block_size);
}
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
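
Every change in this file is just the case of the op argument: as of 6.7, ALT_CMO_OP() pastes the token into helper-macro names (CBO_CLEAN and friends) instead of stringizing it straight into the mnemonic, so callers must spell it in uppercase. A stand-alone illustration of the two preprocessor idioms, using simplified stand-ins rather than the kernel macros:

    #include <stdio.h>

    /* old style: the lowercase token is stringized into the mnemonic */
    #define CBO_INSN_STR(op)  "cbo." #op " (a0)"

    /* new style: the uppercase token selects a named macro */
    #define CBO_CLEAN "cbo.clean (a0)"
    #define CBO_FLUSH "cbo.flush (a0)"
    #define CBO_INVAL "cbo.inval (a0)"
    #define CBO_PICK(op) CBO_##op

    int main(void)
    {
            printf("%s\n", CBO_INSN_STR(clean));  /* cbo.clean (a0) */
            printf("%s\n", CBO_PICK(CLEAN));      /* cbo.clean (a0) */
            return 0;
    }
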
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
index 90d4ba36d..081339ddf 100644
--- a/arch/riscv/mm/fault.c
+++ b/arch/riscv/mm/fault.c
@@ -304,6 +304,8 @@ void handle_page_fault(struct pt_regs *regs)
goto done;
}
count_vm_vma_lock_event(VMA_LOCK_RETRY);
+ if (fault & VM_FAULT_MAJOR)
+ flags |= FAULT_FLAG_TRIED;
if (fault_signal_pending(fault, regs)) {
if (!user_mode(regs))
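
The two added lines mirror the equivalent VMA-lock handling on other architectures: if the first, VMA-locked attempt already performed major-fault work, the retry under mmap_lock carries FAULT_FLAG_TRIED so the major fault is not accounted twice. A toy demonstration of the flag propagation, with illustrative constants rather than the kernel's:

    #include <stdio.h>

    #define VM_FAULT_MAJOR   0x1u   /* illustrative value */
    #define FAULT_FLAG_TRIED 0x2u   /* illustrative value */

    int main(void)
    {
            unsigned int fault = VM_FAULT_MAJOR; /* outcome of first attempt */
            unsigned int flags = 0;

            if (fault & VM_FAULT_MAJOR)          /* same test as the hunk */
                    flags |= FAULT_FLAG_TRIED;

            printf("retry flags: %#x\n", flags);
            return 0;
    }
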
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
index b52f02104..e7b692818 100644
--- a/arch/riscv/mm/hugetlbpage.c
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -125,6 +125,26 @@ pte_t *huge_pte_offset(struct mm_struct *mm,
return pte;
}
+unsigned long hugetlb_mask_last_page(struct hstate *h)
+{
+ unsigned long hp_size = huge_page_size(h);
+
+ switch (hp_size) {
+#ifndef __PAGETABLE_PMD_FOLDED
+ case PUD_SIZE:
+ return P4D_SIZE - PUD_SIZE;
+#endif
+ case PMD_SIZE:
+ return PUD_SIZE - PMD_SIZE;
+ case napot_cont_size(NAPOT_CONT64KB_ORDER):
+ return PMD_SIZE - napot_cont_size(NAPOT_CONT64KB_ORDER);
+ default:
+ break;
+ }
+
+ return 0UL;
+}
+
static pte_t get_clear_contig(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
@@ -177,13 +197,36 @@ pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags)
return entry;
}
+static void clear_flush(struct mm_struct *mm,
+ unsigned long addr,
+ pte_t *ptep,
+ unsigned long pgsize,
+ unsigned long ncontig)
+{
+ struct vm_area_struct vma = TLB_FLUSH_VMA(mm, 0);
+ unsigned long i, saddr = addr;
+
+ for (i = 0; i < ncontig; i++, addr += pgsize, ptep++)
+ ptep_get_and_clear(mm, addr, ptep);
+
+ flush_tlb_range(&vma, saddr, addr);
+}
+
+/*
+ * When dealing with NAPOT mappings, the privileged specification indicates that
+ * "if an update needs to be made, the OS generally should first mark all of the
+ * PTEs invalid, then issue SFENCE.VMA instruction(s) covering all 4 KiB regions
+ * within the range, [...] then update the PTE(s), as described in Section
+ * 4.2.1.". That's the equivalent of the Break-Before-Make approach used by
+ * arm64.
+ */
void set_huge_pte_at(struct mm_struct *mm,
unsigned long addr,
pte_t *ptep,
pte_t pte,
unsigned long sz)
{
- unsigned long hugepage_shift;
+ unsigned long hugepage_shift, pgsize;
int i, pte_num;
if (sz >= PGDIR_SIZE)
@@ -198,7 +241,22 @@ void set_huge_pte_at(struct mm_struct *mm,
hugepage_shift = PAGE_SHIFT;
pte_num = sz >> hugepage_shift;
- for (i = 0; i < pte_num; i++, ptep++, addr += (1 << hugepage_shift))
+ pgsize = 1 << hugepage_shift;
+
+ if (!pte_present(pte)) {
+ for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
+ set_ptes(mm, addr, ptep, pte, 1);
+ return;
+ }
+
+ if (!pte_napot(pte)) {
+ set_ptes(mm, addr, ptep, pte, 1);
+ return;
+ }
+
+ clear_flush(mm, addr, ptep, pgsize, pte_num);
+
+ for (i = 0; i < pte_num; i++, ptep++, addr += pgsize)
set_pte_at(mm, addr, ptep, pte);
}
@@ -306,7 +364,7 @@ void huge_pte_clear(struct mm_struct *mm,
pte_clear(mm, addr, ptep);
}
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
{
unsigned long order;
@@ -334,7 +392,7 @@ arch_initcall(napot_hugetlbpages_init);
#else
-static __init bool is_napot_size(unsigned long size)
+static bool is_napot_size(unsigned long size)
{
return false;
}
@@ -351,7 +409,7 @@ int pmd_huge(pmd_t pmd)
return pmd_leaf(pmd);
}
-bool __init arch_hugetlb_valid_size(unsigned long size)
+static bool __hugetlb_valid_size(unsigned long size)
{
if (size == HPAGE_SIZE)
return true;
@@ -363,6 +421,16 @@ bool __init arch_hugetlb_valid_size(unsigned long size)
return false;
}
+bool __init arch_hugetlb_valid_size(unsigned long size)
+{
+ return __hugetlb_valid_size(size);
+}
+
+bool arch_hugetlb_migration_supported(struct hstate *h)
+{
+ return __hugetlb_valid_size(huge_page_size(h));
+}
+
#ifdef CONFIG_CONTIG_ALLOC
static __init int gigantic_pages_init(void)
{
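
Two things happen in this file: hugetlb_mask_last_page() lets the core walker jump over the rest of a table once it reaches its last huge page, and set_huge_pte_at() now performs a Break-Before-Make sequence (clear_flush()) for present NAPOT mappings, per the privileged-spec quote above. The mask arithmetic is simply the span of the next level minus one huge page; a user-space sketch, assuming RV64/Sv39-style sizes and a 64-bit unsigned long:

    #include <stdio.h>

    #define PMD_SIZE  (2UL << 20)   /* 2 MiB */
    #define PUD_SIZE  (1UL << 30)   /* 1 GiB */
    #define P4D_SIZE  (512UL << 30) /* 512 GiB */
    #define NAPOT_64K (64UL << 10)  /* napot_cont_size(NAPOT_CONT64KB_ORDER) */

    static unsigned long mask_last_page(unsigned long hp_size)
    {
            if (hp_size == PUD_SIZE)
                    return P4D_SIZE - PUD_SIZE;
            if (hp_size == PMD_SIZE)
                    return PUD_SIZE - PMD_SIZE;
            if (hp_size == NAPOT_64K)
                    return PMD_SIZE - NAPOT_64K;
            return 0;   /* unknown size: no skipping */
    }

    int main(void)
    {
            printf("PMD huge page mask: %#lx\n", mask_last_page(PMD_SIZE));
            printf("64K NAPOT mask:     %#lx\n", mask_last_page(NAPOT_64K));
            return 0;
    }
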
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index 0798bd861..ee224fe18 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -49,10 +49,12 @@ u64 satp_mode __ro_after_init = SATP_MODE_32;
#endif
EXPORT_SYMBOL(satp_mode);
+#ifdef CONFIG_64BIT
bool pgtable_l4_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
bool pgtable_l5_enabled = IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_XIP_KERNEL);
EXPORT_SYMBOL(pgtable_l4_enabled);
EXPORT_SYMBOL(pgtable_l5_enabled);
+#endif
phys_addr_t phys_ram_base __ro_after_init;
EXPORT_SYMBOL(phys_ram_base);
@@ -65,7 +67,7 @@ extern char _start[];
void *_dtb_early_va __initdata;
uintptr_t _dtb_early_pa __initdata;
-static phys_addr_t dma32_phys_limit __initdata;
+phys_addr_t dma32_phys_limit __initdata;
static void __init zone_sizes_init(void)
{
@@ -172,6 +174,9 @@ void __init mem_init(void)
/* Limit the memory size via mem. */
static phys_addr_t memory_limit;
+#ifdef CONFIG_XIP_KERNEL
+#define memory_limit (*(phys_addr_t *)XIP_FIXUP(&memory_limit))
+#endif /* CONFIG_XIP_KERNEL */
static int __init early_mem(char *p)
{
@@ -664,16 +669,16 @@ void __init create_pgd_mapping(pgd_t *pgdp,
static uintptr_t __init best_map_size(phys_addr_t pa, uintptr_t va,
phys_addr_t size)
{
- if (!(pa & (PGDIR_SIZE - 1)) && !(va & (PGDIR_SIZE - 1)) && size >= PGDIR_SIZE)
- return PGDIR_SIZE;
-
- if (!(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
+ if (pgtable_l5_enabled &&
+ !(pa & (P4D_SIZE - 1)) && !(va & (P4D_SIZE - 1)) && size >= P4D_SIZE)
return P4D_SIZE;
- if (!(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
+ if (pgtable_l4_enabled &&
+ !(pa & (PUD_SIZE - 1)) && !(va & (PUD_SIZE - 1)) && size >= PUD_SIZE)
return PUD_SIZE;
- if (!(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
+ if (IS_ENABLED(CONFIG_64BIT) &&
+ !(pa & (PMD_SIZE - 1)) && !(va & (PMD_SIZE - 1)) && size >= PMD_SIZE)
return PMD_SIZE;
return PAGE_SIZE;
@@ -950,7 +955,7 @@ static void __init create_fdt_early_page_table(uintptr_t fix_fdt_va,
* setup_vm_final installs the linear mapping. For 32-bit kernel, as the
* kernel is mapped in the linear mapping, that makes no difference.
*/
- dtb_early_va = kernel_mapping_pa_to_va(XIP_FIXUP(dtb_pa));
+ dtb_early_va = kernel_mapping_pa_to_va(dtb_pa);
#endif
dtb_early_pa = dtb_pa;
@@ -1053,9 +1058,13 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
#endif
kernel_map.virt_addr = KERNEL_LINK_ADDR + kernel_map.virt_offset;
- kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
#ifdef CONFIG_XIP_KERNEL
+#ifdef CONFIG_64BIT
+ kernel_map.page_offset = PAGE_OFFSET_L3;
+#else
+ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
+#endif
kernel_map.xiprom = (uintptr_t)CONFIG_XIP_PHYS_ADDR;
kernel_map.xiprom_sz = (uintptr_t)(&_exiprom) - (uintptr_t)(&_xiprom);
@@ -1065,6 +1074,7 @@ asmlinkage void __init setup_vm(uintptr_t dtb_pa)
kernel_map.va_kernel_xip_pa_offset = kernel_map.virt_addr - kernel_map.xiprom;
#else
+ kernel_map.page_offset = _AC(CONFIG_PAGE_OFFSET, UL);
kernel_map.phys_addr = (uintptr_t)(&_start);
kernel_map.size = (uintptr_t)(&_end) - kernel_map.phys_addr;
#endif
@@ -1333,28 +1343,6 @@ static inline void setup_vm_final(void)
}
#endif /* CONFIG_MMU */
-/* Reserve 128M low memory by default for swiotlb buffer */
-#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
-
-static int __init reserve_crashkernel_low(unsigned long long low_size)
-{
- unsigned long long low_base;
-
- low_base = memblock_phys_alloc_range(low_size, PMD_SIZE, 0, dma32_phys_limit);
- if (!low_base) {
- pr_err("cannot allocate crashkernel low memory (size:0x%llx).\n", low_size);
- return -ENOMEM;
- }
-
- pr_info("crashkernel low memory reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
- low_base, low_base + low_size, low_size >> 20);
-
- crashk_low_res.start = low_base;
- crashk_low_res.end = low_base + low_size - 1;
-
- return 0;
-}
-
/*
* reserve_crashkernel() - reserves memory for crash kernel
*
@@ -1362,122 +1350,25 @@ static int __init reserve_crashkernel_low(unsigned long long low_size)
* line parameter. The memory reserved is used by dump capture kernel when
* primary kernel is crashing.
*/
-static void __init reserve_crashkernel(void)
+static void __init arch_reserve_crashkernel(void)
{
- unsigned long long crash_base = 0;
- unsigned long long crash_size = 0;
- unsigned long long crash_low_size = 0;
- unsigned long search_start = memblock_start_of_DRAM();
- unsigned long search_end = (unsigned long)dma32_phys_limit;
+ unsigned long long low_size = 0;
+ unsigned long long crash_base, crash_size;
char *cmdline = boot_command_line;
- bool fixed_base = false;
bool high = false;
-
- int ret = 0;
+ int ret;
if (!IS_ENABLED(CONFIG_KEXEC_CORE))
return;
- /*
- * Don't reserve a region for a crash kernel on a crash kernel
- * since it doesn't make much sense and we have limited memory
- * resources.
- */
- if (is_kdump_kernel()) {
- pr_info("crashkernel: ignoring reservation request\n");
- return;
- }
ret = parse_crashkernel(cmdline, memblock_phys_mem_size(),
- &crash_size, &crash_base);
- if (ret == -ENOENT) {
- /* Fallback to crashkernel=X,[high,low] */
- ret = parse_crashkernel_high(cmdline, 0, &crash_size, &crash_base);
- if (ret || !crash_size)
- return;
-
- /*
- * crashkernel=Y,low is valid only when crashkernel=X,high
- * is passed.
- */
- ret = parse_crashkernel_low(cmdline, 0, &crash_low_size, &crash_base);
- if (ret == -ENOENT)
- crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
- else if (ret)
- return;
-
- search_start = (unsigned long)dma32_phys_limit;
- search_end = memblock_end_of_DRAM();
- high = true;
- } else if (ret || !crash_size) {
- /* Invalid argument value specified */
+ &crash_size, &crash_base,
+ &low_size, &high);
+ if (ret)
return;
- }
-
- crash_size = PAGE_ALIGN(crash_size);
- if (crash_base) {
- fixed_base = true;
- search_start = crash_base;
- search_end = crash_base + crash_size;
- }
-
- /*
- * Current riscv boot protocol requires 2MB alignment for
- * RV64 and 4MB alignment for RV32 (hugepage size)
- *
- * Try to alloc from 32bit addressible physical memory so that
- * swiotlb can work on the crash kernel.
- */
- crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start, search_end);
- if (crash_base == 0) {
- /*
- * For crashkernel=size[KMG]@offset[KMG], print out failure
- * message if can't reserve the specified region.
- */
- if (fixed_base) {
- pr_warn("crashkernel: allocating failed with given size@offset\n");
- return;
- }
-
- if (high) {
- /*
- * For crashkernel=size[KMG],high, if the first attempt was
- * for high memory, fall back to low memory.
- */
- search_start = memblock_start_of_DRAM();
- search_end = (unsigned long)dma32_phys_limit;
- } else {
- /*
- * For crashkernel=size[KMG], if the first attempt was for
- * low memory, fall back to high memory, the minimum required
- * low memory will be reserved later.
- */
- search_start = (unsigned long)dma32_phys_limit;
- search_end = memblock_end_of_DRAM();
- crash_low_size = DEFAULT_CRASH_KERNEL_LOW_SIZE;
- }
-
- crash_base = memblock_phys_alloc_range(crash_size, PMD_SIZE,
- search_start, search_end);
- if (crash_base == 0) {
- pr_warn("crashkernel: couldn't allocate %lldKB\n",
- crash_size >> 10);
- return;
- }
- }
-
- if ((crash_base >= dma32_phys_limit) && crash_low_size &&
- reserve_crashkernel_low(crash_low_size)) {
- memblock_phys_free(crash_base, crash_size);
- return;
- }
-
- pr_info("crashkernel: reserved 0x%016llx - 0x%016llx (%lld MB)\n",
- crash_base, crash_base + crash_size, crash_size >> 20);
-
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
+ reserve_crashkernel_generic(cmdline, crash_size, crash_base,
+ low_size, high);
}
void __init paging_init(void)
@@ -1494,8 +1385,12 @@ void __init misc_mem_init(void)
early_memtest(min_low_pfn << PAGE_SHIFT, max_low_pfn << PAGE_SHIFT);
arch_numa_init();
sparse_init();
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ /* The entire VMEMMAP region has been populated. Flush TLB for this region */
+ local_flush_tlb_kernel_range(VMEMMAP_START, VMEMMAP_END);
+#endif
zone_sizes_init();
- reserve_crashkernel();
+ arch_reserve_crashkernel();
memblock_dump_all();
}
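
Among the init.c changes, best_map_size() is the most self-contained fix: it used to assume every page-table level exists, and now gates each candidate size on the paging mode actually in use (plus 64-bit for PMD mappings). A compact sketch of the corrected selection, with assumed RV64 constants and l4/l5 flags modeling pgtable_l4_enabled/pgtable_l5_enabled:

    #include <stdbool.h>
    #include <stdio.h>

    #define PAGE_SIZE (4UL << 10)
    #define PMD_SIZE  (2UL << 20)
    #define PUD_SIZE  (1UL << 30)
    #define P4D_SIZE  (512UL << 30)

    /* usable only if pa and va are aligned and the region is big enough */
    static bool fits(unsigned long pa, unsigned long va,
                     unsigned long size, unsigned long map)
    {
            return !(pa & (map - 1)) && !(va & (map - 1)) && size >= map;
    }

    static unsigned long best_map_size(unsigned long pa, unsigned long va,
                                       unsigned long size, bool l4, bool l5)
    {
            if (l5 && fits(pa, va, size, P4D_SIZE))
                    return P4D_SIZE;
            if (l4 && fits(pa, va, size, PUD_SIZE))
                    return PUD_SIZE;
            if (fits(pa, va, size, PMD_SIZE))  /* RV64 assumed here */
                    return PMD_SIZE;
            return PAGE_SIZE;
    }

    int main(void)
    {
            /* 1 GiB-aligned 2 GiB region: PUD mapping only if level 4 exists */
            printf("%#lx\n", best_map_size(1UL << 30, 1UL << 30, 2UL << 30,
                                           false, false)); /* 0x200000 */
            printf("%#lx\n", best_map_size(1UL << 30, 1UL << 30, 2UL << 30,
                                           true, false));  /* 0x40000000 */
            return 0;
    }
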
diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
index c5fc5ec96..370a422ed 100644
--- a/arch/riscv/mm/pmem.c
+++ b/arch/riscv/mm/pmem.c
@@ -17,7 +17,7 @@ void arch_wb_cache_pmem(void *addr, size_t size)
return;
}
#endif
- ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(CLEAN, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
@@ -29,6 +29,6 @@ void arch_invalidate_pmem(void *addr, size_t size)
return;
}
#endif
- ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
+ ALT_CMO_OP(INVAL, addr, size, riscv_cbom_block_size);
}
EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
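
Same spelling change as in dma-noncoherent.c. For reference, the Zicbom semantics behind the choice of op: clean writes dirty lines back (what arch_wb_cache_pmem needs before data can be considered persistent), inval discards them (arch_invalidate_pmem), and flush does both. A trivial sketch of that mapping, with an illustrative enum rather than kernel definitions:

    #include <stdio.h>

    enum cmo { CMO_CLEAN, CMO_INVAL, CMO_FLUSH };

    static const char * const cmo_insn[] = {
            "cbo.clean", "cbo.inval", "cbo.flush"
    };

    /* dirty lines must reach memory -> write back */
    static enum cmo op_for_writeback(void)  { return CMO_CLEAN; }
    /* stale lines must be dropped -> invalidate */
    static enum cmo op_for_invalidate(void) { return CMO_INVAL; }

    int main(void)
    {
            printf("arch_wb_cache_pmem   -> %s\n", cmo_insn[op_for_writeback()]);
            printf("arch_invalidate_pmem -> %s\n", cmo_insn[op_for_invalidate()]);
            return 0;
    }
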
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
index e9090b38f..657c27bc0 100644
--- a/arch/riscv/mm/ptdump.c
+++ b/arch/riscv/mm/ptdump.c
@@ -129,55 +129,55 @@ static struct ptd_mm_info efi_ptd_info = {
/* Page Table Entry */
struct prot_bits {
u64 mask;
- u64 val;
const char *set;
const char *clear;
};
static const struct prot_bits pte_bits[] = {
{
+#ifdef CONFIG_64BIT
+ .mask = _PAGE_NAPOT,
+ .set = "N",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_MTMASK_SVPBMT,
+ .set = "MT(%s)",
+ .clear = " .. ",
+ }, {
+#endif
.mask = _PAGE_SOFT,
- .val = _PAGE_SOFT,
- .set = "RSW",
- .clear = " ",
+ .set = "RSW(%d)",
+ .clear = " .. ",
}, {
.mask = _PAGE_DIRTY,
- .val = _PAGE_DIRTY,
.set = "D",
.clear = ".",
}, {
.mask = _PAGE_ACCESSED,
- .val = _PAGE_ACCESSED,
.set = "A",
.clear = ".",
}, {
.mask = _PAGE_GLOBAL,
- .val = _PAGE_GLOBAL,
.set = "G",
.clear = ".",
}, {
.mask = _PAGE_USER,
- .val = _PAGE_USER,
.set = "U",
.clear = ".",
}, {
.mask = _PAGE_EXEC,
- .val = _PAGE_EXEC,
.set = "X",
.clear = ".",
}, {
.mask = _PAGE_WRITE,
- .val = _PAGE_WRITE,
.set = "W",
.clear = ".",
}, {
.mask = _PAGE_READ,
- .val = _PAGE_READ,
.set = "R",
.clear = ".",
}, {
.mask = _PAGE_PRESENT,
- .val = _PAGE_PRESENT,
.set = "V",
.clear = ".",
}
@@ -208,15 +208,30 @@ static void dump_prot(struct pg_state *st)
unsigned int i;
for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
- const char *s;
+ char s[7];
+ unsigned long val;
- if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
- s = pte_bits[i].set;
- else
- s = pte_bits[i].clear;
+ val = st->current_prot & pte_bits[i].mask;
+ if (val) {
+ if (pte_bits[i].mask == _PAGE_SOFT)
+ sprintf(s, pte_bits[i].set, val >> 8);
+#ifdef CONFIG_64BIT
+ else if (pte_bits[i].mask == _PAGE_MTMASK_SVPBMT) {
+ if (val == _PAGE_NOCACHE_SVPBMT)
+ sprintf(s, pte_bits[i].set, "NC");
+ else if (val == _PAGE_IO_SVPBMT)
+ sprintf(s, pte_bits[i].set, "IO");
+ else
+ sprintf(s, pte_bits[i].set, "??");
+ }
+#endif
+ else
+ sprintf(s, "%s", pte_bits[i].set);
+ } else {
+ sprintf(s, "%s", pte_bits[i].clear);
+ }
- if (s)
- pt_dump_seq_printf(st->seq, " %s", s);
+ pt_dump_seq_printf(st->seq, " %s", s);
}
}
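
Dropping the .val field lets dump_prot() treat multi-bit fields (the two RSW software bits, the Svpbmt MT field) as values to decode instead of exact-match patterns. A stand-alone sketch of the new decode flow, using RV64 PTE bit positions and illustrative helper names:

    #include <stdio.h>

    #define PAGE_READ  (1UL << 1)
    #define PAGE_WRITE (1UL << 2)
    #define PAGE_EXEC  (1UL << 3)
    #define PAGE_SOFT  (3UL << 8)    /* RSW: two software bits */
    #define PAGE_MT    (3UL << 61)   /* Svpbmt memory-type field */
    #define MT_NC      (1UL << 61)
    #define MT_IO      (2UL << 61)

    static void dump_prot(unsigned long prot)
    {
            unsigned long mt  = prot & PAGE_MT;
            unsigned long rsw = (prot & PAGE_SOFT) >> 8;

            if (mt)   /* multi-bit field: print decoded name */
                    printf("MT(%s) ", mt == MT_NC ? "NC" :
                                      mt == MT_IO ? "IO" : "??");
            if (rsw)  /* multi-bit field: print raw value */
                    printf("RSW(%lu) ", rsw);
            printf("%c%c%c\n",                 /* single-bit flags */
                   prot & PAGE_EXEC  ? 'X' : '.',
                   prot & PAGE_WRITE ? 'W' : '.',
                   prot & PAGE_READ  ? 'R' : '.');
    }

    int main(void)
    {
            dump_prot(PAGE_READ | PAGE_WRITE | (1UL << 8) | MT_IO);
            return 0;   /* prints: MT(IO) RSW(1) .WR */
    }
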
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 77be59aad..1f90721d2 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -3,33 +3,56 @@
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/sched.h>
+#include <linux/hugetlb.h>
#include <asm/sbi.h>
#include <asm/mmu_context.h>
static inline void local_flush_tlb_all_asid(unsigned long asid)
{
- __asm__ __volatile__ ("sfence.vma x0, %0"
- :
- : "r" (asid)
- : "memory");
+ if (asid != FLUSH_TLB_NO_ASID)
+ __asm__ __volatile__ ("sfence.vma x0, %0"
+ :
+ : "r" (asid)
+ : "memory");
+ else
+ local_flush_tlb_all();
}
static inline void local_flush_tlb_page_asid(unsigned long addr,
unsigned long asid)
{
- __asm__ __volatile__ ("sfence.vma %0, %1"
- :
- : "r" (addr), "r" (asid)
- : "memory");
+ if (asid != FLUSH_TLB_NO_ASID)
+ __asm__ __volatile__ ("sfence.vma %0, %1"
+ :
+ : "r" (addr), "r" (asid)
+ : "memory");
+ else
+ local_flush_tlb_page(addr);
}
-static inline void local_flush_tlb_range(unsigned long start,
- unsigned long size, unsigned long stride)
+/*
+ * Flush entire TLB if number of entries to be flushed is greater
+ * than the threshold below.
+ */
+static unsigned long tlb_flush_all_threshold __read_mostly = 64;
+
+static void local_flush_tlb_range_threshold_asid(unsigned long start,
+ unsigned long size,
+ unsigned long stride,
+ unsigned long asid)
{
- if (size <= stride)
- local_flush_tlb_page(start);
- else
- local_flush_tlb_all();
+ unsigned long nr_ptes_in_range = DIV_ROUND_UP(size, stride);
+ int i;
+
+ if (nr_ptes_in_range > tlb_flush_all_threshold) {
+ local_flush_tlb_all_asid(asid);
+ return;
+ }
+
+ for (i = 0; i < nr_ptes_in_range; ++i) {
+ local_flush_tlb_page_asid(start, asid);
+ start += stride;
+ }
}
static inline void local_flush_tlb_range_asid(unsigned long start,
@@ -37,8 +60,16 @@ static inline void local_flush_tlb_range_asid(unsigned long start,
{
if (size <= stride)
local_flush_tlb_page_asid(start, asid);
- else
+ else if (size == FLUSH_TLB_MAX_SIZE)
local_flush_tlb_all_asid(asid);
+ else
+ local_flush_tlb_range_threshold_asid(start, size, stride, asid);
+}
+
+/* Flush a range of kernel pages without broadcasting */
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ local_flush_tlb_range_asid(start, end - start, PAGE_SIZE, FLUSH_TLB_NO_ASID);
}
static void __ipi_flush_tlb_all(void *info)
@@ -51,7 +82,7 @@ void flush_tlb_all(void)
if (riscv_use_ipi_for_rfence())
on_each_cpu(__ipi_flush_tlb_all, NULL, 1);
else
- sbi_remote_sfence_vma(NULL, 0, -1);
+ sbi_remote_sfence_vma_asid(NULL, 0, FLUSH_TLB_MAX_SIZE, FLUSH_TLB_NO_ASID);
}
struct flush_tlb_range_data {
@@ -68,68 +99,62 @@ static void __ipi_flush_tlb_range_asid(void *info)
local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
}
-static void __ipi_flush_tlb_range(void *info)
-{
- struct flush_tlb_range_data *d = info;
-
- local_flush_tlb_range(d->start, d->size, d->stride);
-}
-
static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
unsigned long size, unsigned long stride)
{
struct flush_tlb_range_data ftd;
- struct cpumask *cmask = mm_cpumask(mm);
- unsigned int cpuid;
+ const struct cpumask *cmask;
+ unsigned long asid = FLUSH_TLB_NO_ASID;
bool broadcast;
- if (cpumask_empty(cmask))
- return;
+ if (mm) {
+ unsigned int cpuid;
- cpuid = get_cpu();
- /* check if the tlbflush needs to be sent to other CPUs */
- broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
- if (static_branch_unlikely(&use_asid_allocator)) {
- unsigned long asid = atomic_long_read(&mm->context.id) & asid_mask;
-
- if (broadcast) {
- if (riscv_use_ipi_for_rfence()) {
- ftd.asid = asid;
- ftd.start = start;
- ftd.size = size;
- ftd.stride = stride;
- on_each_cpu_mask(cmask,
- __ipi_flush_tlb_range_asid,
- &ftd, 1);
- } else
- sbi_remote_sfence_vma_asid(cmask,
- start, size, asid);
- } else {
- local_flush_tlb_range_asid(start, size, stride, asid);
- }
+ cmask = mm_cpumask(mm);
+ if (cpumask_empty(cmask))
+ return;
+
+ cpuid = get_cpu();
+ /* check if the tlbflush needs to be sent to other CPUs */
+ broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
+
+ if (static_branch_unlikely(&use_asid_allocator))
+ asid = atomic_long_read(&mm->context.id) & asid_mask;
} else {
- if (broadcast) {
- if (riscv_use_ipi_for_rfence()) {
- ftd.asid = 0;
- ftd.start = start;
- ftd.size = size;
- ftd.stride = stride;
- on_each_cpu_mask(cmask,
- __ipi_flush_tlb_range,
- &ftd, 1);
- } else
- sbi_remote_sfence_vma(cmask, start, size);
- } else {
- local_flush_tlb_range(start, size, stride);
- }
+ cmask = cpu_online_mask;
+ broadcast = true;
}
- put_cpu();
+ if (broadcast) {
+ if (riscv_use_ipi_for_rfence()) {
+ ftd.asid = asid;
+ ftd.start = start;
+ ftd.size = size;
+ ftd.stride = stride;
+ on_each_cpu_mask(cmask,
+ __ipi_flush_tlb_range_asid,
+ &ftd, 1);
+ } else
+ sbi_remote_sfence_vma_asid(cmask,
+ start, size, asid);
+ } else {
+ local_flush_tlb_range_asid(start, size, stride, asid);
+ }
+
+ if (mm)
+ put_cpu();
}
void flush_tlb_mm(struct mm_struct *mm)
{
- __flush_tlb_range(mm, 0, -1, PAGE_SIZE);
+ __flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+}
+
+void flush_tlb_mm_range(struct mm_struct *mm,
+ unsigned long start, unsigned long end,
+ unsigned int page_size)
+{
+ __flush_tlb_range(mm, start, end - start, page_size);
}
void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
@@ -140,8 +165,40 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
- __flush_tlb_range(vma->vm_mm, start, end - start, PAGE_SIZE);
+ unsigned long stride_size;
+
+ if (!is_vm_hugetlb_page(vma)) {
+ stride_size = PAGE_SIZE;
+ } else {
+ stride_size = huge_page_size(hstate_vma(vma));
+
+ /*
+ * As stated in the privileged specification, every PTE in a
+ * NAPOT region must be invalidated, so reset the stride in that
+ * case.
+ */
+ if (has_svnapot()) {
+ if (stride_size >= PGDIR_SIZE)
+ stride_size = PGDIR_SIZE;
+ else if (stride_size >= P4D_SIZE)
+ stride_size = P4D_SIZE;
+ else if (stride_size >= PUD_SIZE)
+ stride_size = PUD_SIZE;
+ else if (stride_size >= PMD_SIZE)
+ stride_size = PMD_SIZE;
+ else
+ stride_size = PAGE_SIZE;
+ }
+ }
+
+ __flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ __flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+}
+
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
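
The tlbflush.c rework replaces the old "one page or everything" policy with a threshold: ranges of at most 64 entries are flushed page-by-page, larger ones fall back to a full (per-ASID) flush, and the FLUSH_TLB_NO_ASID/FLUSH_TLB_MAX_SIZE sentinels let a single __flush_tlb_range() serve the mm, kernel-range (mm == NULL), and global cases. A user-space sketch of the threshold heuristic, with stubbed flush primitives:

    #include <stdio.h>

    #define PAGE_SIZE           4096UL
    #define FLUSH_ALL_THRESHOLD 64UL   /* matches tlb_flush_all_threshold */

    static void flush_one(unsigned long addr) { printf("sfence.vma %#lx\n", addr); }
    static void flush_all(void)               { printf("sfence.vma (all)\n"); }

    static void flush_range(unsigned long start, unsigned long size,
                            unsigned long stride)
    {
            unsigned long nr = (size + stride - 1) / stride; /* DIV_ROUND_UP */

            if (nr > FLUSH_ALL_THRESHOLD) {
                    flush_all();          /* cheaper than 64+ targeted flushes */
                    return;
            }
            while (nr--) {
                    flush_one(start);
                    start += stride;
            }
    }

    int main(void)
    {
            flush_range(0x10000, 3 * PAGE_SIZE, PAGE_SIZE); /* 3 targeted flushes */
            flush_range(0x10000, 1UL << 30, PAGE_SIZE);     /* falls back to all */
            return 0;
    }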