From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 arch/riscv/mm/cacheflush.c | 128 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 128 insertions(+)
 create mode 100644 arch/riscv/mm/cacheflush.c

(limited to 'arch/riscv/mm/cacheflush.c')

diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
new file mode 100644
index 000000000..8a2e7040f
--- /dev/null
+++ b/arch/riscv/mm/cacheflush.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/of.h>
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
+void flush_icache_all(void)
+{
+	local_flush_icache_all();
+
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
+}
+EXPORT_SYMBOL(flush_icache_all);
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm == current->active_mm && local) {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart. Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		sbi_remote_fence_i(&others);
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_MMU
+void flush_icache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+
+	if (!test_bit(PG_dcache_clean, &page->flags)) {
+		flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
+	}
+}
+#endif /* CONFIG_MMU */
+
+unsigned int riscv_cbom_block_size;
+EXPORT_SYMBOL_GPL(riscv_cbom_block_size);
+
+void riscv_init_cbom_blocksize(void)
+{
+	struct device_node *node;
+	unsigned long cbom_hartid;
+	u32 val, probed_block_size;
+	int ret;
+
+	probed_block_size = 0;
+	for_each_of_cpu_node(node) {
+		unsigned long hartid;
+
+		ret = riscv_of_processor_hartid(node, &hartid);
+		if (ret)
+			continue;
+
+		/* set block-size for cbom extension if available */
+		ret = of_property_read_u32(node, "riscv,cbom-block-size", &val);
+		if (ret)
+			continue;
+
+		if (!probed_block_size) {
+			probed_block_size = val;
+			cbom_hartid = hartid;
+		} else {
+			if (probed_block_size != val)
+				pr_warn("cbom-block-size mismatched between harts %lu and %lu\n",
+					cbom_hartid, hartid);
+		}
+	}
+
+	if (probed_block_size)
+		riscv_cbom_block_size = probed_block_size;
+}
--
cgit v1.2.3
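
Note (not part of the patch above): the comment in flush_icache_mm() refers to a deferred flush that runs before execution resumes on a hart whose bit is still set in icache_stale_mask. The sketch below is a minimal illustration, under the assumption that the consumer looks roughly like the flush_icache_deferred() helper on the context-switch path (arch/riscv/mm/context.c in mainline); the exact implementation there may differ.

/*
 * Illustrative sketch only: how a context-switch-side helper could consume
 * icache_stale_mask set by flush_icache_mm() above.
 */
static inline void flush_icache_deferred(struct mm_struct *mm, unsigned int cpu)
{
#ifdef CONFIG_SMP
	cpumask_t *mask = &mm->context.icache_stale_mask;

	if (cpumask_test_cpu(cpu, mask)) {
		cpumask_clear_cpu(cpu, mask);
		/*
		 * Ensure the remote hart's writes are visible to this hart.
		 * This pairs with the smp_mb() in flush_icache_mm().
		 */
		smp_mb();
		local_flush_icache_all();
	}
#endif
}

The pairing of the two barriers is what lets flush_icache_mm() skip IPIs for harts not currently running the MM context: those harts pick up the stale-mask bit and flush their own I$ the next time they schedule the context.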