author    | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-27 10:05:51 +0000
commit    | 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      | a94efe259b9009378be6d90eb30d2b019d95c194 /arch/riscv/mm/cacheflush.c
parent    | Initial commit. (diff)
download  | linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz
          | linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip
Adding upstream version 5.10.209. (upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/mm/cacheflush.c')
-rw-r--r-- | arch/riscv/mm/cacheflush.c | 93
1 file changed, 93 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
new file mode 100644
index 000000000..2ae1201cf
--- /dev/null
+++ b/arch/riscv/mm/cacheflush.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+static void ipi_remote_fence_i(void *info)
+{
+	return local_flush_icache_all();
+}
+
+void flush_icache_all(void)
+{
+	local_flush_icache_all();
+
+	if (IS_ENABLED(CONFIG_RISCV_SBI))
+		sbi_remote_fence_i(NULL);
+	else
+		on_each_cpu(ipi_remote_fence_i, NULL, 1);
+}
+EXPORT_SYMBOL(flush_icache_all);
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shoot downs, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, ie 'make -j') we avoid the
+ * IPIs for harts that are not currently executing a MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+	unsigned int cpu;
+	cpumask_t others, *mask;
+
+	preempt_disable();
+
+	/* Mark every hart's icache as needing a flush for this MM. */
+	mask = &mm->context.icache_stale_mask;
+	cpumask_setall(mask);
+	/* Flush this hart's I$ now, and mark it as flushed. */
+	cpu = smp_processor_id();
+	cpumask_clear_cpu(cpu, mask);
+	local_flush_icache_all();
+
+	/*
+	 * Flush the I$ of other harts concurrently executing, and mark them as
+	 * flushed.
+	 */
+	cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+	local |= cpumask_empty(&others);
+	if (mm == current->active_mm && local) {
+		/*
+		 * It's assumed that at least one strongly ordered operation is
+		 * performed on this hart between setting a hart's cpumask bit
+		 * and scheduling this MM context on that hart. Sending an SBI
+		 * remote message will do this, but in the case where no
+		 * messages are sent we still need to order this hart's writes
+		 * with flush_icache_deferred().
+		 */
+		smp_mb();
+	} else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+		cpumask_t hartid_mask;
+
+		riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+		sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+	} else {
+		on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
+	}
+
+	preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_MMU
+void flush_icache_pte(pte_t pte)
+{
+	struct page *page = pte_page(pte);
+
+	if (!test_bit(PG_dcache_clean, &page->flags)) {
+		flush_icache_all();
+		set_bit(PG_dcache_clean, &page->flags);
+	}
+}
+#endif /* CONFIG_MMU */
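
For context on the deferral the in-file comment describes: flush_icache_mm() only marks harts stale via mm->context.icache_stale_mask; the consumer it names, flush_icache_deferred(), is not part of this file (in the v5.10 tree it sits in arch/riscv/mm/context.c and runs on context switch). A minimal sketch of that consumer side, assuming the v5.10 layout; it is an illustration for reading this diff, not code added by this commit:

	/*
	 * Sketch of the deferred-flush consumer referenced above. Runs when
	 * a hart switches to this mm: if flush_icache_mm() marked this hart
	 * stale, flush the local icache before returning to userspace.
	 */
	static inline void flush_icache_deferred(struct mm_struct *mm)
	{
	#ifdef CONFIG_SMP
		unsigned int cpu = smp_processor_id();
		cpumask_t *mask = &mm->context.icache_stale_mask;

		if (cpumask_test_cpu(cpu, mask)) {
			cpumask_clear_cpu(cpu, mask);
			/*
			 * Ensure the remote hart's writes are visible here.
			 * This pairs with the smp_mb() in flush_icache_mm().
			 */
			smp_mb();
			local_flush_icache_all();
		}
	#endif
	}

Similarly, flush_icache_pte() at the bottom of the diff is reached when an executable mapping is installed; in this version the caller lives in arch/riscv/include/asm/pgtable.h and, roughly (again a sketch, not part of this commit):

	static inline void set_pte_at(struct mm_struct *mm,
		unsigned long addr, pte_t *ptep, pte_t pteval)
	{
		/* Flush before an executable mapping becomes visible. */
		if (pte_present(pteval) && pte_exec(pteval))
			flush_icache_pte(pteval);

		set_pte(ptep, pteval);
	}

The PG_dcache_clean bit makes that flush once-per-page: the first executable mapping of a page pays for a global icache flush, and later mappings skip it until the page's contents change again.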