author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-06 01:02:30 +0000
commit     76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree       f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/mips/include/asm/cacheflush.h
parent     Initial commit. (diff)
Adding upstream version 4.19.249.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/mips/include/asm/cacheflush.h')
-rw-r--r--  arch/mips/include/asm/cacheflush.h  155
1 file changed, 155 insertions, 0 deletions
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
new file mode 100644
index 000000000..4812d1fed
--- /dev/null
+++ b/arch/mips/include/asm/cacheflush.h
@@ -0,0 +1,155 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 by Ralf Baechle
+ * Copyright (C) 1999, 2000, 2001 Silicon Graphics, Inc.
+ */
+#ifndef _ASM_CACHEFLUSH_H
+#define _ASM_CACHEFLUSH_H
+
+/* Keep includes the same across arches. */
+#include <linux/mm.h>
+#include <asm/cpu-features.h>
+
+/* Cache flushing:
+ *
+ * - flush_cache_all() flushes entire cache
+ * - flush_cache_mm(mm) flushes the specified mm context's cache lines
+ * - flush_cache_dup_mm(mm) handles cache flushing when forking
+ * - flush_cache_page(mm, vmaddr, pfn) flushes a single page
+ * - flush_cache_range(vma, start, end) flushes a range of pages
+ * - flush_icache_range(start, end) flush a range of instructions
+ * - flush_dcache_page(pg) flushes (writes back & invalidates) a page for dcache
+ *
+ * MIPS specific flush operations:
+ *
+ * - flush_cache_sigtramp() flush signal trampoline
+ * - flush_icache_all() flush the entire instruction cache
+ * - flush_data_cache_page() flushes a page from the data cache
+ * - __flush_icache_user_range(start, end) flushes range of user instructions
+ */
+
+ /*
+  * This flag is used to indicate that the page pointed to by a pte
+  * is dirty and requires cleaning before returning it to the user.
+  */
+#define PG_dcache_dirty PG_arch_1
+
+#define Page_dcache_dirty(page)		\
+	test_bit(PG_dcache_dirty, &(page)->flags)
+#define SetPageDcacheDirty(page)	\
+	set_bit(PG_dcache_dirty, &(page)->flags)
+#define ClearPageDcacheDirty(page)	\
+	clear_bit(PG_dcache_dirty, &(page)->flags)
+
+extern void (*flush_cache_all)(void);
+extern void (*__flush_cache_all)(void);
+extern void (*flush_cache_mm)(struct mm_struct *mm);
+#define flush_cache_dup_mm(mm) do { (void) (mm); } while (0)
+extern void (*flush_cache_range)(struct vm_area_struct *vma,
+	unsigned long start, unsigned long end);
+extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
+extern void __flush_dcache_page(struct page *page);
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+static inline void flush_dcache_page(struct page *page)
+{
+	if (cpu_has_dc_aliases)
+		__flush_dcache_page(page);
+	else if (!cpu_has_ic_fills_f_dc)
+		SetPageDcacheDirty(page);
+}
+
+#define flush_dcache_mmap_lock(mapping) do { } while (0)
+#define flush_dcache_mmap_unlock(mapping) do { } while (0)
+
+#define ARCH_HAS_FLUSH_ANON_PAGE
+extern void __flush_anon_page(struct page *, unsigned long);
+static inline void flush_anon_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vmaddr)
+{
+	if (cpu_has_dc_aliases && PageAnon(page))
+		__flush_anon_page(page, vmaddr);
+}
+
+static inline void flush_icache_page(struct vm_area_struct *vma,
+	struct page *page)
+{
+}
+
+extern void (*flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*local_flush_icache_range)(unsigned long start, unsigned long end);
+extern void (*__flush_icache_user_range)(unsigned long start,
+	unsigned long end);
+extern void (*__local_flush_icache_user_range)(unsigned long start,
+	unsigned long end);
+
+extern void (*__flush_cache_vmap)(void);
+
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+	if (cpu_has_dc_aliases)
+		__flush_cache_vmap();
+}
+
+extern void (*__flush_cache_vunmap)(void);
+
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+	if (cpu_has_dc_aliases)
+		__flush_cache_vunmap();
+}
+
+extern void copy_to_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+
+extern void copy_from_user_page(struct vm_area_struct *vma,
+	struct page *page, unsigned long vaddr, void *dst, const void *src,
+	unsigned long len);
+
+extern void (*flush_cache_sigtramp)(unsigned long addr);
+extern void (*flush_icache_all)(void);
+extern void (*local_flush_data_cache_page)(void * addr);
+extern void (*flush_data_cache_page)(unsigned long addr);
+
+/* Run kernel code uncached, useful for cache probing functions. */
+unsigned long run_uncached(void *func);
+
+extern void *kmap_coherent(struct page *page, unsigned long addr);
+extern void kunmap_coherent(void);
+extern void *kmap_noncoherent(struct page *page, unsigned long addr);
+
+static inline void kunmap_noncoherent(void)
+{
+	kunmap_coherent();
+}
+
+#define ARCH_HAS_FLUSH_KERNEL_DCACHE_PAGE
+static inline void flush_kernel_dcache_page(struct page *page)
+{
+	BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+	flush_dcache_page(page);
+}
+
+/*
+ * For now flush_kernel_vmap_range and invalidate_kernel_vmap_range both do a
+ * cache writeback and invalidate operation.
+ */
+extern void (*__flush_kernel_vmap_range)(unsigned long vaddr, int size);
+
+static inline void flush_kernel_vmap_range(void *vaddr, int size)
+{
+	if (cpu_has_dc_aliases)
+		__flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
+static inline void invalidate_kernel_vmap_range(void *vaddr, int size)
+{
+	if (cpu_has_dc_aliases)
+		__flush_kernel_vmap_range((unsigned long) vaddr, size);
+}
+
+#endif /* _ASM_CACHEFLUSH_H */
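
For context, a minimal usage sketch of the cache-maintenance API summarized in the comment at the top of this header. It is an editorial illustration, not part of the patch; the helper names example_publish_code() and example_fill_user_page() and their buffers are hypothetical.

/*
 * Illustrative sketch only (not part of arch/mips/include/asm/cacheflush.h):
 * typical callers of the interfaces declared above.
 */
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <asm/cacheflush.h>

/* After writing instructions into memory (e.g. runtime code patching),
 * synchronise the I-cache with the D-cache before executing them. */
static void example_publish_code(void *text, size_t len)
{
	unsigned long start = (unsigned long)text;

	flush_icache_range(start, start + len);
}

/* After the kernel fills a page that user space will read through its own
 * mapping, flush (or lazily mark) the page so the data is visible there. */
static void example_fill_user_page(struct page *page, const void *src, size_t len)
{
	void *kaddr = kmap(page);

	memcpy(kaddr, src, len);
	kunmap(page);

	/* With aliasing D-caches this writes the page back now; otherwise it
	 * just sets PG_dcache_dirty for a later flush. */
	flush_dcache_page(page);
}

The second helper relies on the lazy-flush scheme in this header: on CPUs without D-cache aliases, flush_dcache_page() only sets PG_dcache_dirty, and the page is cleaned later, before it is handed back to the user.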