author | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000 |
---|---|---|
committer | Daniel Baumann <daniel.baumann@progress-linux.org> | 2024-04-11 08:27:49 +0000 |
commit | ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch) | |
tree | b2d64bc10158fdd5497876388cd68142ca374ed3 /include/asm-generic/cacheflush.h | |
parent | Initial commit. (diff) | |
download | linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip |
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'include/asm-generic/cacheflush.h')
-rw-r--r-- | include/asm-generic/cacheflush.h | 121 |
1 file changed, 121 insertions, 0 deletions
```diff
diff --git a/include/asm-generic/cacheflush.h b/include/asm-generic/cacheflush.h
new file mode 100644
index 0000000000..84ec53ccc4
--- /dev/null
+++ b/include/asm-generic/cacheflush.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_GENERIC_CACHEFLUSH_H
+#define _ASM_GENERIC_CACHEFLUSH_H
+
+#include <linux/instrumented.h>
+
+struct mm_struct;
+struct vm_area_struct;
+struct page;
+struct address_space;
+
+/*
+ * The cache doesn't need to be flushed when TLB entries change when
+ * the cache is mapped to physical memory, not virtual memory
+ */
+#ifndef flush_cache_all
+static inline void flush_cache_all(void)
+{
+}
+#endif
+
+#ifndef flush_cache_mm
+static inline void flush_cache_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_dup_mm
+static inline void flush_cache_dup_mm(struct mm_struct *mm)
+{
+}
+#endif
+
+#ifndef flush_cache_range
+static inline void flush_cache_range(struct vm_area_struct *vma,
+				     unsigned long start,
+				     unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_page
+static inline void flush_cache_page(struct vm_area_struct *vma,
+				    unsigned long vmaddr,
+				    unsigned long pfn)
+{
+}
+#endif
+
+#ifndef ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE
+static inline void flush_dcache_page(struct page *page)
+{
+}
+
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 0
+#endif
+
+#ifndef flush_dcache_mmap_lock
+static inline void flush_dcache_mmap_lock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_dcache_mmap_unlock
+static inline void flush_dcache_mmap_unlock(struct address_space *mapping)
+{
+}
+#endif
+
+#ifndef flush_icache_range
+static inline void flush_icache_range(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_icache_user_range
+#define flush_icache_user_range flush_icache_range
+#endif
+
+#ifndef flush_icache_user_page
+static inline void flush_icache_user_page(struct vm_area_struct *vma,
+					  struct page *page,
+					  unsigned long addr, int len)
+{
+}
+#endif
+
+#ifndef flush_cache_vmap
+static inline void flush_cache_vmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef flush_cache_vunmap
+static inline void flush_cache_vunmap(unsigned long start, unsigned long end)
+{
+}
+#endif
+
+#ifndef copy_to_user_page
+#define copy_to_user_page(vma, page, vaddr, dst, src, len)	\
+	do {							\
+		instrument_copy_to_user((void __user *)dst, src, len); \
+		memcpy(dst, src, len);				\
+		flush_icache_user_page(vma, page, vaddr, len);	\
+	} while (0)
+#endif
+
+
+#ifndef copy_from_user_page
+#define copy_from_user_page(vma, page, vaddr, dst, src, len)		\
+	do {								\
+		instrument_copy_from_user_before(dst, (void __user *)src, \
+						 len);			\
+		memcpy(dst, src, len);					\
+		instrument_copy_from_user_after(dst, (void __user *)src, len, \
+						0);			\
+	} while (0)
+#endif
+
+#endif /* _ASM_GENERIC_CACHEFLUSH_H */
```
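For context (not part of the commit): every hook in this header is wrapped in `#ifndef`, so an architecture can override any subset of them. The arch header defines its own version, defines a macro of the same name, and then includes `asm-generic/cacheflush.h` to pick up no-op stubs for everything it left alone. A minimal sketch of that pattern for a hypothetical architecture; `foo` and `foo_sync_icache_range` are illustrative names, not anything from this commit:

```c
/* arch/foo/include/asm/cacheflush.h -- hypothetical override example */
#ifndef _ASM_FOO_CACHEFLUSH_H
#define _ASM_FOO_CACHEFLUSH_H

/* Assumed arch-specific primitive, implemented elsewhere. */
extern void foo_sync_icache_range(unsigned long start, unsigned long end);

static inline void flush_icache_range(unsigned long start, unsigned long end)
{
	foo_sync_icache_range(start, end);
}
/*
 * Defining a macro with the same name makes the #ifndef guard in
 * asm-generic/cacheflush.h skip its empty stub for this one hook.
 */
#define flush_icache_range flush_icache_range

/* All hooks not overridden above fall back to the generic no-ops. */
#include <asm-generic/cacheflush.h>

#endif /* _ASM_FOO_CACHEFLUSH_H */
```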
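Also worth noting: `copy_to_user_page()` is not a uaccess primitive. Both `dst` and `src` are kernel addresses; `dst` points into a kernel mapping of the target process's page, and the trailing `flush_icache_user_page()` makes instruction writes (e.g. ptrace breakpoints) visible to the target. A hedged sketch of the typical call-site shape, loosely modeled on the remote-access path in `mm/memory.c`, with setup and error handling omitted:

```c
/* Assumes vma, page, addr, buf and len were obtained beforehand,
 * e.g. via get_user_pages_remote(); sketch only. */
void *maddr = kmap_local_page(page);

/* Copy 'len' bytes from the kernel buffer 'buf' into the mapped
 * page, then flush the icache for the affected user range. */
copy_to_user_page(vma, page, addr,
		  maddr + offset_in_page(addr), buf, len);

kunmap_local(maddr);
set_page_dirty_lock(page);	/* the mapped page was modified */
```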