From 5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sat, 27 Apr 2024 12:05:51 +0200
Subject: Adding upstream version 5.10.209.

Signed-off-by: Daniel Baumann
---
 arch/arm/include/asm/page.h | 168 ++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 168 insertions(+)
 create mode 100644 arch/arm/include/asm/page.h

(limited to 'arch/arm/include/asm/page.h')

diff --git a/arch/arm/include/asm/page.h b/arch/arm/include/asm/page.h
new file mode 100644
index 000000000..11b058a72
--- /dev/null
+++ b/arch/arm/include/asm/page.h
@@ -0,0 +1,168 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ *  arch/arm/include/asm/page.h
+ *
+ *  Copyright (C) 1995-2003 Russell King
+ */
+#ifndef _ASMARM_PAGE_H
+#define _ASMARM_PAGE_H
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT		12
+#define PAGE_SIZE		(_AC(1,UL) << PAGE_SHIFT)
+#define PAGE_MASK		(~((1 << PAGE_SHIFT) - 1))
+
+#ifndef __ASSEMBLY__
+
+#ifndef CONFIG_MMU
+
+#include <asm/page-nommu.h>
+
+#else
+
+#include <asm/glue.h>
+
+/*
+ *	User Space Model
+ *	================
+ *
+ *	This section selects the correct set of functions for dealing with
+ *	page-based copying and clearing for user space for the particular
+ *	processor(s) we're building for.
+ *
+ *	We have the following to choose from:
+ *	  v4wt		- ARMv4 with writethrough cache, without minicache
+ *	  v4wb		- ARMv4 with writeback cache, without minicache
+ *	  v4_mc		- ARMv4 with minicache
+ *	  xscale	- Xscale
+ *	  xsc3		- XScalev3
+ */
+#undef _USER
+#undef MULTI_USER
+
+#ifdef CONFIG_CPU_COPY_V4WT
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wt
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V4WB
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4wb
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_FEROCEON
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER feroceon
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_FA
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER fa
+# endif
+#endif
+
+#ifdef CONFIG_CPU_SA1100
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER v4_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSCALE
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER xscale_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_XSC3
+# ifdef _USER
+#  define MULTI_USER 1
+# else
+#  define _USER xsc3_mc
+# endif
+#endif
+
+#ifdef CONFIG_CPU_COPY_V6
+# define MULTI_USER 1
+#endif
+
+#if !defined(_USER) && !defined(MULTI_USER)
+#error Unknown user operations model
+#endif
+
+struct page;
+struct vm_area_struct;
+
+struct cpu_user_fns {
+	void (*cpu_clear_user_highpage)(struct page *page, unsigned long vaddr);
+	void (*cpu_copy_user_highpage)(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma);
+};
+
+#ifdef MULTI_USER
+extern struct cpu_user_fns cpu_user;
+
+#define __cpu_clear_user_highpage	cpu_user.cpu_clear_user_highpage
+#define __cpu_copy_user_highpage	cpu_user.cpu_copy_user_highpage
+
+#else
+
+#define __cpu_clear_user_highpage	__glue(_USER,_clear_user_highpage)
+#define __cpu_copy_user_highpage	__glue(_USER,_copy_user_highpage)
+
+extern void __cpu_clear_user_highpage(struct page *page, unsigned long vaddr);
+extern void __cpu_copy_user_highpage(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma);
+#endif
+
+#define clear_user_highpage(page,vaddr)		\
+	 __cpu_clear_user_highpage(page, vaddr)
+
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE
+#define copy_user_highpage(to,from,vaddr,vma)	\
+	__cpu_copy_user_highpage(to, from, vaddr, vma)
+
+#define clear_page(page)	memset((void *)(page), 0, PAGE_SIZE)
+extern void copy_page(void *to, const void *from);
+
+#ifdef CONFIG_KUSER_HELPERS
+#define __HAVE_ARCH_GATE_AREA 1
+#endif
+
+#ifdef CONFIG_ARM_LPAE
+#include <asm/pgtable-3level-types.h>
+#else
+#include <asm/pgtable-2level-types.h>
+#endif
+
+#endif /* CONFIG_MMU */
+
+typedef struct page *pgtable_t;
+
+#ifdef CONFIG_HAVE_ARCH_PFN_VALID
+extern int pfn_valid(unsigned long);
+#endif
+
+#include <asm/memory.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#define VM_DATA_DEFAULT_FLAGS	VM_DATA_FLAGS_TSK_EXEC
+
+#include <asm-generic/getorder.h>
+
+#endif
--
cgit v1.2.3
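
Illustration (not part of the upstream patch): the header above picks between two dispatch styles for the user-page helpers. When exactly one copy implementation is configured, __cpu_clear_user_highpage and __cpu_copy_user_highpage are bound at compile time by pasting the selected _USER prefix onto the operation name with __glue() from asm/glue.h; when several implementations are built in, MULTI_USER is defined and the same names resolve through the cpu_user function-pointer table instead. The standalone C sketch below models both paths with plain buffers in place of struct page; all symbols are local stand-ins, not the real kernel code.

/*
 * Standalone sketch of the two dispatch styles used by the header above.
 * Plain buffers stand in for struct page; everything here is illustrative.
 */
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096

/* Two-level paste, as in asm/glue.h, so the _USER macro expands first. */
#define ____glue(name, fn)	name##fn
#define __glue(name, fn)	____glue(name, fn)

/* Pretend the Kconfig selection left _USER defined as "v4wb". */
#define _USER v4wb

static void v4wb_clear_user_highpage(void *page, unsigned long vaddr)
{
	(void)vaddr;			/* the real helpers may use vaddr for cache handling */
	memset(page, 0, PAGE_SIZE);
}

/* Single-implementation build: the alias resolves at compile time. */
#define __cpu_clear_user_highpage	__glue(_USER, _clear_user_highpage)

/* MULTI_USER build: calls go through a function-pointer table instead. */
struct cpu_user_fns {
	void (*cpu_clear_user_highpage)(void *page, unsigned long vaddr);
};

static struct cpu_user_fns cpu_user = {
	.cpu_clear_user_highpage = v4wb_clear_user_highpage,
};

int main(void)
{
	static char page[PAGE_SIZE];

	memset(page, 0xaa, PAGE_SIZE);
	__cpu_clear_user_highpage(page, 0);		/* expands to v4wb_clear_user_highpage(page, 0) */

	memset(page, 0xaa, PAGE_SIZE);
	cpu_user.cpu_clear_user_highpage(page, 0);	/* selected at runtime in the MULTI_USER case */

	printf("first byte after clearing: %d\n", page[0]);
	return 0;
}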