From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 arch/s390/include/asm/page.h | 211 +++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 211 insertions(+)
 create mode 100644 arch/s390/include/asm/page.h

diff --git a/arch/s390/include/asm/page.h b/arch/s390/include/asm/page.h
new file mode 100644
index 000000000..61dea67bb
--- /dev/null
+++ b/arch/s390/include/asm/page.h
@@ -0,0 +1,211 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * S390 version
+ * Copyright IBM Corp. 1999, 2000
+ * Author(s): Hartmut Penner (hp@de.ibm.com)
+ */
+
+#ifndef _S390_PAGE_H
+#define _S390_PAGE_H
+
+#include <linux/const.h>
+#include <asm/types.h>
+
+#define _PAGE_SHIFT 12
+#define _PAGE_SIZE (_AC(1, UL) << _PAGE_SHIFT)
+#define _PAGE_MASK (~(_PAGE_SIZE - 1))
+
+/* PAGE_SHIFT determines the page size */
+#define PAGE_SHIFT _PAGE_SHIFT
+#define PAGE_SIZE _PAGE_SIZE
+#define PAGE_MASK _PAGE_MASK
+#define PAGE_DEFAULT_ACC 0
+/* storage-protection override */
+#define PAGE_SPO_ACC 9
+#define PAGE_DEFAULT_KEY (PAGE_DEFAULT_ACC << 4)
+
+#define HPAGE_SHIFT 20
+#define HPAGE_SIZE (1UL << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+#define HUGE_MAX_HSTATE 2
+
+#define ARCH_HAS_SETCLEAR_HUGE_PTE
+#define ARCH_HAS_HUGE_PTE_TYPE
+#define ARCH_HAS_PREPARE_HUGEPAGE
+#define ARCH_HAS_HUGEPAGE_CLEAR_FLUSH
+
+#define HAVE_ARCH_HUGETLB_UNMAPPED_AREA
+
+#include <asm/setup.h>
+#ifndef __ASSEMBLY__
+
+void __storage_key_init_range(unsigned long start, unsigned long end);
+
+static inline void storage_key_init_range(unsigned long start, unsigned long end)
+{
+	if (PAGE_DEFAULT_KEY != 0)
+		__storage_key_init_range(start, end);
+}
+
+#define clear_page(page) memset((page), 0, PAGE_SIZE)
+
+/*
+ * copy_page uses the mvcl instruction with 0xb0 padding byte in order to
+ * bypass caches when copying a page. Especially when copying huge pages
+ * this keeps L1 and L2 data caches alive.
+ */
+static inline void copy_page(void *to, void *from)
+{
+	union register_pair dst, src;
+
+	dst.even = (unsigned long) to;
+	dst.odd = 0x1000;
+	src.even = (unsigned long) from;
+	src.odd = 0xb0001000;
+
+	asm volatile(
+		"	mvcl	%[dst],%[src]"
+		: [dst] "+&d" (dst.pair), [src] "+&d" (src.pair)
+		: : "memory", "cc");
+}
+
+#define clear_user_page(page, vaddr, pg) clear_page(page)
+#define copy_user_page(to, from, vaddr, pg) copy_page(to, from)
+
+#define alloc_zeroed_user_highpage_movable(vma, vaddr) \
+	alloc_page_vma(GFP_HIGHUSER_MOVABLE | __GFP_ZERO, vma, vaddr)
+#define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE_MOVABLE
+
+/*
+ * These are used to make use of C type-checking..
+ */
+
+typedef struct { unsigned long pgprot; } pgprot_t;
+typedef struct { unsigned long pgste; } pgste_t;
+typedef struct { unsigned long pte; } pte_t;
+typedef struct { unsigned long pmd; } pmd_t;
+typedef struct { unsigned long pud; } pud_t;
+typedef struct { unsigned long p4d; } p4d_t;
+typedef struct { unsigned long pgd; } pgd_t;
+typedef pte_t *pgtable_t;
+
+#define pgprot_val(x) ((x).pgprot)
+#define pgste_val(x) ((x).pgste)
+
+static inline unsigned long pte_val(pte_t pte)
+{
+	return pte.pte;
+}
+
+static inline unsigned long pmd_val(pmd_t pmd)
+{
+	return pmd.pmd;
+}
+
+static inline unsigned long pud_val(pud_t pud)
+{
+	return pud.pud;
+}
+
+static inline unsigned long p4d_val(p4d_t p4d)
+{
+	return p4d.p4d;
+}
+
+static inline unsigned long pgd_val(pgd_t pgd)
+{
+	return pgd.pgd;
+}
+
+#define __pgste(x) ((pgste_t) { (x) } )
+#define __pte(x) ((pte_t) { (x) } )
+#define __pmd(x) ((pmd_t) { (x) } )
+#define __pud(x) ((pud_t) { (x) } )
+#define __p4d(x) ((p4d_t) { (x) } )
+#define __pgd(x) ((pgd_t) { (x) } )
+#define __pgprot(x) ((pgprot_t) { (x) } )
+
+static inline void page_set_storage_key(unsigned long addr,
+					unsigned char skey, int mapped)
+{
+	if (!mapped)
+		asm volatile(".insn rrf,0xb22b0000,%0,%1,8,0"
+			     : : "d" (skey), "a" (addr));
+	else
+		asm volatile("sske %0,%1" : : "d" (skey), "a" (addr));
+}
+
+static inline unsigned char page_get_storage_key(unsigned long addr)
+{
+	unsigned char skey;
+
+	asm volatile("iske %0,%1" : "=d" (skey) : "a" (addr));
+	return skey;
+}
+
+static inline int page_reset_referenced(unsigned long addr)
+{
+	int cc;
+
+	asm volatile(
+		"	rrbe	0,%1\n"
+		"	ipm	%0\n"
+		"	srl	%0,28\n"
+		: "=d" (cc) : "a" (addr) : "cc");
+	return cc;
+}
+
+/* Bits in the storage key */
+#define _PAGE_CHANGED 0x02 /* HW changed bit */
+#define _PAGE_REFERENCED 0x04 /* HW referenced bit */
+#define _PAGE_FP_BIT 0x08 /* HW fetch protection bit */
+#define _PAGE_ACC_BITS 0xf0 /* HW access control bits */
+
+struct page;
+void arch_free_page(struct page *page, int order);
+void arch_alloc_page(struct page *page, int order);
+void arch_set_page_dat(struct page *page, int order);
+
+static inline int devmem_is_allowed(unsigned long pfn)
+{
+	return 0;
+}
+
+#define HAVE_ARCH_FREE_PAGE
+#define HAVE_ARCH_ALLOC_PAGE
+
+#if IS_ENABLED(CONFIG_PGSTE)
+int arch_make_page_accessible(struct page *page);
+#define HAVE_ARCH_MAKE_PAGE_ACCESSIBLE
+#endif
+
+#endif /* !__ASSEMBLY__ */
+
+#define __PAGE_OFFSET 0x0UL
+#define PAGE_OFFSET 0x0UL
+
+#define __pa(x) ((unsigned long)(x))
+#define __va(x) ((void *)(unsigned long)(x))
+
+#define phys_to_pfn(phys) ((phys) >> PAGE_SHIFT)
+#define pfn_to_phys(pfn) ((pfn) << PAGE_SHIFT)
+
+#define phys_to_page(phys) pfn_to_page(phys_to_pfn(phys))
+#define page_to_phys(page) pfn_to_phys(page_to_pfn(page))
+
+#define pfn_to_virt(pfn) __va(pfn_to_phys(pfn))
+#define virt_to_pfn(kaddr) (phys_to_pfn(__pa(kaddr)))
+#define pfn_to_kaddr(pfn) pfn_to_virt(pfn)
+
+#define virt_to_page(kaddr) pfn_to_page(virt_to_pfn(kaddr))
+#define page_to_virt(page) pfn_to_virt(page_to_pfn(page))
+
+#define virt_addr_valid(kaddr) pfn_valid(virt_to_pfn(kaddr))
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _S390_PAGE_H */
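
A note on the storage-key helpers in the hunk above: because PAGE_DEFAULT_ACC is 0, PAGE_DEFAULT_KEY expands to 0 and the "PAGE_DEFAULT_KEY != 0" test in storage_key_init_range() compiles away, so key initialization is a no-op in this configuration. The following minimal sketch (not part of the patch) shows how the two key accessors pair up; the function name demo_storage_key_roundtrip() and the key value 0x30 are hypothetical, and the code assumes it is built in kernel context with this header included:

static inline unsigned char demo_storage_key_roundtrip(unsigned long addr)
{
	/* ACC bits = 0x30, fetch-protection bit (_PAGE_FP_BIT, 0x08) clear */
	page_set_storage_key(addr, 0x30, 1);	/* mapped == 1: uses sske */
	/* iske returns the key, including the HW referenced/changed bits */
	return page_get_storage_key(addr);
}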
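Similarly, since PAGE_OFFSET is defined as 0x0UL, __pa() and __va() are identity casts on s390, and the pfn helpers reduce to shifts by PAGE_SHIFT (12). A short sketch under the same assumptions, with a hypothetical function name and sample address:

static inline void demo_addr_conversion(void)
{
	unsigned long kaddr = 0x20000UL;	/* sample kernel address */
	unsigned long pfn = virt_to_pfn(kaddr);	/* 0x20000 >> 12 == 0x20 */
	void *back = pfn_to_virt(pfn);		/* identity round trip */

	(void)back;	/* back == (void *)0x20000 because PAGE_OFFSET == 0 */
}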