author     Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>   2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /arch/powerpc/include/asm/nohash
parent     Initial commit. (diff)
Adding upstream version 6.1.76.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/powerpc/include/asm/nohash')
22 files changed, 3082 insertions, 0 deletions
diff --git a/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h new file mode 100644 index 000000000..de092b04e --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/hugetlb-8xx.h @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H +#define _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H + +#define PAGE_SHIFT_8M 23 + +static inline pte_t *hugepd_page(hugepd_t hpd) +{ + BUG_ON(!hugepd_ok(hpd)); + + return (pte_t *)__va(hpd_val(hpd) & ~HUGEPD_SHIFT_MASK); +} + +static inline unsigned int hugepd_shift(hugepd_t hpd) +{ + return PAGE_SHIFT_8M; +} + +static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, + unsigned int pdshift) +{ + unsigned long idx = (addr & (SZ_4M - 1)) >> PAGE_SHIFT; + + return hugepd_page(hpd) + idx; +} + +static inline void flush_hugetlb_page(struct vm_area_struct *vma, + unsigned long vmaddr) +{ + flush_tlb_page(vma, vmaddr); +} + +static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) +{ + *hpdp = __hugepd(__pa(new) | _PMD_USER | _PMD_PRESENT | _PMD_PAGE_8M); +} + +static inline void hugepd_populate_kernel(hugepd_t *hpdp, pte_t *new, unsigned int pshift) +{ + *hpdp = __hugepd(__pa(new) | _PMD_PRESENT | _PMD_PAGE_8M); +} + +static inline int check_and_get_huge_psize(int shift) +{ + return shift_to_mmu_psize(shift); +} + +#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT +void set_huge_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, pte_t pte); + +#define __HAVE_ARCH_HUGE_PTE_CLEAR +static inline void huge_pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, unsigned long sz) +{ + pte_update(mm, addr, ptep, ~0UL, 0, 1); +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long clr = ~pte_val(pte_wrprotect(__pte(~0))); + unsigned long set = pte_val(pte_wrprotect(__pte(0))); + + pte_update(mm, addr, ptep, clr, set, 1); +} + +#ifdef CONFIG_PPC_4K_PAGES +static inline pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags) +{ + size_t size = 1UL << shift; + + if (size == SZ_16K) + return __pte(pte_val(entry) | _PAGE_SPS); + else + return __pte(pte_val(entry) | _PAGE_SPS | _PAGE_HUGE); +} +#define arch_make_huge_pte arch_make_huge_pte +#endif + +#endif /* _ASM_POWERPC_NOHASH_32_HUGETLB_8XX_H */ diff --git a/arch/powerpc/include/asm/nohash/32/kup-8xx.h b/arch/powerpc/include/asm/nohash/32/kup-8xx.h new file mode 100644 index 000000000..c44d97751 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/kup-8xx.h @@ -0,0 +1,91 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_KUP_8XX_H_ +#define _ASM_POWERPC_KUP_8XX_H_ + +#include <asm/bug.h> +#include <asm/mmu.h> + +#ifdef CONFIG_PPC_KUAP + +#ifndef __ASSEMBLY__ + +#include <linux/jump_label.h> + +#include <asm/reg.h> + +extern struct static_key_false disable_kuap_key; + +static __always_inline bool kuap_is_disabled(void) +{ + return static_branch_unlikely(&disable_kuap_key); +} + +static inline void __kuap_lock(void) +{ +} + +static inline void __kuap_save_and_lock(struct pt_regs *regs) +{ + regs->kuap = mfspr(SPRN_MD_AP); + mtspr(SPRN_MD_AP, MD_APG_KUAP); +} + +static inline void kuap_user_restore(struct pt_regs *regs) +{ +} + +static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +{ + mtspr(SPRN_MD_AP, regs->kuap); +} + +static inline unsigned long __kuap_get_and_assert_locked(void) +{ + unsigned long 
kuap; + + kuap = mfspr(SPRN_MD_AP); + + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) + WARN_ON_ONCE(kuap >> 16 != MD_APG_KUAP >> 16); + + return kuap; +} + +static inline void __allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ + mtspr(SPRN_MD_AP, MD_APG_INIT); +} + +static inline void __prevent_user_access(unsigned long dir) +{ + mtspr(SPRN_MD_AP, MD_APG_KUAP); +} + +static inline unsigned long __prevent_user_access_return(void) +{ + unsigned long flags; + + flags = mfspr(SPRN_MD_AP); + + mtspr(SPRN_MD_AP, MD_APG_KUAP); + + return flags; +} + +static inline void __restore_user_access(unsigned long flags) +{ + mtspr(SPRN_MD_AP, flags); +} + +static inline bool +__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) +{ + return !((regs->kuap ^ MD_APG_KUAP) & 0xff000000); +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* CONFIG_PPC_KUAP */ + +#endif /* _ASM_POWERPC_KUP_8XX_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-40x.h b/arch/powerpc/include/asm/nohash/32/mmu-40x.h new file mode 100644 index 000000000..8a8f13a22 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/mmu-40x.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_MMU_40X_H_ +#define _ASM_POWERPC_MMU_40X_H_ + +/* + * PPC40x support + */ + +#define PPC40X_TLB_SIZE 64 + +/* + * TLB entries are defined by a "high" tag portion and a "low" data + * portion. On all architectures, the data portion is 32-bits. + * + * TLB entries are managed entirely under software control by reading, + * writing, and searchoing using the 4xx-specific tlbre, tlbwr, and tlbsx + * instructions. + */ + +#define TLB_LO 1 +#define TLB_HI 0 + +#define TLB_DATA TLB_LO +#define TLB_TAG TLB_HI + +/* Tag portion */ + +#define TLB_EPN_MASK 0xFFFFFC00 /* Effective Page Number */ +#define TLB_PAGESZ_MASK 0x00000380 +#define TLB_PAGESZ(x) (((x) & 0x7) << 7) +#define PAGESZ_1K 0 +#define PAGESZ_4K 1 +#define PAGESZ_16K 2 +#define PAGESZ_64K 3 +#define PAGESZ_256K 4 +#define PAGESZ_1M 5 +#define PAGESZ_4M 6 +#define PAGESZ_16M 7 +#define TLB_VALID 0x00000040 /* Entry is valid */ + +/* Data portion */ + +#define TLB_RPN_MASK 0xFFFFFC00 /* Real Page Number */ +#define TLB_PERM_MASK 0x00000300 +#define TLB_EX 0x00000200 /* Instruction execution allowed */ +#define TLB_WR 0x00000100 /* Writes permitted */ +#define TLB_ZSEL_MASK 0x000000F0 +#define TLB_ZSEL(x) (((x) & 0xF) << 4) +#define TLB_ATTR_MASK 0x0000000F +#define TLB_W 0x00000008 /* Caching is write-through */ +#define TLB_I 0x00000004 /* Caching is inhibited */ +#define TLB_M 0x00000002 /* Memory is coherent */ +#define TLB_G 0x00000001 /* Memory is guarded from prefetch */ + +#ifndef __ASSEMBLY__ + +typedef struct { + unsigned int id; + unsigned int active; + void __user *vdso; +} mm_context_t; + +#endif /* !__ASSEMBLY__ */ + +#define mmu_virtual_psize MMU_PAGE_4K +#define mmu_linear_psize MMU_PAGE_256M + +#endif /* _ASM_POWERPC_MMU_40X_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-44x.h b/arch/powerpc/include/asm/nohash/32/mmu-44x.h new file mode 100644 index 000000000..2d92a39d8 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/mmu-44x.h @@ -0,0 +1,156 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_MMU_44X_H_ +#define _ASM_POWERPC_MMU_44X_H_ +/* + * PPC440 support + */ + +#include <asm/asm-const.h> + +#define PPC44x_MMUCR_TID 0x000000ff +#define PPC44x_MMUCR_STS 0x00010000 + +#define PPC44x_TLB_PAGEID 0 +#define PPC44x_TLB_XLAT 1 +#define PPC44x_TLB_ATTRIB 2 + +/* Page 
identification fields */ +#define PPC44x_TLB_EPN_MASK 0xfffffc00 /* Effective Page Number */ +#define PPC44x_TLB_VALID 0x00000200 /* Valid flag */ +#define PPC44x_TLB_TS 0x00000100 /* Translation address space */ +#define PPC44x_TLB_1K 0x00000000 /* Page sizes */ +#define PPC44x_TLB_4K 0x00000010 +#define PPC44x_TLB_16K 0x00000020 +#define PPC44x_TLB_64K 0x00000030 +#define PPC44x_TLB_256K 0x00000040 +#define PPC44x_TLB_1M 0x00000050 +#define PPC44x_TLB_16M 0x00000070 +#define PPC44x_TLB_256M 0x00000090 + +/* Translation fields */ +#define PPC44x_TLB_RPN_MASK 0xfffffc00 /* Real Page Number */ +#define PPC44x_TLB_ERPN_MASK 0x0000000f + +/* Storage attribute and access control fields */ +#define PPC44x_TLB_ATTR_MASK 0x0000ff80 +#define PPC44x_TLB_U0 0x00008000 /* User 0 */ +#define PPC44x_TLB_U1 0x00004000 /* User 1 */ +#define PPC44x_TLB_U2 0x00002000 /* User 2 */ +#define PPC44x_TLB_U3 0x00001000 /* User 3 */ +#define PPC44x_TLB_W 0x00000800 /* Caching is write-through */ +#define PPC44x_TLB_I 0x00000400 /* Caching is inhibited */ +#define PPC44x_TLB_M 0x00000200 /* Memory is coherent */ +#define PPC44x_TLB_G 0x00000100 /* Memory is guarded */ +#define PPC44x_TLB_E 0x00000080 /* Memory is little endian */ + +#define PPC44x_TLB_PERM_MASK 0x0000003f +#define PPC44x_TLB_UX 0x00000020 /* User execution */ +#define PPC44x_TLB_UW 0x00000010 /* User write */ +#define PPC44x_TLB_UR 0x00000008 /* User read */ +#define PPC44x_TLB_SX 0x00000004 /* Super execution */ +#define PPC44x_TLB_SW 0x00000002 /* Super write */ +#define PPC44x_TLB_SR 0x00000001 /* Super read */ + +/* Number of TLB entries */ +#define PPC44x_TLB_SIZE 64 + +/* 47x bits */ +#define PPC47x_MMUCR_TID 0x0000ffff +#define PPC47x_MMUCR_STS 0x00010000 + +/* Page identification fields */ +#define PPC47x_TLB0_EPN_MASK 0xfffff000 /* Effective Page Number */ +#define PPC47x_TLB0_VALID 0x00000800 /* Valid flag */ +#define PPC47x_TLB0_TS 0x00000400 /* Translation address space */ +#define PPC47x_TLB0_4K 0x00000000 +#define PPC47x_TLB0_16K 0x00000010 +#define PPC47x_TLB0_64K 0x00000030 +#define PPC47x_TLB0_1M 0x00000070 +#define PPC47x_TLB0_16M 0x000000f0 +#define PPC47x_TLB0_256M 0x000001f0 +#define PPC47x_TLB0_1G 0x000003f0 +#define PPC47x_TLB0_BOLTED_R 0x00000008 /* tlbre only */ + +/* Translation fields */ +#define PPC47x_TLB1_RPN_MASK 0xfffff000 /* Real Page Number */ +#define PPC47x_TLB1_ERPN_MASK 0x000003ff + +/* Storage attribute and access control fields */ +#define PPC47x_TLB2_ATTR_MASK 0x0003ff80 +#define PPC47x_TLB2_IL1I 0x00020000 /* Memory is guarded */ +#define PPC47x_TLB2_IL1D 0x00010000 /* Memory is guarded */ +#define PPC47x_TLB2_U0 0x00008000 /* User 0 */ +#define PPC47x_TLB2_U1 0x00004000 /* User 1 */ +#define PPC47x_TLB2_U2 0x00002000 /* User 2 */ +#define PPC47x_TLB2_U3 0x00001000 /* User 3 */ +#define PPC47x_TLB2_W 0x00000800 /* Caching is write-through */ +#define PPC47x_TLB2_I 0x00000400 /* Caching is inhibited */ +#define PPC47x_TLB2_M 0x00000200 /* Memory is coherent */ +#define PPC47x_TLB2_G 0x00000100 /* Memory is guarded */ +#define PPC47x_TLB2_E 0x00000080 /* Memory is little endian */ +#define PPC47x_TLB2_PERM_MASK 0x0000003f +#define PPC47x_TLB2_UX 0x00000020 /* User execution */ +#define PPC47x_TLB2_UW 0x00000010 /* User write */ +#define PPC47x_TLB2_UR 0x00000008 /* User read */ +#define PPC47x_TLB2_SX 0x00000004 /* Super execution */ +#define PPC47x_TLB2_SW 0x00000002 /* Super write */ +#define PPC47x_TLB2_SR 0x00000001 /* Super read */ +#define PPC47x_TLB2_U_RWX 
(PPC47x_TLB2_UX|PPC47x_TLB2_UW|PPC47x_TLB2_UR) +#define PPC47x_TLB2_S_RWX (PPC47x_TLB2_SX|PPC47x_TLB2_SW|PPC47x_TLB2_SR) +#define PPC47x_TLB2_S_RW (PPC47x_TLB2_SW | PPC47x_TLB2_SR) +#define PPC47x_TLB2_IMG (PPC47x_TLB2_I | PPC47x_TLB2_M | PPC47x_TLB2_G) + +#ifndef __ASSEMBLY__ + +extern unsigned int tlb_44x_hwater; +extern unsigned int tlb_44x_index; + +typedef struct { + unsigned int id; + unsigned int active; + void __user *vdso; +} mm_context_t; + +/* patch sites */ +extern s32 patch__tlb_44x_hwater_D, patch__tlb_44x_hwater_I; + +#endif /* !__ASSEMBLY__ */ + +#ifndef CONFIG_PPC_EARLY_DEBUG_44x +#define PPC44x_EARLY_TLBS 1 +#else +#define PPC44x_EARLY_TLBS 2 +#define PPC44x_EARLY_DEBUG_VIRTADDR (ASM_CONST(0xf0000000) \ + | (ASM_CONST(CONFIG_PPC_EARLY_DEBUG_44x_PHYSLOW) & 0xffff)) +#endif + +/* Size of the TLBs used for pinning in lowmem */ +#define PPC_PIN_SIZE (1 << 28) /* 256M */ + +#if defined(CONFIG_PPC_4K_PAGES) +#define PPC44x_TLBE_SIZE PPC44x_TLB_4K +#define PPC47x_TLBE_SIZE PPC47x_TLB0_4K +#define mmu_virtual_psize MMU_PAGE_4K +#elif defined(CONFIG_PPC_16K_PAGES) +#define PPC44x_TLBE_SIZE PPC44x_TLB_16K +#define PPC47x_TLBE_SIZE PPC47x_TLB0_16K +#define mmu_virtual_psize MMU_PAGE_16K +#elif defined(CONFIG_PPC_64K_PAGES) +#define PPC44x_TLBE_SIZE PPC44x_TLB_64K +#define PPC47x_TLBE_SIZE PPC47x_TLB0_64K +#define mmu_virtual_psize MMU_PAGE_64K +#elif defined(CONFIG_PPC_256K_PAGES) +#define PPC44x_TLBE_SIZE PPC44x_TLB_256K +#define mmu_virtual_psize MMU_PAGE_256K +#else +#error "Unsupported PAGE_SIZE" +#endif + +#define mmu_linear_psize MMU_PAGE_256M + +#define PPC44x_PGD_OFF_SHIFT (32 - PGDIR_SHIFT + PGD_T_LOG2) +#define PPC44x_PGD_OFF_MASK_BIT (PGDIR_SHIFT - PGD_T_LOG2) +#define PPC44x_PTE_ADD_SHIFT (32 - PGDIR_SHIFT + PTE_SHIFT + PTE_T_LOG2) +#define PPC44x_PTE_ADD_MASK_BIT (32 - PTE_T_LOG2 - PTE_SHIFT) + +#endif /* _ASM_POWERPC_MMU_44X_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/mmu-8xx.h b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h new file mode 100644 index 000000000..0e93a4728 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/mmu-8xx.h @@ -0,0 +1,275 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_MMU_8XX_H_ +#define _ASM_POWERPC_MMU_8XX_H_ +/* + * PPC8xx support + */ + +/* Control/status registers for the MPC8xx. + * A write operation to these registers causes serialized access. + * During software tablewalk, the registers used perform mask/shift-add + * operations when written/read. A TLB entry is created when the Mx_RPN + * is written, and the contents of several registers are used to + * create the entry. + */ +#define SPRN_MI_CTR 784 /* Instruction TLB control register */ +#define MI_GPM 0x80000000 /* Set domain manager mode */ +#define MI_PPM 0x40000000 /* Set subpage protection */ +#define MI_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */ +#define MI_RSV4I 0x08000000 /* Reserve 4 TLB entries */ +#define MI_PPCS 0x02000000 /* Use MI_RPN prob/priv state */ +#define MI_IDXMASK 0x00001f00 /* TLB index to be loaded */ + +/* These are the Ks and Kp from the PowerPC books. For proper operation, + * Ks = 0, Kp = 1. + */ +#define SPRN_MI_AP 786 +#define MI_Ks 0x80000000 /* Should not be set */ +#define MI_Kp 0x40000000 /* Should always be set */ + +/* + * All pages' PP data bits are set to either 001 or 011 by copying _PAGE_EXEC + * into bit 21 in the ITLBmiss handler (bit 21 is the middle bit), which means + * respectively NA for All or X for Supervisor and no access for User. 
+ * Then we use the APG to say whether accesses are according to Page rules or + * "all Supervisor" rules (Access to all) + * _PAGE_ACCESSED is also managed via APG. When _PAGE_ACCESSED is not set, say + * "all User" rules, that will lead to NA for all. + * Therefore, we define 4 APG groups. lsb is _PAGE_ACCESSED + * 0 => Kernel => 11 (all accesses performed according as user iaw page definition) + * 1 => Kernel+Accessed => 01 (all accesses performed according to page definition) + * 2 => User => 11 (all accesses performed according as user iaw page definition) + * 3 => User+Accessed => 10 (all accesses performed according to swaped page definition) for KUEP + * 4-15 => Not Used + */ +#define MI_APG_INIT 0xde000000 + +/* The effective page number register. When read, contains the information + * about the last instruction TLB miss. When MI_RPN is written, bits in + * this register are used to create the TLB entry. + */ +#define SPRN_MI_EPN 787 +#define MI_EPNMASK 0xfffff000 /* Effective page number for entry */ +#define MI_EVALID 0x00000200 /* Entry is valid */ +#define MI_ASIDMASK 0x0000000f /* ASID match value */ + /* Reset value is undefined */ + +/* A "level 1" or "segment" or whatever you want to call it register. + * For the instruction TLB, it contains bits that get loaded into the + * TLB entry when the MI_RPN is written. + */ +#define SPRN_MI_TWC 789 +#define MI_APG 0x000001e0 /* Access protection group (0) */ +#define MI_GUARDED 0x00000010 /* Guarded storage */ +#define MI_PSMASK 0x0000000c /* Mask of page size bits */ +#define MI_PS8MEG 0x0000000c /* 8M page size */ +#define MI_PS512K 0x00000004 /* 512K page size */ +#define MI_PS4K_16K 0x00000000 /* 4K or 16K page size */ +#define MI_SVALID 0x00000001 /* Segment entry is valid */ + /* Reset value is undefined */ + +/* Real page number. Defined by the pte. Writing this register + * causes a TLB entry to be created for the instruction TLB, using + * additional information from the MI_EPN, and MI_TWC registers. + */ +#define SPRN_MI_RPN 790 +#define MI_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */ + +/* Define an RPN value for mapping kernel memory to large virtual + * pages for boot initialization. This has real page number of 0, + * large page size, shared page, cache enabled, and valid. + * Also mark all subpages valid and write access. + */ +#define MI_BOOTINIT 0x000001fd + +#define SPRN_MD_CTR 792 /* Data TLB control register */ +#define MD_GPM 0x80000000 /* Set domain manager mode */ +#define MD_PPM 0x40000000 /* Set subpage protection */ +#define MD_CIDEF 0x20000000 /* Set cache inhibit when MMU dis */ +#define MD_WTDEF 0x10000000 /* Set writethrough when MMU dis */ +#define MD_RSV4I 0x08000000 /* Reserve 4 TLB entries */ +#define MD_TWAM 0x04000000 /* Use 4K page hardware assist */ +#define MD_PPCS 0x02000000 /* Use MI_RPN prob/priv state */ +#define MD_IDXMASK 0x00001f00 /* TLB index to be loaded */ + +#define SPRN_M_CASID 793 /* Address space ID (context) to match */ +#define MC_ASIDMASK 0x0000000f /* Bits used for ASID value */ + + +/* These are the Ks and Kp from the PowerPC books. For proper operation, + * Ks = 0, Kp = 1. + */ +#define SPRN_MD_AP 794 +#define MD_Ks 0x80000000 /* Should not be set */ +#define MD_Kp 0x40000000 /* Should always be set */ + +/* See explanation above at the definition of MI_APG_INIT */ +#define MD_APG_INIT 0xdc000000 +#define MD_APG_KUAP 0xde000000 + +/* The effective page number register. When read, contains the information + * about the last instruction TLB miss. 
When MD_RPN is written, bits in + * this register are used to create the TLB entry. + */ +#define SPRN_MD_EPN 795 +#define MD_EPNMASK 0xfffff000 /* Effective page number for entry */ +#define MD_EVALID 0x00000200 /* Entry is valid */ +#define MD_ASIDMASK 0x0000000f /* ASID match value */ + /* Reset value is undefined */ + +/* The pointer to the base address of the first level page table. + * During a software tablewalk, reading this register provides the address + * of the entry associated with MD_EPN. + */ +#define SPRN_M_TWB 796 +#define M_L1TB 0xfffff000 /* Level 1 table base address */ +#define M_L1INDX 0x00000ffc /* Level 1 index, when read */ + /* Reset value is undefined */ + +/* A "level 1" or "segment" or whatever you want to call it register. + * For the data TLB, it contains bits that get loaded into the TLB entry + * when the MD_RPN is written. It is also provides the hardware assist + * for finding the PTE address during software tablewalk. + */ +#define SPRN_MD_TWC 797 +#define MD_L2TB 0xfffff000 /* Level 2 table base address */ +#define MD_L2INDX 0xfffffe00 /* Level 2 index (*pte), when read */ +#define MD_APG 0x000001e0 /* Access protection group (0) */ +#define MD_GUARDED 0x00000010 /* Guarded storage */ +#define MD_PSMASK 0x0000000c /* Mask of page size bits */ +#define MD_PS8MEG 0x0000000c /* 8M page size */ +#define MD_PS512K 0x00000004 /* 512K page size */ +#define MD_PS4K_16K 0x00000000 /* 4K or 16K page size */ +#define MD_WT 0x00000002 /* Use writethrough page attribute */ +#define MD_SVALID 0x00000001 /* Segment entry is valid */ + /* Reset value is undefined */ + + +/* Real page number. Defined by the pte. Writing this register + * causes a TLB entry to be created for the data TLB, using + * additional information from the MD_EPN, and MD_TWC registers. + */ +#define SPRN_MD_RPN 798 +#define MD_SPS16K 0x00000008 /* Small page size (0 = 4k, 1 = 16k) */ + +/* This is a temporary storage register that could be used to save + * a processor working register during a tablewalk. 
+ */ +#define SPRN_M_TW 799 + +#if defined(CONFIG_PPC_4K_PAGES) +#define mmu_virtual_psize MMU_PAGE_4K +#elif defined(CONFIG_PPC_16K_PAGES) +#define mmu_virtual_psize MMU_PAGE_16K +#define PTE_FRAG_NR 4 +#define PTE_FRAG_SIZE_SHIFT 12 +#define PTE_FRAG_SIZE (1UL << 12) +#else +#error "Unsupported PAGE_SIZE" +#endif + +#define mmu_linear_psize MMU_PAGE_8M + +#define MODULES_VADDR (PAGE_OFFSET - SZ_256M) +#define MODULES_END PAGE_OFFSET + +#ifndef __ASSEMBLY__ + +#include <linux/mmdebug.h> +#include <linux/sizes.h> + +void mmu_pin_tlb(unsigned long top, bool readonly); + +typedef struct { + unsigned int id; + unsigned int active; + void __user *vdso; + void *pte_frag; +} mm_context_t; + +#define PHYS_IMMR_BASE (mfspr(SPRN_IMMR) & 0xfff80000) +#define VIRT_IMMR_BASE (__fix_to_virt(FIX_IMMR_BASE)) + +/* Page size definitions, common between 32 and 64-bit + * + * shift : is the "PAGE_SHIFT" value for that page size + * penc : is the pte encoding mask + * + */ +struct mmu_psize_def { + unsigned int shift; /* number of bits */ + unsigned int enc; /* PTE encoding */ + unsigned int ind; /* Corresponding indirect page size shift */ + unsigned int flags; +#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ +#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ +}; + +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; + +static inline int shift_to_mmu_psize(unsigned int shift) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) + if (mmu_psize_defs[psize].shift == shift) + return psize; + return -1; +} + +static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) +{ + if (mmu_psize_defs[mmu_psize].shift) + return mmu_psize_defs[mmu_psize].shift; + BUG(); +} + +static inline bool arch_vmap_try_size(unsigned long addr, unsigned long end, u64 pfn, + unsigned int max_page_shift, unsigned long size) +{ + if (end - addr < size) + return false; + + if ((1UL << max_page_shift) < size) + return false; + + if (!IS_ALIGNED(addr, size)) + return false; + + if (!IS_ALIGNED(PFN_PHYS(pfn), size)) + return false; + + return true; +} + +static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end, + u64 pfn, unsigned int max_page_shift) +{ + if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_512K)) + return SZ_512K; + if (PAGE_SIZE == SZ_16K) + return SZ_16K; + if (arch_vmap_try_size(addr, end, pfn, max_page_shift, SZ_16K)) + return SZ_16K; + return PAGE_SIZE; +} +#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size + +static inline int arch_vmap_pte_supported_shift(unsigned long size) +{ + if (size >= SZ_512K) + return 19; + else if (size >= SZ_16K) + return 14; + else + return PAGE_SHIFT; +} +#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift + +/* patch sites */ +extern s32 patch__itlbmiss_exit_1, patch__dtlbmiss_exit_1; +extern s32 patch__itlbmiss_perf, patch__dtlbmiss_perf; + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_MMU_8XX_H_ */ diff --git a/arch/powerpc/include/asm/nohash/32/pgalloc.h b/arch/powerpc/include/asm/nohash/32/pgalloc.h new file mode 100644 index 000000000..11eac371e --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pgalloc.h @@ -0,0 +1,35 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_PGALLOC_32_H +#define _ASM_POWERPC_PGALLOC_32_H + +#include <linux/threads.h> +#include <linux/slab.h> + +/* + * We don't have any real pmd's, and this code never triggers because + * the pgd will always be present.. 
+ */ +/* #define pmd_alloc_one(mm,address) ({ BUG(); ((pmd_t *)2); }) */ +#define pmd_free(mm, x) do { } while (0) +#define __pmd_free_tlb(tlb,x,a) do { } while (0) +/* #define pgd_populate(mm, pmd, pte) BUG() */ + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp, + pte_t *pte) +{ + if (IS_ENABLED(CONFIG_BOOKE)) + *pmdp = __pmd((unsigned long)pte | _PMD_PRESENT); + else + *pmdp = __pmd(__pa(pte) | _PMD_PRESENT); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, + pgtable_t pte_page) +{ + if (IS_ENABLED(CONFIG_BOOKE)) + *pmdp = __pmd((unsigned long)pte_page | _PMD_PRESENT); + else + *pmdp = __pmd(__pa(pte_page) | _PMD_USER | _PMD_PRESENT); +} + +#endif /* _ASM_POWERPC_PGALLOC_32_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pgtable.h b/arch/powerpc/include/asm/nohash/32/pgtable.h new file mode 100644 index 000000000..0d40b3318 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pgtable.h @@ -0,0 +1,371 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_PGTABLE_H +#define _ASM_POWERPC_NOHASH_32_PGTABLE_H + +#include <asm-generic/pgtable-nopmd.h> + +#ifndef __ASSEMBLY__ +#include <linux/sched.h> +#include <linux/threads.h> +#include <asm/mmu.h> /* For sub-arch specific PPC_PIN_SIZE */ + +#ifdef CONFIG_44x +extern int icache_44x_need_flush; +#endif + +#endif /* __ASSEMBLY__ */ + +#define PTE_INDEX_SIZE PTE_SHIFT +#define PMD_INDEX_SIZE 0 +#define PUD_INDEX_SIZE 0 +#define PGD_INDEX_SIZE (32 - PGDIR_SHIFT) + +#define PMD_CACHE_INDEX PMD_INDEX_SIZE +#define PUD_CACHE_INDEX PUD_INDEX_SIZE + +#ifndef __ASSEMBLY__ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE 0 +#define PUD_TABLE_SIZE 0 +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) + +#define PMD_MASKED_BITS (PTE_TABLE_SIZE - 1) +#endif /* __ASSEMBLY__ */ + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* + * The normal case is that PTEs are 32-bits and we have a 1-page + * 1024-entry pgdir pointing to 1-page 1024-entry PTE pages. -- paulus + * + * For any >32-bit physical address platform, we can use the following + * two level page table layout where the pgdir is 8KB and the MS 13 bits + * are an index to the second level table. The combined pgdir/pmd first + * level has 2048 entries and the second level has 512 64-bit PTE entries. + * -Matt + */ +/* PGDIR_SHIFT determines what a top-level page table entry can map */ +#define PGDIR_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* Bits to mask out from a PGD to get to the PUD page */ +#define PGD_MASKED_BITS 0 + +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %llx.\n", __FILE__, __LINE__, \ + (unsigned long long)pte_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +#ifndef __ASSEMBLY__ + +int map_kernel_page(unsigned long va, phys_addr_t pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); + +#endif /* !__ASSEMBLY__ */ + + +/* + * This is the bottom of the PKMAP area with HIGHMEM or an arbitrary + * value (for now) on others, from where we can start layout kernel + * virtual space that goes below PKMAP and FIXMAP + */ +#include <asm/fixmap.h> + +/* + * ioremap_bot starts at that address. 
Early ioremaps move down from there, + * until mem_init() at which point this becomes the top of the vmalloc + * and ioremap space + */ +#ifdef CONFIG_HIGHMEM +#define IOREMAP_TOP PKMAP_BASE +#else +#define IOREMAP_TOP FIXADDR_START +#endif + +/* PPC32 shares vmalloc area with ioremap */ +#define IOREMAP_START VMALLOC_START +#define IOREMAP_END VMALLOC_END + +/* + * Just any arbitrary offset to the start of the vmalloc VM area: the + * current 16MB value just means that there will be a 64MB "hole" after the + * physical memory until the kernel virtual memory starts. That means that + * any out-of-bounds memory accesses will hopefully be caught. + * The vmalloc() routines leaves a hole of 4kB between each vmalloced + * area for the same reason. ;) + * + * We no longer map larger than phys RAM with the BATs so we don't have + * to worry about the VMALLOC_OFFSET causing problems. We do have to worry + * about clashes between our early calls to ioremap() that start growing down + * from IOREMAP_TOP being run into the VM area allocations (growing upwards + * from VMALLOC_START). For this reason we have ioremap_bot to check when + * we actually run into our mappings setup in the early boot with the VM + * system. This really does become a problem for machines with good amounts + * of RAM. -- Cort + */ +#define VMALLOC_OFFSET (0x1000000) /* 16M */ +#ifdef PPC_PIN_SIZE +#define VMALLOC_START (((ALIGN((long)high_memory, PPC_PIN_SIZE) + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#else +#define VMALLOC_START ((((long)high_memory + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1))) +#endif + +#ifdef CONFIG_KASAN_VMALLOC +#define VMALLOC_END ALIGN_DOWN(ioremap_bot, PAGE_SIZE << KASAN_SHADOW_SCALE_SHIFT) +#else +#define VMALLOC_END ioremap_bot +#endif + +/* + * Bits in a linux-style PTE. These match the bits in the + * (hardware-defined) PowerPC PTE as closely as possible. + */ + +#if defined(CONFIG_40x) +#include <asm/nohash/32/pte-40x.h> +#elif defined(CONFIG_44x) +#include <asm/nohash/32/pte-44x.h> +#elif defined(CONFIG_PPC_85xx) && defined(CONFIG_PTE_64BIT) +#include <asm/nohash/pte-e500.h> +#elif defined(CONFIG_PPC_85xx) +#include <asm/nohash/32/pte-85xx.h> +#elif defined(CONFIG_PPC_8xx) +#include <asm/nohash/32/pte-8xx.h> +#endif + +/* + * Location of the PFN in the PTE. Most 32-bit platforms use the same + * as _PAGE_SHIFT here (ie, naturally aligned). + * Platform who don't just pre-define the value so we don't override it here. + */ +#ifndef PTE_RPN_SHIFT +#define PTE_RPN_SHIFT (PAGE_SHIFT) +#endif + +/* + * The mask covered by the RPN must be a ULL on 32-bit platforms with + * 64-bit PTEs. + */ +#if defined(CONFIG_PPC32) && defined(CONFIG_PTE_64BIT) +#define PTE_RPN_MASK (~((1ULL << PTE_RPN_SHIFT) - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 36 +#else +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) +#define MAX_POSSIBLE_PHYSMEM_BITS 32 +#endif + +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes. 
+ */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL) + +#ifndef __ASSEMBLY__ + +#define pte_clear(mm, addr, ptep) \ + do { pte_update(mm, addr, ptep, ~0, 0, 0); } while (0) + +#ifndef pte_mkwrite +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} +#endif + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +#ifndef pte_wrprotect +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} +#endif + +#ifndef pte_mkexec +static inline pte_t pte_mkexec(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_EXEC); +} +#endif + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) (pmd_val(pmd) & _PMD_BAD) +#define pmd_present(pmd) (pmd_val(pmd) & _PMD_PRESENT_MASK) +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + +/* + * PTE updates. This function is called whenever an existing + * valid PTE is updated. This does -not- include set_pte_at() + * which nowadays only sets a new PTE. + * + * Depending on the type of MMU, we may need to use atomic updates + * and the PTE may be either 32 or 64 bit wide. In the later case, + * when using atomic updates, only the low part of the PTE is + * accessed atomically. + * + * In addition, on 44x, we also maintain a global flag indicating + * that an executable user mapping was modified, which is needed + * to properly flush the virtually tagged instruction cache of + * those implementations. + * + * On the 8xx, the page tables are a bit special. For 16k pages, we have + * 4 identical entries. For 512k pages, we have 128 entries as if it was + * 4k pages, but they are flagged as 512k pages for the hardware. + * For other page sizes, we have a single entry in the table. 
+ */ +#ifdef CONFIG_PPC_8xx +static pmd_t *pmd_off(struct mm_struct *mm, unsigned long addr); +static int hugepd_ok(hugepd_t hpd); + +static int number_of_cells_per_pte(pmd_t *pmd, pte_basic_t val, int huge) +{ + if (!huge) + return PAGE_SIZE / SZ_4K; + else if (hugepd_ok(*((hugepd_t *)pmd))) + return 1; + else if (IS_ENABLED(CONFIG_PPC_4K_PAGES) && !(val & _PAGE_HUGE)) + return SZ_16K / SZ_4K; + else + return SZ_512K / SZ_4K; +} + +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge) +{ + pte_basic_t *entry = (pte_basic_t *)p; + pte_basic_t old = pte_val(*p); + pte_basic_t new = (old & ~(pte_basic_t)clr) | set; + int num, i; + pmd_t *pmd = pmd_off(mm, addr); + + num = number_of_cells_per_pte(pmd, new, huge); + + for (i = 0; i < num; i++, entry++, new += SZ_4K) + *entry = new; + + return old; +} + +#ifdef CONFIG_PPC_16K_PAGES +#define __HAVE_ARCH_PTEP_GET +static inline pte_t ptep_get(pte_t *ptep) +{ + pte_basic_t val = READ_ONCE(ptep->pte); + pte_t pte = {val, val, val, val}; + + return pte; +} +#endif /* CONFIG_PPC_16K_PAGES */ + +#else +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge) +{ + pte_basic_t old = pte_val(*p); + pte_basic_t new = (old & ~(pte_basic_t)clr) | set; + + *p = __pte(new); + +#ifdef CONFIG_44x + if ((old & _PAGE_USER) && (old & _PAGE_EXEC)) + icache_44x_need_flush = 1; +#endif + return old; +} +#endif + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); + return (old & _PAGE_ACCESSED) != 0; +} +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ + __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + return __pte(pte_update(mm, addr, ptep, ~0, 0, 0)); +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +#ifndef ptep_set_wrprotect +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); +} +#endif + +#ifndef __ptep_set_access_flags +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, + pte_t *ptep, pte_t entry, + unsigned long address, + int psize) +{ + unsigned long set = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + int huge = psize > mmu_virtual_psize ? 1 : 0; + + pte_update(vma->vm_mm, address, ptep, 0, set, huge); + + flush_tlb_page(vma, address); +} +#endif + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +/* + * Note that on Book E processors, the pmd contains the kernel virtual + * (lowmem) address of the pte page. The physical address is less useful + * because everything runs with translation enabled (even the TLB miss + * handler). On everything else the pmd contains the physical address + * of the pte page. -- paulus + */ +#ifndef CONFIG_BOOKE +#define pmd_pfn(pmd) (pmd_val(pmd) >> PAGE_SHIFT) +#else +#define pmd_page_vaddr(pmd) \ + ((unsigned long)(pmd_val(pmd) & ~(PTE_TABLE_SIZE - 1))) +#define pmd_pfn(pmd) (__pa(pmd_val(pmd)) >> PAGE_SHIFT) +#endif + +#define pmd_page(pmd) pfn_to_page(pmd_pfn(pmd)) +/* + * Encode and decode a swap entry. 
+ * Note that the bits we use in a PTE for representing a swap entry + * must not include the _PAGE_PRESENT bit. + * -- paulus + */ +#define __swp_type(entry) ((entry).val & 0x1f) +#define __swp_offset(entry) ((entry).val >> 5) +#define __swp_entry(type, offset) ((swp_entry_t) { (type) | ((offset) << 5) }) +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) >> 3 }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val << 3 }) + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_POWERPC_NOHASH_32_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-40x.h b/arch/powerpc/include/asm/nohash/32/pte-40x.h new file mode 100644 index 000000000..acf61242e --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pte-40x.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_PTE_40x_H +#define _ASM_POWERPC_NOHASH_32_PTE_40x_H +#ifdef __KERNEL__ + +/* + * At present, all PowerPC 400-class processors share a similar TLB + * architecture. The instruction and data sides share a unified, + * 64-entry, fully-associative TLB which is maintained totally under + * software control. In addition, the instruction side has a + * hardware-managed, 4-entry, fully-associative TLB which serves as a + * first level to the shared TLB. These two TLBs are known as the UTLB + * and ITLB, respectively (see "mmu.h" for definitions). + * + * There are several potential gotchas here. The 40x hardware TLBLO + * field looks like this: + * + * 0 1 2 3 4 ... 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + * RPN..................... 0 0 EX WR ZSEL....... W I M G + * + * Where possible we make the Linux PTE bits match up with this + * + * - bits 20 and 21 must be cleared, because we use 4k pages (40x can + * support down to 1k pages), this is done in the TLBMiss exception + * handler. + * - We use only zones 0 (for kernel pages) and 1 (for user pages) + * of the 16 available. Bit 24-26 of the TLB are cleared in the TLB + * miss handler. Bit 27 is PAGE_USER, thus selecting the correct + * zone. + * - PRESENT *must* be in the bottom two bits because swap cache + * entries use the top 30 bits. Because 40x doesn't support SMP + * anyway, M is irrelevant so we borrow it for PAGE_PRESENT. Bit 30 + * is cleared in the TLB miss handler before the TLB entry is loaded. + * - All other bits of the PTE are loaded into TLBLO without + * modification, leaving us only the bits 20, 21, 24, 25, 26, 30 for + * software PTE bits. We actually use bits 21, 24, 25, and + * 30 respectively for the software bits: ACCESSED, DIRTY, RW, and + * PRESENT. 
+ */ + +#define _PAGE_GUARDED 0x001 /* G: page is guarded from prefetch */ +#define _PAGE_PRESENT 0x002 /* software: PTE contains a translation */ +#define _PAGE_NO_CACHE 0x004 /* I: caching is inhibited */ +#define _PAGE_WRITETHRU 0x008 /* W: caching is write-through */ +#define _PAGE_USER 0x010 /* matches one of the zone permission bits */ +#define _PAGE_SPECIAL 0x020 /* software: Special page */ +#define _PAGE_DIRTY 0x080 /* software: dirty page */ +#define _PAGE_RW 0x100 /* hardware: WR, anded with dirty in exception */ +#define _PAGE_EXEC 0x200 /* hardware: EX permission */ +#define _PAGE_ACCESSED 0x400 /* software: R: page referenced */ + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +/* cache related flags non existing on 40x */ +#define _PAGE_COHERENT 0 + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +#define _PMD_PRESENT 0x400 /* PMD points to page of PTEs */ +#define _PMD_PRESENT_MASK _PMD_PRESENT +#define _PMD_BAD 0x802 +#define _PMD_SIZE_4M 0x0c0 +#define _PMD_SIZE_16M 0x0e0 +#define _PMD_USER 0 + +#define _PTE_NONE_MASK 0 + +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#define _PAGE_BASE (_PAGE_BASE_NC) + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_40x_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-44x.h b/arch/powerpc/include/asm/nohash/32/pte-44x.h new file mode 100644 index 000000000..78bc304f7 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pte-44x.h @@ -0,0 +1,128 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_PTE_44x_H +#define _ASM_POWERPC_NOHASH_32_PTE_44x_H +#ifdef __KERNEL__ + +/* + * Definitions for PPC440 + * + * Because of the 3 word TLB entries to support 36-bit addressing, + * the attribute are difficult to map in such a fashion that they + * are easily loaded during exception processing. I decided to + * organize the entry so the ERPN is the only portion in the + * upper word of the PTE and the attribute bits below are packed + * in as sensibly as they can be in the area below a 4KB page size + * oriented RPN. This at least makes it easy to load the RPN and + * ERPN fields in the TLB. -Matt + * + * This isn't entirely true anymore, at least some bits are now + * easier to move into the TLB from the PTE. -BenH. + * + * Note that these bits preclude future use of a page size + * less than 4KB. + * + * + * PPC 440 core has following TLB attribute fields; + * + * TLB1: + * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + * RPN................................. - - - - - - ERPN....... + * + * TLB2: + * 0 1 2 3 4 ... 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 + * - - - - - - U0 U1 U2 U3 W I M G E - UX UW UR SX SW SR + * + * Newer 440 cores (440x6 as used on AMCC 460EX/460GT) have additional + * TLB2 storage attribute fields. 
Those are: + * + * TLB2: + * 0...10 11 12 13 14 15 16...31 + * no change WL1 IL1I IL1D IL2I IL2D no change + * + * There are some constrains and options, to decide mapping software bits + * into TLB entry. + * + * - PRESENT *must* be in the bottom three bits because swap cache + * entries use the top 29 bits for TLB2. + * + * - CACHE COHERENT bit (M) has no effect on original PPC440 cores, + * because it doesn't support SMP. However, some later 460 variants + * have -some- form of SMP support and so I keep the bit there for + * future use + * + * With the PPC 44x Linux implementation, the 0-11th LSBs of the PTE are used + * for memory protection related functions (see PTE structure in + * include/asm-ppc/mmu.h). The _PAGE_XXX definitions in this file map to the + * above bits. Note that the bit values are CPU specific, not architecture + * specific. + * + * The kernel PTE entry holds an arch-dependent swp_entry structure under + * certain situations. In other words, in such situations some portion of + * the PTE bits are used as a swp_entry. In the PPC implementation, the + * 3-24th LSB are shared with swp_entry, however the 0-2nd three LSB still + * hold protection values. That means the three protection bits are + * reserved for both PTE and SWAP entry at the most significant three + * LSBs. + * + * There are three protection bits available for SWAP entry: + * _PAGE_PRESENT + * _PAGE_HASHPTE (if HW has) + * + * So those three bits have to be inside of 0-2nd LSB of PTE. + * + */ + +#define _PAGE_PRESENT 0x00000001 /* S: PTE valid */ +#define _PAGE_RW 0x00000002 /* S: Write permission */ +#define _PAGE_EXEC 0x00000004 /* H: Execute permission */ +#define _PAGE_ACCESSED 0x00000008 /* S: Page referenced */ +#define _PAGE_DIRTY 0x00000010 /* S: Page dirty */ +#define _PAGE_SPECIAL 0x00000020 /* S: Special page */ +#define _PAGE_USER 0x00000040 /* S: User page */ +#define _PAGE_ENDIAN 0x00000080 /* H: E bit */ +#define _PAGE_GUARDED 0x00000100 /* H: G bit */ +#define _PAGE_COHERENT 0x00000200 /* H: M bit */ +#define _PAGE_NO_CACHE 0x00000400 /* H: I bit */ +#define _PAGE_WRITETHRU 0x00000800 /* H: W bit */ + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +/* TODO: Add large page lowmem mapping support */ +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 + +/* ERPN in a PTE never gets cleared, ignore it */ +#define _PTE_NONE_MASK 0xffffffff00000000ULL + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. 
+ */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#if defined(CONFIG_SMP) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_44x_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-85xx.h b/arch/powerpc/include/asm/nohash/32/pte-85xx.h new file mode 100644 index 000000000..93fb8e11a --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pte-85xx.h @@ -0,0 +1,74 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_PTE_85xx_H +#define _ASM_POWERPC_NOHASH_32_PTE_85xx_H +#ifdef __KERNEL__ + +/* PTE bit definitions for Freescale BookE SW loaded TLB MMU based + * processors + * + MMU Assist Register 3: + + 32 33 34 35 36 ... 50 51 52 53 54 55 56 57 58 59 60 61 62 63 + RPN...................... 0 0 U0 U1 U2 U3 UX SX UW SW UR SR + + - PRESENT *must* be in the bottom three bits because swap cache + entries use the top 29 bits. + +*/ + +/* Definitions for FSL Book-E Cores */ +#define _PAGE_PRESENT 0x00001 /* S: PTE contains a translation */ +#define _PAGE_USER 0x00002 /* S: User page (maps to UR) */ +#define _PAGE_RW 0x00004 /* S: Write permission (SW) */ +#define _PAGE_DIRTY 0x00008 /* S: Page dirty */ +#define _PAGE_EXEC 0x00010 /* H: SX permission */ +#define _PAGE_ACCESSED 0x00020 /* S: Page referenced */ + +#define _PAGE_ENDIAN 0x00040 /* H: E bit */ +#define _PAGE_GUARDED 0x00080 /* H: G bit */ +#define _PAGE_COHERENT 0x00100 /* H: M bit */ +#define _PAGE_NO_CACHE 0x00200 /* H: I bit */ +#define _PAGE_WRITETHRU 0x00400 /* H: W bit */ +#define _PAGE_SPECIAL 0x00800 /* S: Special page */ + +#define _PAGE_KERNEL_RO 0 +#define _PAGE_KERNEL_ROX _PAGE_EXEC +#define _PAGE_KERNEL_RW (_PAGE_DIRTY | _PAGE_RW) +#define _PAGE_KERNEL_RWX (_PAGE_DIRTY | _PAGE_RW | _PAGE_EXEC) + +/* No page size encoding in the linux PTE */ +#define _PAGE_PSIZE 0 + +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 + +#define _PTE_NONE_MASK 0 + +#define PTE_WIMGE_SHIFT (6) + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. 
+ */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED) +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_EXEC) + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_FSL_85xx_H */ diff --git a/arch/powerpc/include/asm/nohash/32/pte-8xx.h b/arch/powerpc/include/asm/nohash/32/pte-8xx.h new file mode 100644 index 000000000..0238e6bd0 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/32/pte-8xx.h @@ -0,0 +1,193 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_32_PTE_8xx_H +#define _ASM_POWERPC_NOHASH_32_PTE_8xx_H +#ifdef __KERNEL__ + +/* + * The PowerPC MPC8xx uses a TLB with hardware assisted, software tablewalk. + * We also use the two level tables, but we can put the real bits in them + * needed for the TLB and tablewalk. These definitions require Mx_CTR.PPM = 0, + * Mx_CTR.PPCS = 0, and MD_CTR.TWAM = 1. The level 2 descriptor has + * additional page protection (when Mx_CTR.PPCS = 1) that allows TLB hit + * based upon user/super access. The TLB does not have accessed nor write + * protect. We assume that if the TLB get loaded with an entry it is + * accessed, and overload the changed bit for write protect. We use + * two bits in the software pte that are supposed to be set to zero in + * the TLB entry (24 and 25) for these indicators. Although the level 1 + * descriptor contains the guarded and writethrough/copyback bits, we can + * set these at the page level since they get copied from the Mx_TWC + * register when the TLB entry is loaded. We will use bit 27 for guard, since + * that is where it exists in the MD_TWC, and bit 26 for writethrough. + * These will get masked from the level 2 descriptor at TLB load time, and + * copied to the MD_TWC before it gets loaded. + * Large page sizes added. We currently support two sizes, 4K and 8M. + * This also allows a TLB hander optimization because we can directly + * load the PMD into MD_TWC. The 8M pages are only used for kernel + * mapping of well known areas. The PMD (PGD) entries contain control + * flags in addition to the address, so care must be taken that the + * software no longer assumes these are only pointers. + */ + +/* Definitions for 8xx embedded chips. */ +#define _PAGE_PRESENT 0x0001 /* V: Page is valid */ +#define _PAGE_NO_CACHE 0x0002 /* CI: cache inhibit */ +#define _PAGE_SH 0x0004 /* SH: No ASID (context) compare */ +#define _PAGE_SPS 0x0008 /* SPS: Small Page Size (1 if 16k, 512k or 8M)*/ +#define _PAGE_DIRTY 0x0100 /* C: page changed */ + +/* These 4 software bits must be masked out when the L2 entry is loaded + * into the TLB. 
+ */ +#define _PAGE_GUARDED 0x0010 /* Copied to L1 G entry in DTLB */ +#define _PAGE_ACCESSED 0x0020 /* Copied to L1 APG 1 entry in I/DTLB */ +#define _PAGE_EXEC 0x0040 /* Copied to PP (bit 21) in ITLB */ +#define _PAGE_SPECIAL 0x0080 /* SW entry */ + +#define _PAGE_NA 0x0200 /* Supervisor NA, User no access */ +#define _PAGE_RO 0x0600 /* Supervisor RO, User no access */ + +#define _PAGE_HUGE 0x0800 /* Copied to L1 PS bit 29 */ + +/* cache related flags non existing on 8xx */ +#define _PAGE_COHERENT 0 +#define _PAGE_WRITETHRU 0 + +#define _PAGE_KERNEL_RO (_PAGE_SH | _PAGE_RO) +#define _PAGE_KERNEL_ROX (_PAGE_SH | _PAGE_RO | _PAGE_EXEC) +#define _PAGE_KERNEL_RW (_PAGE_SH | _PAGE_DIRTY) +#define _PAGE_KERNEL_RWX (_PAGE_SH | _PAGE_DIRTY | _PAGE_EXEC) + +#define _PMD_PRESENT 0x0001 +#define _PMD_PRESENT_MASK _PMD_PRESENT +#define _PMD_BAD 0x0f90 +#define _PMD_PAGE_MASK 0x000c +#define _PMD_PAGE_8M 0x000c +#define _PMD_PAGE_512K 0x0004 +#define _PMD_ACCESSED 0x0020 /* APG 1 */ +#define _PMD_USER 0x0040 /* APG 2 */ + +#define _PTE_NONE_MASK 0 + +#ifdef CONFIG_PPC_16K_PAGES +#define _PAGE_PSIZE _PAGE_SPS +#else +#define _PAGE_PSIZE 0 +#endif + +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#define _PAGE_BASE (_PAGE_BASE_NC) + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE | _PAGE_NA) +#define PAGE_SHARED __pgprot(_PAGE_BASE) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_EXEC) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_RO) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_RO) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_RO | _PAGE_EXEC) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RO); +} + +#define pte_wrprotect pte_wrprotect + +static inline int pte_read(pte_t pte) +{ + return (pte_val(pte) & _PAGE_RO) != _PAGE_NA; +} + +#define pte_read pte_read + +static inline int pte_write(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_RO); +} + +#define pte_write pte_write + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RO); +} + +#define pte_mkwrite pte_mkwrite + +static inline bool pte_user(pte_t pte) +{ + return !(pte_val(pte) & _PAGE_SH); +} + +#define pte_user pte_user + +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SH); +} + +#define pte_mkprivileged pte_mkprivileged + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_SH); +} + +#define pte_mkuser pte_mkuser + +static inline pte_t pte_mkhuge(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPS | _PAGE_HUGE); +} + +#define pte_mkhuge pte_mkhuge + +static inline pte_basic_t pte_update(struct mm_struct *mm, unsigned long addr, pte_t *p, + unsigned long clr, unsigned long set, int huge); + +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep) +{ + pte_update(mm, addr, ptep, 0, _PAGE_RO, 0); +} +#define ptep_set_wrprotect ptep_set_wrprotect + +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, pte_t *ptep, + pte_t entry, unsigned long address, int psize) +{ + unsigned long set = pte_val(entry) & (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_EXEC); + unsigned long clr = ~pte_val(entry) & _PAGE_RO; + int huge = psize > mmu_virtual_psize ? 
1 : 0; + + pte_update(vma->vm_mm, address, ptep, clr, set, huge); + + flush_tlb_page(vma, address); +} +#define __ptep_set_access_flags __ptep_set_access_flags + +static inline unsigned long pgd_leaf_size(pgd_t pgd) +{ + if (pgd_val(pgd) & _PMD_PAGE_8M) + return SZ_8M; + return SZ_4M; +} + +#define pgd_leaf_size pgd_leaf_size + +static inline unsigned long pte_leaf_size(pte_t pte) +{ + pte_basic_t val = pte_val(pte); + + if (val & _PAGE_HUGE) + return SZ_512K; + if (val & _PAGE_SPS) + return SZ_16K; + return SZ_4K; +} + +#define pte_leaf_size pte_leaf_size + +#endif + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_32_PTE_8xx_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgalloc.h b/arch/powerpc/include/asm/nohash/64/pgalloc.h new file mode 100644 index 000000000..e50b211be --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/pgalloc.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +#ifndef _ASM_POWERPC_PGALLOC_64_H +#define _ASM_POWERPC_PGALLOC_64_H +/* + */ + +#include <linux/slab.h> +#include <linux/cpumask.h> +#include <linux/percpu.h> + +struct vmemmap_backing { + struct vmemmap_backing *list; + unsigned long phys; + unsigned long virt_addr; +}; +extern struct vmemmap_backing *vmemmap_list; + +static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + p4d_set(p4d, (unsigned long)pud); +} + +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(PGT_CACHE(PUD_INDEX_SIZE), + pgtable_gfp_flags(mm, GFP_KERNEL)); +} + +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + kmem_cache_free(PGT_CACHE(PUD_INDEX_SIZE), pud); +} + +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + pud_set(pud, (unsigned long)pmd); +} + +static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd, + pte_t *pte) +{ + pmd_set(pmd, (unsigned long)pte); +} + +static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, + pgtable_t pte_page) +{ + pmd_set(pmd, (unsigned long)pte_page); +} + +static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + return kmem_cache_alloc(PGT_CACHE(PMD_CACHE_INDEX), + pgtable_gfp_flags(mm, GFP_KERNEL)); +} + +static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd) +{ + kmem_cache_free(PGT_CACHE(PMD_CACHE_INDEX), pmd); +} + +#define __pmd_free_tlb(tlb, pmd, addr) \ + pgtable_free_tlb(tlb, pmd, PMD_CACHE_INDEX) +#define __pud_free_tlb(tlb, pud, addr) \ + pgtable_free_tlb(tlb, pud, PUD_INDEX_SIZE) + +#endif /* _ASM_POWERPC_PGALLOC_64_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable-4k.h b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h new file mode 100644 index 000000000..10f5cf444 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/pgtable-4k.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H + +#include <asm-generic/pgtable-nop4d.h> + +/* + * Entries per page directory level. The PTE level must use a 64b record + * for each page table entry. The PMD and PGD level use a 32b record for + * each entry by assuming that each entry is page aligned. 
+ */ +#define PTE_INDEX_SIZE 9 +#define PMD_INDEX_SIZE 7 +#define PUD_INDEX_SIZE 9 +#define PGD_INDEX_SIZE 9 + +#ifndef __ASSEMBLY__ +#define PTE_TABLE_SIZE (sizeof(pte_t) << PTE_INDEX_SIZE) +#define PMD_TABLE_SIZE (sizeof(pmd_t) << PMD_INDEX_SIZE) +#define PUD_TABLE_SIZE (sizeof(pud_t) << PUD_INDEX_SIZE) +#define PGD_TABLE_SIZE (sizeof(pgd_t) << PGD_INDEX_SIZE) +#endif /* __ASSEMBLY__ */ + +#define PTRS_PER_PTE (1 << PTE_INDEX_SIZE) +#define PTRS_PER_PMD (1 << PMD_INDEX_SIZE) +#define PTRS_PER_PUD (1 << PUD_INDEX_SIZE) +#define PTRS_PER_PGD (1 << PGD_INDEX_SIZE) + +/* PMD_SHIFT determines what a second-level page table entry can map */ +#define PMD_SHIFT (PAGE_SHIFT + PTE_INDEX_SIZE) +#define PMD_SIZE (1UL << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE-1)) + +/* PUD_SHIFT determines what a third-level page table entry can map */ +#define PUD_SHIFT (PMD_SHIFT + PMD_INDEX_SIZE) +#define PUD_SIZE (1UL << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE-1)) + +/* PGDIR_SHIFT determines what a fourth-level page table entry can map */ +#define PGDIR_SHIFT (PUD_SHIFT + PUD_INDEX_SIZE) +#define PGDIR_SIZE (1UL << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE-1)) + +/* Bits to mask out from a PMD to get to the PTE page */ +#define PMD_MASKED_BITS 0 +/* Bits to mask out from a PUD to get to the PMD page */ +#define PUD_MASKED_BITS 0 +/* Bits to mask out from a P4D to get to the PUD page */ +#define P4D_MASKED_BITS 0 + + +/* + * 4-level page tables related bits + */ + +#define p4d_none(p4d) (!p4d_val(p4d)) +#define p4d_bad(p4d) (p4d_val(p4d) == 0) +#define p4d_present(p4d) (p4d_val(p4d) != 0) + +#ifndef __ASSEMBLY__ + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + return (pud_t *) (p4d_val(p4d) & ~P4D_MASKED_BITS); +} + +static inline void p4d_clear(p4d_t *p4dp) +{ + *p4dp = __p4d(0); +} + +static inline pte_t p4d_pte(p4d_t p4d) +{ + return __pte(p4d_val(p4d)); +} + +static inline p4d_t pte_p4d(pte_t pte) +{ + return __p4d(pte_val(pte)); +} +extern struct page *p4d_page(p4d_t p4d); + +#endif /* !__ASSEMBLY__ */ + +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %08lx.\n", __FILE__, __LINE__, pud_val(e)) + +/* + * On all 4K setups, remap_4k_pfn() equates to remap_pfn_range() */ +#define remap_4k_pfn(vma, addr, pfn, prot) \ + remap_pfn_range((vma), (addr), (pfn), PAGE_SIZE, (prot)) + +#endif /* _ _ASM_POWERPC_NOHASH_64_PGTABLE_4K_H */ diff --git a/arch/powerpc/include/asm/nohash/64/pgtable.h b/arch/powerpc/include/asm/nohash/64/pgtable.h new file mode 100644 index 000000000..00a003d36 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/64/pgtable.h @@ -0,0 +1,310 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_64_PGTABLE_H +#define _ASM_POWERPC_NOHASH_64_PGTABLE_H +/* + * This file contains the functions and defines necessary to modify and use + * the ppc64 non-hashed page table. + */ + +#include <linux/sizes.h> + +#include <asm/nohash/64/pgtable-4k.h> +#include <asm/barrier.h> +#include <asm/asm-const.h> + +/* + * Size of EA range mapped by our pagetables. 
+ */ +#define PGTABLE_EADDR_SIZE (PTE_INDEX_SIZE + PMD_INDEX_SIZE + \ + PUD_INDEX_SIZE + PGD_INDEX_SIZE + PAGE_SHIFT) +#define PGTABLE_RANGE (ASM_CONST(1) << PGTABLE_EADDR_SIZE) + +#define PMD_CACHE_INDEX PMD_INDEX_SIZE +#define PUD_CACHE_INDEX PUD_INDEX_SIZE + +/* + * Define the address range of the kernel non-linear virtual area + */ +#define KERN_VIRT_START ASM_CONST(0xc000100000000000) +#define KERN_VIRT_SIZE ASM_CONST(0x0000100000000000) + +/* + * The vmalloc space starts at the beginning of that region, and + * occupies a quarter of it on Book3E + * (we keep a quarter for the virtual memmap) + */ +#define VMALLOC_START KERN_VIRT_START +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 2) +#define VMALLOC_END (VMALLOC_START + VMALLOC_SIZE) + +/* + * The third quarter of the kernel virtual space is used for IO mappings, + * it's itself carved into the PIO region (ISA and PHB IO space) and + * the ioremap space + * + * ISA_IO_BASE = KERN_IO_START, 64K reserved area + * PHB_IO_BASE = ISA_IO_BASE + 64K to ISA_IO_BASE + 2G, PHB IO spaces + * IOREMAP_BASE = ISA_IO_BASE + 2G to KERN_IO_START + KERN_IO_SIZE + */ +#define KERN_IO_START (KERN_VIRT_START + (KERN_VIRT_SIZE >> 1)) +#define KERN_IO_SIZE (KERN_VIRT_SIZE >> 2) +#define FULL_IO_SIZE 0x80000000ul +#define ISA_IO_BASE (KERN_IO_START) +#define ISA_IO_END (KERN_IO_START + 0x10000ul) +#define PHB_IO_BASE (ISA_IO_END) +#define PHB_IO_END (KERN_IO_START + FULL_IO_SIZE) +#define IOREMAP_BASE (PHB_IO_END) +#define IOREMAP_START (ioremap_bot) +#define IOREMAP_END (KERN_IO_START + KERN_IO_SIZE - FIXADDR_SIZE) +#define FIXADDR_SIZE SZ_32M + +/* + * Defines the address of the vmemap area, in its own region on + * after the vmalloc space on Book3E + */ +#define VMEMMAP_BASE VMALLOC_END +#define VMEMMAP_END KERN_IO_START +#define vmemmap ((struct page *)VMEMMAP_BASE) + + +/* + * Include the PTE bits definitions + */ +#include <asm/nohash/pte-e500.h> + +#define PTE_RPN_MASK (~((1UL << PTE_RPN_SHIFT) - 1)) + +/* + * _PAGE_CHG_MASK masks of bits that are to be preserved across + * pgprot changes. 
+ */ +#define _PAGE_CHG_MASK (PTE_RPN_MASK | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_SPECIAL) + +#define H_PAGE_4K_PFN 0 + +#ifndef __ASSEMBLY__ +/* pte_clear moved to later in this file */ + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_RW); +} + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_RW); +} + +#define PMD_BAD_BITS (PTE_TABLE_SIZE-1) +#define PUD_BAD_BITS (PMD_TABLE_SIZE-1) + +static inline void pmd_set(pmd_t *pmdp, unsigned long val) +{ + *pmdp = __pmd(val); +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + *pmdp = __pmd(0); +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +#define pmd_none(pmd) (!pmd_val(pmd)) +#define pmd_bad(pmd) (!is_kernel_addr(pmd_val(pmd)) \ + || (pmd_val(pmd) & PMD_BAD_BITS)) +#define pmd_present(pmd) (!pmd_none(pmd)) +#define pmd_page_vaddr(pmd) (pmd_val(pmd) & ~PMD_MASKED_BITS) +extern struct page *pmd_page(pmd_t pmd); +#define pmd_pfn(pmd) (page_to_pfn(pmd_page(pmd))) + +static inline void pud_set(pud_t *pudp, unsigned long val) +{ + *pudp = __pud(val); +} + +static inline void pud_clear(pud_t *pudp) +{ + *pudp = __pud(0); +} + +#define pud_none(pud) (!pud_val(pud)) +#define pud_bad(pud) (!is_kernel_addr(pud_val(pud)) \ + || (pud_val(pud) & PUD_BAD_BITS)) +#define pud_present(pud) (pud_val(pud) != 0) + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)(pud_val(pud) & ~PUD_MASKED_BITS); +} + +extern struct page *pud_page(pud_t pud); + +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + +static inline pud_t pte_pud(pte_t pte) +{ + return __pud(pte_val(pte)); +} +#define pud_write(pud) pte_write(pud_pte(pud)) +#define p4d_write(pgd) pte_write(p4d_pte(p4d)) + +static inline void p4d_set(p4d_t *p4dp, unsigned long val) +{ + *p4dp = __p4d(val); +} + +/* Atomic PTE updates */ +static inline unsigned long pte_update(struct mm_struct *mm, + unsigned long addr, + pte_t *ptep, unsigned long clr, + unsigned long set, + int huge) +{ + unsigned long old = pte_val(*ptep); + *ptep = __pte((old & ~clr) | set); + + /* huge pages use the old page table lock */ + if (!huge) + assert_pte_locked(mm, addr); + + return old; +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +static inline int __ptep_test_and_clear_young(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old; + + if (!pte_young(*ptep)) + return 0; + old = pte_update(mm, addr, ptep, _PAGE_ACCESSED, 0, 0); + return (old & _PAGE_ACCESSED) != 0; +} +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +#define ptep_test_and_clear_young(__vma, __addr, __ptep) \ +({ \ + int __r; \ + __r = __ptep_test_and_clear_young((__vma)->vm_mm, __addr, __ptep); \ + __r; \ +}) + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, + pte_t *ptep) +{ + + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0, 0); +} + +#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT +static inline void huge_ptep_set_wrprotect(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + if ((pte_val(*ptep) & _PAGE_RW) == 0) + return; + + pte_update(mm, addr, ptep, _PAGE_RW, 0, 1); +} + +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +#define 
ptep_clear_flush_young(__vma, __address, __ptep) \ +({ \ + int __young = __ptep_test_and_clear_young((__vma)->vm_mm, __address, \ + __ptep); \ + __young; \ +}) + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + unsigned long old = pte_update(mm, addr, ptep, ~0UL, 0, 0); + return __pte(old); +} + +static inline void pte_clear(struct mm_struct *mm, unsigned long addr, + pte_t * ptep) +{ + pte_update(mm, addr, ptep, ~0UL, 0, 0); +} + + +/* Set the dirty and/or accessed bits atomically in a linux PTE */ +static inline void __ptep_set_access_flags(struct vm_area_struct *vma, + pte_t *ptep, pte_t entry, + unsigned long address, + int psize) +{ + unsigned long bits = pte_val(entry) & + (_PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_RW | _PAGE_EXEC); + + unsigned long old = pte_val(*ptep); + *ptep = __pte(old | bits); + + flush_tlb_page(vma, address); +} + +#define pte_ERROR(e) \ + pr_err("%s:%d: bad pte %08lx.\n", __FILE__, __LINE__, pte_val(e)) +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %08lx.\n", __FILE__, __LINE__, pmd_val(e)) +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd %08lx.\n", __FILE__, __LINE__, pgd_val(e)) + +/* Encode and de-code a swap entry */ +#define MAX_SWAPFILES_CHECK() do { \ + BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > SWP_TYPE_BITS); \ + } while (0) + +#define SWP_TYPE_BITS 5 +#define __swp_type(x) (((x).val >> _PAGE_BIT_SWAP_TYPE) \ + & ((1UL << SWP_TYPE_BITS) - 1)) +#define __swp_offset(x) ((x).val >> PTE_RPN_SHIFT) +#define __swp_entry(type, offset) ((swp_entry_t) { \ + ((type) << _PAGE_BIT_SWAP_TYPE) \ + | ((offset) << PTE_RPN_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val((pte)) }) +#define __swp_entry_to_pte(x) __pte((x).val) + +int map_kernel_page(unsigned long ea, unsigned long pa, pgprot_t prot); +void unmap_kernel_page(unsigned long va); +extern int __meminit vmemmap_create_mapping(unsigned long start, + unsigned long page_size, + unsigned long phys); +extern void vmemmap_remove_mapping(unsigned long start, + unsigned long page_size); +void __patch_exception(int exc, unsigned long addr); +#define patch_exception(exc, name) do { \ + extern unsigned int name; \ + __patch_exception((exc), (unsigned long)&name); \ +} while (0) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_NOHASH_64_PGTABLE_H */ diff --git a/arch/powerpc/include/asm/nohash/hugetlb-e500.h b/arch/powerpc/include/asm/nohash/hugetlb-e500.h new file mode 100644 index 000000000..8f04ad20e --- /dev/null +++ b/arch/powerpc/include/asm/nohash/hugetlb-e500.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_HUGETLB_E500_H +#define _ASM_POWERPC_NOHASH_HUGETLB_E500_H + +static inline pte_t *hugepd_page(hugepd_t hpd) +{ + if (WARN_ON(!hugepd_ok(hpd))) + return NULL; + + return (pte_t *)((hpd_val(hpd) & ~HUGEPD_SHIFT_MASK) | PD_HUGE); +} + +static inline unsigned int hugepd_shift(hugepd_t hpd) +{ + return hpd_val(hpd) & HUGEPD_SHIFT_MASK; +} + +static inline pte_t *hugepte_offset(hugepd_t hpd, unsigned long addr, + unsigned int pdshift) +{ + /* + * On FSL BookE, we have multiple higher-level table entries that + * point to the same hugepte. Just use the first one since they're all + * identical. So for that case, idx=0. 
+ */ + return hugepd_page(hpd); +} + +void flush_hugetlb_page(struct vm_area_struct *vma, unsigned long vmaddr); + +static inline void hugepd_populate(hugepd_t *hpdp, pte_t *new, unsigned int pshift) +{ + /* We use the old format for PPC_E500 */ + *hpdp = __hugepd(((unsigned long)new & ~PD_HUGE) | pshift); +} + +static inline int check_and_get_huge_psize(int shift) +{ + if (shift & 1) /* Not a power of 4 */ + return -EINVAL; + + return shift_to_mmu_psize(shift); +} + +#endif /* _ASM_POWERPC_NOHASH_HUGETLB_E500_H */ diff --git a/arch/powerpc/include/asm/nohash/kup-booke.h b/arch/powerpc/include/asm/nohash/kup-booke.h new file mode 100644 index 000000000..49bb41ed0 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/kup-booke.h @@ -0,0 +1,110 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_KUP_BOOKE_H_ +#define _ASM_POWERPC_KUP_BOOKE_H_ + +#include <asm/bug.h> + +#ifdef CONFIG_PPC_KUAP + +#ifdef __ASSEMBLY__ + +.macro kuap_check_amr gpr1, gpr2 +.endm + +#else + +#include <linux/jump_label.h> +#include <linux/sched.h> + +#include <asm/reg.h> + +extern struct static_key_false disable_kuap_key; + +static __always_inline bool kuap_is_disabled(void) +{ + return static_branch_unlikely(&disable_kuap_key); +} + +static inline void __kuap_lock(void) +{ + mtspr(SPRN_PID, 0); + isync(); +} + +static inline void __kuap_save_and_lock(struct pt_regs *regs) +{ + regs->kuap = mfspr(SPRN_PID); + mtspr(SPRN_PID, 0); + isync(); +} + +static inline void kuap_user_restore(struct pt_regs *regs) +{ + if (kuap_is_disabled()) + return; + + mtspr(SPRN_PID, current->thread.pid); + + /* Context synchronisation is performed by rfi */ +} + +static inline void __kuap_kernel_restore(struct pt_regs *regs, unsigned long kuap) +{ + if (regs->kuap) + mtspr(SPRN_PID, current->thread.pid); + + /* Context synchronisation is performed by rfi */ +} + +static inline unsigned long __kuap_get_and_assert_locked(void) +{ + unsigned long kuap = mfspr(SPRN_PID); + + if (IS_ENABLED(CONFIG_PPC_KUAP_DEBUG)) + WARN_ON_ONCE(kuap); + + return kuap; +} + +static inline void __allow_user_access(void __user *to, const void __user *from, + unsigned long size, unsigned long dir) +{ + mtspr(SPRN_PID, current->thread.pid); + isync(); +} + +static inline void __prevent_user_access(unsigned long dir) +{ + mtspr(SPRN_PID, 0); + isync(); +} + +static inline unsigned long __prevent_user_access_return(void) +{ + unsigned long flags = mfspr(SPRN_PID); + + mtspr(SPRN_PID, 0); + isync(); + + return flags; +} + +static inline void __restore_user_access(unsigned long flags) +{ + if (flags) { + mtspr(SPRN_PID, current->thread.pid); + isync(); + } +} + +static inline bool +__bad_kuap_fault(struct pt_regs *regs, unsigned long address, bool is_write) +{ + return !regs->kuap; +} + +#endif /* !__ASSEMBLY__ */ + +#endif /* CONFIG_PPC_KUAP */ + +#endif /* _ASM_POWERPC_KUP_BOOKE_H_ */ diff --git a/arch/powerpc/include/asm/nohash/mmu-e500.h b/arch/powerpc/include/asm/nohash/mmu-e500.h new file mode 100644 index 000000000..e43a418d3 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/mmu-e500.h @@ -0,0 +1,324 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_MMU_BOOK3E_H_ +#define _ASM_POWERPC_MMU_BOOK3E_H_ +/* + * Freescale Book-E/Book-3e (ISA 2.06+) MMU support + */ + +/* Book-3e defined page sizes */ +#define BOOK3E_PAGESZ_1K 0 +#define BOOK3E_PAGESZ_2K 1 +#define BOOK3E_PAGESZ_4K 2 +#define BOOK3E_PAGESZ_8K 3 +#define BOOK3E_PAGESZ_16K 4 +#define BOOK3E_PAGESZ_32K 5 +#define BOOK3E_PAGESZ_64K 6 +#define BOOK3E_PAGESZ_128K 7 +#define 
BOOK3E_PAGESZ_256K 8 +#define BOOK3E_PAGESZ_512K 9 +#define BOOK3E_PAGESZ_1M 10 +#define BOOK3E_PAGESZ_2M 11 +#define BOOK3E_PAGESZ_4M 12 +#define BOOK3E_PAGESZ_8M 13 +#define BOOK3E_PAGESZ_16M 14 +#define BOOK3E_PAGESZ_32M 15 +#define BOOK3E_PAGESZ_64M 16 +#define BOOK3E_PAGESZ_128M 17 +#define BOOK3E_PAGESZ_256M 18 +#define BOOK3E_PAGESZ_512M 19 +#define BOOK3E_PAGESZ_1GB 20 +#define BOOK3E_PAGESZ_2GB 21 +#define BOOK3E_PAGESZ_4GB 22 +#define BOOK3E_PAGESZ_8GB 23 +#define BOOK3E_PAGESZ_16GB 24 +#define BOOK3E_PAGESZ_32GB 25 +#define BOOK3E_PAGESZ_64GB 26 +#define BOOK3E_PAGESZ_128GB 27 +#define BOOK3E_PAGESZ_256GB 28 +#define BOOK3E_PAGESZ_512GB 29 +#define BOOK3E_PAGESZ_1TB 30 +#define BOOK3E_PAGESZ_2TB 31 + +/* MAS registers bit definitions */ + +#define MAS0_TLBSEL_MASK 0x30000000 +#define MAS0_TLBSEL_SHIFT 28 +#define MAS0_TLBSEL(x) (((x) << MAS0_TLBSEL_SHIFT) & MAS0_TLBSEL_MASK) +#define MAS0_GET_TLBSEL(mas0) (((mas0) & MAS0_TLBSEL_MASK) >> \ + MAS0_TLBSEL_SHIFT) +#define MAS0_ESEL_MASK 0x0FFF0000 +#define MAS0_ESEL_SHIFT 16 +#define MAS0_ESEL(x) (((x) << MAS0_ESEL_SHIFT) & MAS0_ESEL_MASK) +#define MAS0_NV(x) ((x) & 0x00000FFF) +#define MAS0_HES 0x00004000 +#define MAS0_WQ_ALLWAYS 0x00000000 +#define MAS0_WQ_COND 0x00001000 +#define MAS0_WQ_CLR_RSRV 0x00002000 + +#define MAS1_VALID 0x80000000 +#define MAS1_IPROT 0x40000000 +#define MAS1_TID(x) (((x) << 16) & 0x3FFF0000) +#define MAS1_IND 0x00002000 +#define MAS1_TS 0x00001000 +#define MAS1_TSIZE_MASK 0x00000f80 +#define MAS1_TSIZE_SHIFT 7 +#define MAS1_TSIZE(x) (((x) << MAS1_TSIZE_SHIFT) & MAS1_TSIZE_MASK) +#define MAS1_GET_TSIZE(mas1) (((mas1) & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT) + +#define MAS2_EPN (~0xFFFUL) +#define MAS2_X0 0x00000040 +#define MAS2_X1 0x00000020 +#define MAS2_W 0x00000010 +#define MAS2_I 0x00000008 +#define MAS2_M 0x00000004 +#define MAS2_G 0x00000002 +#define MAS2_E 0x00000001 +#define MAS2_WIMGE_MASK 0x0000001f +#define MAS2_EPN_MASK(size) (~0 << (size + 10)) + +#define MAS3_RPN 0xFFFFF000 +#define MAS3_U0 0x00000200 +#define MAS3_U1 0x00000100 +#define MAS3_U2 0x00000080 +#define MAS3_U3 0x00000040 +#define MAS3_UX 0x00000020 +#define MAS3_SX 0x00000010 +#define MAS3_UW 0x00000008 +#define MAS3_SW 0x00000004 +#define MAS3_UR 0x00000002 +#define MAS3_SR 0x00000001 +#define MAS3_BAP_MASK 0x0000003f +#define MAS3_SPSIZE 0x0000003e +#define MAS3_SPSIZE_SHIFT 1 + +#define MAS4_TLBSEL_MASK MAS0_TLBSEL_MASK +#define MAS4_TLBSELD(x) MAS0_TLBSEL(x) +#define MAS4_INDD 0x00008000 /* Default IND */ +#define MAS4_TSIZED(x) MAS1_TSIZE(x) +#define MAS4_X0D 0x00000040 +#define MAS4_X1D 0x00000020 +#define MAS4_WD 0x00000010 +#define MAS4_ID 0x00000008 +#define MAS4_MD 0x00000004 +#define MAS4_GD 0x00000002 +#define MAS4_ED 0x00000001 +#define MAS4_WIMGED_MASK 0x0000001f /* Default WIMGE */ +#define MAS4_WIMGED_SHIFT 0 +#define MAS4_VLED MAS4_X1D /* Default VLE */ +#define MAS4_ACMD 0x000000c0 /* Default ACM */ +#define MAS4_ACMD_SHIFT 6 +#define MAS4_TSIZED_MASK 0x00000f80 /* Default TSIZE */ +#define MAS4_TSIZED_SHIFT 7 + +#define MAS5_SGS 0x80000000 + +#define MAS6_SPID0 0x3FFF0000 +#define MAS6_SPID1 0x00007FFE +#define MAS6_ISIZE(x) MAS1_TSIZE(x) +#define MAS6_SAS 0x00000001 +#define MAS6_SPID MAS6_SPID0 +#define MAS6_SIND 0x00000002 /* Indirect page */ +#define MAS6_SIND_SHIFT 1 +#define MAS6_SPID_MASK 0x3fff0000 +#define MAS6_SPID_SHIFT 16 +#define MAS6_ISIZE_MASK 0x00000f80 +#define MAS6_ISIZE_SHIFT 7 + +#define MAS7_RPN 0xFFFFFFFF + +#define MAS8_TGS 0x80000000 /* Guest space */ +#define MAS8_VF 0x40000000 /* 
Virtualization Fault */ +#define MAS8_TLPID 0x000000ff + +/* Bit definitions for MMUCFG */ +#define MMUCFG_MAVN 0x00000003 /* MMU Architecture Version Number */ +#define MMUCFG_MAVN_V1 0x00000000 /* v1.0 */ +#define MMUCFG_MAVN_V2 0x00000001 /* v2.0 */ +#define MMUCFG_NTLBS 0x0000000c /* Number of TLBs */ +#define MMUCFG_PIDSIZE 0x000007c0 /* PID Reg Size */ +#define MMUCFG_TWC 0x00008000 /* TLB Write Conditional (v2.0) */ +#define MMUCFG_LRAT 0x00010000 /* LRAT Supported (v2.0) */ +#define MMUCFG_RASIZE 0x00fe0000 /* Real Addr Size */ +#define MMUCFG_LPIDSIZE 0x0f000000 /* LPID Reg Size */ + +/* Bit definitions for MMUCSR0 */ +#define MMUCSR0_TLB1FI 0x00000002 /* TLB1 Flash invalidate */ +#define MMUCSR0_TLB0FI 0x00000004 /* TLB0 Flash invalidate */ +#define MMUCSR0_TLB2FI 0x00000040 /* TLB2 Flash invalidate */ +#define MMUCSR0_TLB3FI 0x00000020 /* TLB3 Flash invalidate */ +#define MMUCSR0_TLBFI (MMUCSR0_TLB0FI | MMUCSR0_TLB1FI | \ + MMUCSR0_TLB2FI | MMUCSR0_TLB3FI) +#define MMUCSR0_TLB0PS 0x00000780 /* TLB0 Page Size */ +#define MMUCSR0_TLB1PS 0x00007800 /* TLB1 Page Size */ +#define MMUCSR0_TLB2PS 0x00078000 /* TLB2 Page Size */ +#define MMUCSR0_TLB3PS 0x00780000 /* TLB3 Page Size */ + +/* MMUCFG bits */ +#define MMUCFG_MAVN_NASK 0x00000003 +#define MMUCFG_MAVN_V1_0 0x00000000 +#define MMUCFG_MAVN_V2_0 0x00000001 +#define MMUCFG_NTLB_MASK 0x0000000c +#define MMUCFG_NTLB_SHIFT 2 +#define MMUCFG_PIDSIZE_MASK 0x000007c0 +#define MMUCFG_PIDSIZE_SHIFT 6 +#define MMUCFG_TWC 0x00008000 +#define MMUCFG_LRAT 0x00010000 +#define MMUCFG_RASIZE_MASK 0x00fe0000 +#define MMUCFG_RASIZE_SHIFT 17 +#define MMUCFG_LPIDSIZE_MASK 0x0f000000 +#define MMUCFG_LPIDSIZE_SHIFT 24 + +/* TLBnCFG encoding */ +#define TLBnCFG_N_ENTRY 0x00000fff /* number of entries */ +#define TLBnCFG_HES 0x00002000 /* HW select supported */ +#define TLBnCFG_IPROT 0x00008000 /* IPROT supported */ +#define TLBnCFG_GTWE 0x00010000 /* Guest can write */ +#define TLBnCFG_IND 0x00020000 /* IND entries supported */ +#define TLBnCFG_PT 0x00040000 /* Can load from page table */ +#define TLBnCFG_MINSIZE 0x00f00000 /* Minimum Page Size (v1.0) */ +#define TLBnCFG_MINSIZE_SHIFT 20 +#define TLBnCFG_MAXSIZE 0x000f0000 /* Maximum Page Size (v1.0) */ +#define TLBnCFG_MAXSIZE_SHIFT 16 +#define TLBnCFG_ASSOC 0xff000000 /* Associativity */ +#define TLBnCFG_ASSOC_SHIFT 24 + +/* TLBnPS encoding */ +#define TLBnPS_4K 0x00000004 +#define TLBnPS_8K 0x00000008 +#define TLBnPS_16K 0x00000010 +#define TLBnPS_32K 0x00000020 +#define TLBnPS_64K 0x00000040 +#define TLBnPS_128K 0x00000080 +#define TLBnPS_256K 0x00000100 +#define TLBnPS_512K 0x00000200 +#define TLBnPS_1M 0x00000400 +#define TLBnPS_2M 0x00000800 +#define TLBnPS_4M 0x00001000 +#define TLBnPS_8M 0x00002000 +#define TLBnPS_16M 0x00004000 +#define TLBnPS_32M 0x00008000 +#define TLBnPS_64M 0x00010000 +#define TLBnPS_128M 0x00020000 +#define TLBnPS_256M 0x00040000 +#define TLBnPS_512M 0x00080000 +#define TLBnPS_1G 0x00100000 +#define TLBnPS_2G 0x00200000 +#define TLBnPS_4G 0x00400000 +#define TLBnPS_8G 0x00800000 +#define TLBnPS_16G 0x01000000 +#define TLBnPS_32G 0x02000000 +#define TLBnPS_64G 0x04000000 +#define TLBnPS_128G 0x08000000 +#define TLBnPS_256G 0x10000000 + +/* tlbilx action encoding */ +#define TLBILX_T_ALL 0 +#define TLBILX_T_TID 1 +#define TLBILX_T_FULLMATCH 3 +#define TLBILX_T_CLASS0 4 +#define TLBILX_T_CLASS1 5 +#define TLBILX_T_CLASS2 6 +#define TLBILX_T_CLASS3 7 + +/* + * The mapping only needs to be cache-coherent on SMP, except on + * Freescale e500mc derivatives where it's also 
needed for coherent DMA. + */ +#if defined(CONFIG_SMP) || defined(CONFIG_PPC_E500MC) +#define MAS2_M_IF_NEEDED MAS2_M +#else +#define MAS2_M_IF_NEEDED 0 +#endif + +#ifndef __ASSEMBLY__ +#include <asm/bug.h> + +extern unsigned int tlbcam_index; + +typedef struct { + unsigned int id; + unsigned int active; + void __user *vdso; +} mm_context_t; + +/* Page size definitions, common between 32 and 64-bit + * + * shift : is the "PAGE_SHIFT" value for that page size + * penc : is the pte encoding mask + * + */ +struct mmu_psize_def +{ + unsigned int shift; /* number of bits */ + unsigned int enc; /* PTE encoding */ + unsigned int ind; /* Corresponding indirect page size shift */ + unsigned int flags; +#define MMU_PAGE_SIZE_DIRECT 0x1 /* Supported as a direct size */ +#define MMU_PAGE_SIZE_INDIRECT 0x2 /* Supported as an indirect size */ +}; +extern struct mmu_psize_def mmu_psize_defs[MMU_PAGE_COUNT]; + +static inline int shift_to_mmu_psize(unsigned int shift) +{ + int psize; + + for (psize = 0; psize < MMU_PAGE_COUNT; ++psize) + if (mmu_psize_defs[psize].shift == shift) + return psize; + return -1; +} + +static inline unsigned int mmu_psize_to_shift(unsigned int mmu_psize) +{ + if (mmu_psize_defs[mmu_psize].shift) + return mmu_psize_defs[mmu_psize].shift; + BUG(); +} + +/* The page sizes use the same names as 64-bit hash but are + * constants + */ +#if defined(CONFIG_PPC_4K_PAGES) +#define mmu_virtual_psize MMU_PAGE_4K +#else +#error Unsupported page size +#endif + +extern int mmu_linear_psize; +extern int mmu_vmemmap_psize; + +struct tlb_core_data { + /* + * Per-core spinlock for e6500 TLB handlers (no tlbsrx.) + * Must be the first struct element. + */ + u8 lock; + + /* For software way selection, as on Freescale TLB1 */ + u8 esel_next, esel_max, esel_first; +}; + +#ifdef CONFIG_PPC64 +extern unsigned long linear_map_top; +extern int book3e_htw_mode; + +#define PPC_HTW_NONE 0 +#define PPC_HTW_IBM 1 +#define PPC_HTW_E6500 2 + +/* + * 64-bit booke platforms don't load the tlb in the tlb miss handler code. + * HUGETLB_NEED_PRELOAD handles this - it causes huge_ptep_set_access_flags to + * return 1, indicating that the tlb requires preloading. 
+ */ +#define HUGETLB_NEED_PRELOAD + +#define mmu_cleanup_all NULL + +#define MAX_PHYSMEM_BITS 44 + +#endif + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_POWERPC_MMU_BOOK3E_H_ */ diff --git a/arch/powerpc/include/asm/nohash/mmu.h b/arch/powerpc/include/asm/nohash/mmu.h new file mode 100644 index 000000000..e264be219 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/mmu.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_MMU_H_ +#define _ASM_POWERPC_NOHASH_MMU_H_ + +#if defined(CONFIG_40x) +/* 40x-style software loaded TLB */ +#include <asm/nohash/32/mmu-40x.h> +#elif defined(CONFIG_44x) +/* 44x-style software loaded TLB */ +#include <asm/nohash/32/mmu-44x.h> +#elif defined(CONFIG_PPC_E500) +/* Freescale Book-E software loaded TLB or Book-3e (ISA 2.06+) MMU */ +#include <asm/nohash/mmu-e500.h> +#elif defined (CONFIG_PPC_8xx) +/* Motorola/Freescale 8xx software loaded TLB */ +#include <asm/nohash/32/mmu-8xx.h> +#endif + +#endif /* _ASM_POWERPC_NOHASH_MMU_H_ */ diff --git a/arch/powerpc/include/asm/nohash/pgalloc.h b/arch/powerpc/include/asm/nohash/pgalloc.h new file mode 100644 index 000000000..4b6237631 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pgalloc.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_PGALLOC_H +#define _ASM_POWERPC_NOHASH_PGALLOC_H + +#include <linux/mm.h> +#include <linux/slab.h> + +extern void tlb_remove_table(struct mmu_gather *tlb, void *table); +#ifdef CONFIG_PPC64 +extern void tlb_flush_pgtable(struct mmu_gather *tlb, unsigned long address); +#else +/* 44x etc which is BOOKE not BOOK3E */ +static inline void tlb_flush_pgtable(struct mmu_gather *tlb, + unsigned long address) +{ + +} +#endif /* !CONFIG_PPC_BOOK3E_64 */ + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), + pgtable_gfp_flags(mm, GFP_KERNEL)); +} + +static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd) +{ + kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd); +} + +#ifdef CONFIG_PPC64 +#include <asm/nohash/64/pgalloc.h> +#else +#include <asm/nohash/32/pgalloc.h> +#endif + +static inline void pgtable_free(void *table, int shift) +{ + if (!shift) { + pte_fragment_free((unsigned long *)table, 0); + } else { + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); + kmem_cache_free(PGT_CACHE(shift), table); + } +} + +#define get_hugepd_cache_index(x) (x) + +static inline void pgtable_free_tlb(struct mmu_gather *tlb, void *table, int shift) +{ + unsigned long pgf = (unsigned long)table; + + BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE); + pgf |= shift; + tlb_remove_table(tlb, (void *)pgf); +} + +static inline void __tlb_remove_table(void *_table) +{ + void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE); + unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE; + + pgtable_free(table, shift); +} + +static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table, + unsigned long address) +{ + tlb_flush_pgtable(tlb, address); + pgtable_free_tlb(tlb, table, 0); +} +#endif /* _ASM_POWERPC_NOHASH_PGALLOC_H */ diff --git a/arch/powerpc/include/asm/nohash/pgtable.h b/arch/powerpc/include/asm/nohash/pgtable.h new file mode 100644 index 000000000..3d7dce908 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pgtable.h @@ -0,0 +1,279 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_PGTABLE_H +#define _ASM_POWERPC_NOHASH_PGTABLE_H + +#if defined(CONFIG_PPC64) +#include <asm/nohash/64/pgtable.h> +#else +#include 
<asm/nohash/32/pgtable.h> +#endif + +/* Permission masks used for kernel mappings */ +#define PAGE_KERNEL __pgprot(_PAGE_BASE | _PAGE_KERNEL_RW) +#define PAGE_KERNEL_NC __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE) +#define PAGE_KERNEL_NCG __pgprot(_PAGE_BASE_NC | _PAGE_KERNEL_RW | _PAGE_NO_CACHE | _PAGE_GUARDED) +#define PAGE_KERNEL_X __pgprot(_PAGE_BASE | _PAGE_KERNEL_RWX) +#define PAGE_KERNEL_RO __pgprot(_PAGE_BASE | _PAGE_KERNEL_RO) +#define PAGE_KERNEL_ROX __pgprot(_PAGE_BASE | _PAGE_KERNEL_ROX) + +#ifndef __ASSEMBLY__ + +/* Generic accessors to PTE bits */ +#ifndef pte_write +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_RW; +} +#endif +#ifndef pte_read +static inline int pte_read(pte_t pte) { return 1; } +#endif +static inline int pte_dirty(pte_t pte) { return pte_val(pte) & _PAGE_DIRTY; } +static inline int pte_special(pte_t pte) { return pte_val(pte) & _PAGE_SPECIAL; } +static inline int pte_none(pte_t pte) { return (pte_val(pte) & ~_PTE_NONE_MASK) == 0; } +static inline bool pte_hashpte(pte_t pte) { return false; } +static inline bool pte_ci(pte_t pte) { return pte_val(pte) & _PAGE_NO_CACHE; } +static inline bool pte_exec(pte_t pte) { return pte_val(pte) & _PAGE_EXEC; } + +#ifdef CONFIG_NUMA_BALANCING +/* + * These work without NUMA balancing but the kernel does not care. See the + * comment in include/linux/pgtable.h . On powerpc, this will only + * work for user pages and always return true for kernel pages. + */ +static inline int pte_protnone(pte_t pte) +{ + return pte_present(pte) && !pte_user(pte); +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif /* CONFIG_NUMA_BALANCING */ + +static inline int pte_present(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +static inline bool pte_hw_valid(pte_t pte) +{ + return pte_val(pte) & _PAGE_PRESENT; +} + +/* + * Don't just check for any non zero bits in __PAGE_USER, since for book3e + * and PTE_64BIT, PAGE_KERNEL_X contains _PAGE_BAP_SR which is also in + * _PAGE_USER. Need to explicitly match _PAGE_BAP_UR bit in that case too. + */ +#ifndef pte_user +static inline bool pte_user(pte_t pte) +{ + return (pte_val(pte) & _PAGE_USER) == _PAGE_USER; +} +#endif + +/* + * We only find page table entry in the last level + * Hence no need for other accessors + */ +#define pte_access_permitted pte_access_permitted +static inline bool pte_access_permitted(pte_t pte, bool write) +{ + /* + * A read-only access is controlled by _PAGE_USER bit. + * We have _PAGE_READ set for WRITE and EXECUTE + */ + if (!pte_present(pte) || !pte_user(pte) || !pte_read(pte)) + return false; + + if (write && !pte_write(pte)) + return false; + + return true; +} + +/* Conversion functions: convert a page and protection to a page entry, + * and a page entry and page directory to the page they refer to. + * + * Even if PTEs can be unsigned long long, a PFN is always an unsigned + * long for now. 
+ */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t pgprot) { + return __pte(((pte_basic_t)(pfn) << PTE_RPN_SHIFT) | + pgprot_val(pgprot)); } +static inline unsigned long pte_pfn(pte_t pte) { + return pte_val(pte) >> PTE_RPN_SHIFT; } + +/* Generic modifiers for PTE bits */ +static inline pte_t pte_exprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_EXEC); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_DIRTY); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_ACCESSED); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +#ifndef pte_mkhuge +static inline pte_t pte_mkhuge(pte_t pte) +{ + return __pte(pte_val(pte)); +} +#endif + +#ifndef pte_mkprivileged +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte(pte_val(pte) & ~_PAGE_USER); +} +#endif + +#ifndef pte_mkuser +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_USER); +} +#endif + +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot)); +} + +/* Insert a PTE, top-level function is out of line. It uses an inline + * low level function in the respective pgtable-* files + */ +extern void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep, + pte_t pte); + +/* This low level function performs the actual PTE insertion + * Setting the PTE depends on the MMU type and other factors. It's + * an horrible mess that I'm not going to try to clean up now but + * I'm keeping it in one place rather than spread around + */ +static inline void __set_pte_at(struct mm_struct *mm, unsigned long addr, + pte_t *ptep, pte_t pte, int percpu) +{ + /* Second case is 32-bit with 64-bit PTE. In this case, we + * can just store as long as we do the two halves in the right order + * with a barrier in between. + * In the percpu case, we also fallback to the simple update + */ + if (IS_ENABLED(CONFIG_PPC32) && IS_ENABLED(CONFIG_PTE_64BIT) && !percpu) { + __asm__ __volatile__("\ + stw%X0 %2,%0\n\ + mbar\n\ + stw%X1 %L2,%1" + : "=m" (*ptep), "=m" (*((unsigned char *)ptep+4)) + : "r" (pte) : "memory"); + return; + } + /* Anything else just stores the PTE normally. That covers all 64-bit + * cases, and 32-bit non-hash with 32-bit PTEs. + */ +#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PPC_16K_PAGES) + ptep->pte = ptep->pte1 = ptep->pte2 = ptep->pte3 = pte_val(pte); +#else + *ptep = pte; +#endif + + /* + * With hardware tablewalk, a sync is needed to ensure that + * subsequent accesses see the PTE we just wrote. Unlike userspace + * mappings, we can't tolerate spurious faults, so make sure + * the new PTE will be seen the first time. + */ + if (IS_ENABLED(CONFIG_PPC_BOOK3E_64) && is_kernel_addr(addr)) + mb(); +} + + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); + +/* + * Macro to mark a page protection value as "uncacheable". 
+ */ + +#define _PAGE_CACHE_CTL (_PAGE_COHERENT | _PAGE_GUARDED | _PAGE_NO_CACHE | \ + _PAGE_WRITETHRU) + +#define pgprot_noncached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE | _PAGE_GUARDED)) + +#define pgprot_noncached_wc(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_NO_CACHE)) + +#define pgprot_cached(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT)) + +#if _PAGE_WRITETHRU != 0 +#define pgprot_cached_wthru(prot) (__pgprot((pgprot_val(prot) & ~_PAGE_CACHE_CTL) | \ + _PAGE_COHERENT | _PAGE_WRITETHRU)) +#else +#define pgprot_cached_wthru(prot) pgprot_noncached(prot) +#endif + +#define pgprot_cached_noncoherent(prot) \ + (__pgprot(pgprot_val(prot) & ~_PAGE_CACHE_CTL)) + +#define pgprot_writecombine pgprot_noncached_wc + +struct file; +extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, + unsigned long size, pgprot_t vma_prot); +#define __HAVE_PHYS_MEM_ACCESS_PROT + +#ifdef CONFIG_HUGETLB_PAGE +static inline int hugepd_ok(hugepd_t hpd) +{ +#ifdef CONFIG_PPC_8xx + return ((hpd_val(hpd) & _PMD_PAGE_MASK) == _PMD_PAGE_8M); +#else + /* We clear the top bit to indicate hugepd */ + return (hpd_val(hpd) && (hpd_val(hpd) & PD_HUGE) == 0); +#endif +} + +static inline int pmd_huge(pmd_t pmd) +{ + return 0; +} + +static inline int pud_huge(pud_t pud) +{ + return 0; +} + +#define is_hugepd(hpd) (hugepd_ok(hpd)) +#endif + +/* + * This gets called at the end of handling a page fault, when + * the kernel has put a new PTE into the page table for the process. + * We use it to ensure coherency between the i-cache and d-cache + * for the page which has just been mapped in. + */ +#if defined(CONFIG_PPC_E500) && defined(CONFIG_HUGETLB_PAGE) +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep); +#else +static inline +void update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep) {} +#endif + +#endif /* __ASSEMBLY__ */ +#endif diff --git a/arch/powerpc/include/asm/nohash/pte-e500.h b/arch/powerpc/include/asm/nohash/pte-e500.h new file mode 100644 index 000000000..0934e8965 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/pte-e500.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_PTE_E500_H +#define _ASM_POWERPC_NOHASH_PTE_E500_H +#ifdef __KERNEL__ + +/* PTE bit definitions for processors compliant to the Book3E + * architecture 2.06 or later. The position of the PTE bits + * matches the HW definition of the optional Embedded Page Table + * category. 
+ */ + +/* Architected bits */ +#define _PAGE_PRESENT 0x000001 /* software: pte contains a translation */ +#define _PAGE_SW1 0x000002 +#define _PAGE_BIT_SWAP_TYPE 2 +#define _PAGE_BAP_SR 0x000004 +#define _PAGE_BAP_UR 0x000008 +#define _PAGE_BAP_SW 0x000010 +#define _PAGE_BAP_UW 0x000020 +#define _PAGE_BAP_SX 0x000040 +#define _PAGE_BAP_UX 0x000080 +#define _PAGE_PSIZE_MSK 0x000f00 +#define _PAGE_PSIZE_4K 0x000200 +#define _PAGE_PSIZE_8K 0x000300 +#define _PAGE_PSIZE_16K 0x000400 +#define _PAGE_PSIZE_32K 0x000500 +#define _PAGE_PSIZE_64K 0x000600 +#define _PAGE_PSIZE_128K 0x000700 +#define _PAGE_PSIZE_256K 0x000800 +#define _PAGE_PSIZE_512K 0x000900 +#define _PAGE_PSIZE_1M 0x000a00 +#define _PAGE_PSIZE_2M 0x000b00 +#define _PAGE_PSIZE_4M 0x000c00 +#define _PAGE_PSIZE_8M 0x000d00 +#define _PAGE_PSIZE_16M 0x000e00 +#define _PAGE_PSIZE_32M 0x000f00 +#define _PAGE_DIRTY 0x001000 /* C: page changed */ +#define _PAGE_SW0 0x002000 +#define _PAGE_U3 0x004000 +#define _PAGE_U2 0x008000 +#define _PAGE_U1 0x010000 +#define _PAGE_U0 0x020000 +#define _PAGE_ACCESSED 0x040000 +#define _PAGE_ENDIAN 0x080000 +#define _PAGE_GUARDED 0x100000 +#define _PAGE_COHERENT 0x200000 /* M: enforce memory coherence */ +#define _PAGE_NO_CACHE 0x400000 /* I: cache inhibit */ +#define _PAGE_WRITETHRU 0x800000 /* W: cache write-through */ + +/* "Higher level" linux bit combinations */ +#define _PAGE_EXEC (_PAGE_BAP_SX | _PAGE_BAP_UX) /* .. and was cache cleaned */ +#define _PAGE_RW (_PAGE_BAP_SW | _PAGE_BAP_UW) /* User write permission */ +#define _PAGE_KERNEL_RW (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY) +#define _PAGE_KERNEL_RO (_PAGE_BAP_SR) +#define _PAGE_KERNEL_RWX (_PAGE_BAP_SW | _PAGE_BAP_SR | _PAGE_DIRTY | _PAGE_BAP_SX) +#define _PAGE_KERNEL_ROX (_PAGE_BAP_SR | _PAGE_BAP_SX) +#define _PAGE_USER (_PAGE_BAP_UR | _PAGE_BAP_SR) /* Can be read */ +#define _PAGE_PRIVILEGED (_PAGE_BAP_SR) + +#define _PAGE_SPECIAL _PAGE_SW0 + +/* Base page size */ +#define _PAGE_PSIZE _PAGE_PSIZE_4K +#define PTE_RPN_SHIFT (24) + +#define PTE_WIMGE_SHIFT (19) +#define PTE_BAP_SHIFT (2) + +/* On 32-bit, we never clear the top part of the PTE */ +#ifdef CONFIG_PPC32 +#define _PTE_NONE_MASK 0xffffffff00000000ULL +#define _PMD_PRESENT 0 +#define _PMD_PRESENT_MASK (PAGE_MASK) +#define _PMD_BAD (~PAGE_MASK) +#define _PMD_USER 0 +#else +#define _PTE_NONE_MASK 0 +#endif + +/* + * We define 2 sets of base prot bits, one for basic pages (ie, + * cacheable kernel and user pages) and one for non cacheable + * pages. We always set _PAGE_COHERENT when SMP is enabled or + * the processor might need it for DMA coherency. 
+ */ +#define _PAGE_BASE_NC (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_PSIZE) +#if defined(CONFIG_SMP) +#define _PAGE_BASE (_PAGE_BASE_NC | _PAGE_COHERENT) +#else +#define _PAGE_BASE (_PAGE_BASE_NC) +#endif + +/* Permission masks used to generate the __P and __S table */ +#define PAGE_NONE __pgprot(_PAGE_BASE) +#define PAGE_SHARED __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW) +#define PAGE_SHARED_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_RW | _PAGE_BAP_UX) +#define PAGE_COPY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_COPY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) +#define PAGE_READONLY __pgprot(_PAGE_BASE | _PAGE_USER) +#define PAGE_READONLY_X __pgprot(_PAGE_BASE | _PAGE_USER | _PAGE_BAP_UX) + +#ifndef __ASSEMBLY__ +static inline pte_t pte_mkprivileged(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_USER) | _PAGE_PRIVILEGED); +} + +#define pte_mkprivileged pte_mkprivileged + +static inline pte_t pte_mkuser(pte_t pte) +{ + return __pte((pte_val(pte) & ~_PAGE_PRIVILEGED) | _PAGE_USER); +} + +#define pte_mkuser pte_mkuser + +static inline pte_t pte_mkexec(pte_t pte) +{ + if (pte_val(pte) & _PAGE_BAP_UR) + return __pte((pte_val(pte) & ~_PAGE_BAP_SX) | _PAGE_BAP_UX); + else + return __pte((pte_val(pte) & ~_PAGE_BAP_UX) | _PAGE_BAP_SX); +} +#define pte_mkexec pte_mkexec + +#endif /* __ASSEMBLY__ */ + +#endif /* __KERNEL__ */ +#endif /* _ASM_POWERPC_NOHASH_PTE_E500_H */ diff --git a/arch/powerpc/include/asm/nohash/tlbflush.h b/arch/powerpc/include/asm/nohash/tlbflush.h new file mode 100644 index 000000000..bdaf34ad4 --- /dev/null +++ b/arch/powerpc/include/asm/nohash/tlbflush.h @@ -0,0 +1,77 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_POWERPC_NOHASH_TLBFLUSH_H +#define _ASM_POWERPC_NOHASH_TLBFLUSH_H + +/* + * TLB flushing: + * + * - flush_tlb_mm(mm) flushes the specified mm context TLB's + * - flush_tlb_page(vma, vmaddr) flushes one page + * - local_flush_tlb_mm(mm, full) flushes the specified mm context on + * the local processor + * - local_flush_tlb_page(vma, vmaddr) flushes one page on the local processor + * - flush_tlb_range(vma, start, end) flushes a range of pages + * - flush_tlb_kernel_range(start, end) flushes a range of kernel pages + * + */ + +/* + * TLB flushing for software loaded TLB chips + * + * TODO: (CONFIG_PPC_85xx) determine if flush_tlb_range & + * flush_tlb_kernel_range are best implemented as tlbia vs + * specific tlbie's + */ + +struct vm_area_struct; +struct mm_struct; + +#define MMU_NO_CONTEXT ((unsigned int)-1) + +extern void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); + +#ifdef CONFIG_PPC_8xx +static inline void local_flush_tlb_mm(struct mm_struct *mm) +{ + unsigned int pid = READ_ONCE(mm->context.id); + + if (pid != MMU_NO_CONTEXT) + asm volatile ("sync; tlbia; isync" : : : "memory"); +} + +static inline void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr) +{ + asm volatile ("tlbie %0; sync" : : "r" (vmaddr) : "memory"); +} + +static inline void flush_tlb_kernel_range(unsigned long start, unsigned long end) +{ + start &= PAGE_MASK; + + if (end - start <= PAGE_SIZE) + asm volatile ("tlbie %0; sync" : : "r" (start) : "memory"); + else + asm volatile ("sync; tlbia; isync" : : : "memory"); +} +#else +extern void flush_tlb_kernel_range(unsigned long start, unsigned long end); +extern void local_flush_tlb_mm(struct mm_struct *mm); +extern void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); + +extern void __local_flush_tlb_page(struct mm_struct *mm, 
unsigned long vmaddr, + int tsize, int ind); +#endif + +#ifdef CONFIG_SMP +extern void flush_tlb_mm(struct mm_struct *mm); +extern void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr); +extern void __flush_tlb_page(struct mm_struct *mm, unsigned long vmaddr, + int tsize, int ind); +#else +#define flush_tlb_mm(mm) local_flush_tlb_mm(mm) +#define flush_tlb_page(vma,addr) local_flush_tlb_page(vma,addr) +#define __flush_tlb_page(mm,addr,p,i) __local_flush_tlb_page(mm,addr,p,i) +#endif + +#endif /* _ASM_POWERPC_NOHASH_TLBFLUSH_H */
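
For reference, a minimal user-space sketch (not kernel code) of how the swap-entry macros in nohash/64/pgtable.h pack a (type, offset) pair into a PTE-sized value. The constants mirror _PAGE_BIT_SWAP_TYPE (2), SWP_TYPE_BITS (5) and PTE_RPN_SHIFT (24) from the definitions above; the helper names are local stand-ins rather than kernel symbols.

/* Stand-alone demo of the swap-entry bit packing described above. */
#include <assert.h>
#include <stdio.h>

#define PAGE_BIT_SWAP_TYPE	2	/* mirrors _PAGE_BIT_SWAP_TYPE */
#define SWP_TYPE_BITS		5
#define PTE_RPN_SHIFT		24

/* ((type) << _PAGE_BIT_SWAP_TYPE) | ((offset) << PTE_RPN_SHIFT) */
static unsigned long swp_entry_val(unsigned long type, unsigned long offset)
{
	return (type << PAGE_BIT_SWAP_TYPE) | (offset << PTE_RPN_SHIFT);
}

static unsigned long swp_type_of(unsigned long val)
{
	return (val >> PAGE_BIT_SWAP_TYPE) & ((1UL << SWP_TYPE_BITS) - 1);
}

static unsigned long swp_offset_of(unsigned long val)
{
	return val >> PTE_RPN_SHIFT;
}

int main(void)
{
	unsigned long e = swp_entry_val(3, 0x2a);

	/* type occupies bits 2..6, offset sits in the RPN field above bit 24 */
	assert(swp_type_of(e) == 3);
	assert(swp_offset_of(e) == 0x2a);
	printf("entry=%#lx type=%lu offset=%#lx\n",
	       e, swp_type_of(e), swp_offset_of(e));
	return 0;
}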