author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-27 10:05:51 +0000
commit    5d1646d90e1f2cceb9f0828f4b28318cd0ec7744 (patch)
tree      a94efe259b9009378be6d90eb30d2b019d95c194 /arch/powerpc/mm/book3s32
parent    Initial commit. (diff)
download  linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.tar.xz
          linux-5d1646d90e1f2cceb9f0828f4b28318cd0ec7744.zip

Adding upstream version 5.10.209. (tags: upstream/5.10.209, upstream)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/powerpc/mm/book3s32')
-rw-r--r--  arch/powerpc/mm/book3s32/Makefile          9
-rw-r--r--  arch/powerpc/mm/book3s32/hash_low.S      709
-rw-r--r--  arch/powerpc/mm/book3s32/mmu.c           470
-rw-r--r--  arch/powerpc/mm/book3s32/mmu_context.c   113
-rw-r--r--  arch/powerpc/mm/book3s32/tlb.c           171
5 files changed, 1472 insertions, 0 deletions
diff --git a/arch/powerpc/mm/book3s32/Makefile b/arch/powerpc/mm/book3s32/Makefile
new file mode 100644
index 000000000..1732eaa74
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0
+
+KASAN_SANITIZE_mmu.o := n
+
+ifdef CONFIG_KASAN
+CFLAGS_mmu.o += -DDISABLE_BRANCH_PROFILING
+endif
+
+obj-y += mmu.o hash_low.o mmu_context.o tlb.o
diff --git a/arch/powerpc/mm/book3s32/hash_low.S b/arch/powerpc/mm/book3s32/hash_low.S
new file mode 100644
index 000000000..b2c912e51
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/hash_low.S
@@ -0,0 +1,709 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * PowerPC version
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ * Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Adapted for Power Macintosh by Paul Mackerras.
+ * Low-level exception handlers and MMU support
+ * rewritten by Paul Mackerras.
+ * Copyright (C) 1996 Paul Mackerras.
+ *
+ * This file contains low-level assembler routines for managing
+ * the PowerPC MMU hash table. (PPC 8xx processors don't use a
+ * hash table, so this file is not used on them.)
+ */
+
+#include <linux/pgtable.h>
+#include <linux/init.h>
+#include <asm/reg.h>
+#include <asm/page.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/export.h>
+#include <asm/feature-fixups.h>
+#include <asm/code-patching-asm.h>
+
+#ifdef CONFIG_SMP
+ .section .bss
+ .align 2
+mmu_hash_lock:
+ .space 4
+#endif /* CONFIG_SMP */
+
+/*
+ * Load a PTE into the hash table, if possible.
+ * The address is in r4, and r3 contains an access flag:
+ * _PAGE_RW (0x400) if a write.
+ * r9 contains the SRR1 value, from which we use the MSR_PR bit.
+ * SPRG_THREAD contains the physical address of the current task's thread.
+ *
+ * Returns to the caller if the access is illegal or there is no
+ * mapping for the address. Otherwise it places an appropriate PTE
+ * in the hash table and returns from the exception.
+ * Uses r0, r3 - r6, r8, r10, ctr, lr.
+ */
+ .text
+_GLOBAL(hash_page)
+#ifdef CONFIG_SMP
+ lis r8, (mmu_hash_lock - PAGE_OFFSET)@h
+ ori r8, r8, (mmu_hash_lock - PAGE_OFFSET)@l
+ lis r0,0x0fff
+ b 10f
+11: lwz r6,0(r8)
+ cmpwi 0,r6,0
+ bne 11b
+10: lwarx r6,0,r8
+ cmpwi 0,r6,0
+ bne- 11b
+ stwcx. r0,0,r8
+ bne- 10b
+ isync
+#endif
+ /* Get PTE (linux-style) and check access */
+ lis r0, TASK_SIZE@h /* check if kernel address */
+ cmplw 0,r4,r0
+ ori r3,r3,_PAGE_USER|_PAGE_PRESENT /* test low addresses as user */
+ mfspr r5, SPRN_SPRG_PGDIR /* phys page-table root */
+ blt+ 112f /* assume user more likely */
+ lis r5, (swapper_pg_dir - PAGE_OFFSET)@ha /* if kernel address, use */
+ addi r5 ,r5 ,(swapper_pg_dir - PAGE_OFFSET)@l /* kernel page table */
+ rlwimi r3,r9,32-12,29,29 /* MSR_PR -> _PAGE_USER */
+112:
+#ifndef CONFIG_PTE_64BIT
+ rlwimi r5,r4,12,20,29 /* insert top 10 bits of address */
+ lwz r8,0(r5) /* get pmd entry */
+ rlwinm. r8,r8,0,0,19 /* extract address of pte page */
+#else
+ rlwinm r8,r4,13,19,29 /* Compute pgdir/pmd offset */
+ lwzx r8,r8,r5 /* Get L1 entry */
+ rlwinm. r8,r8,0,0,20 /* extract pt base address */
+#endif
+#ifdef CONFIG_SMP
+ beq- .Lhash_page_out /* return if no mapping */
+#else
+ /* XXX it seems like the 601 will give a machine fault on the
+ rfi if its alignment is wrong (bottom 4 bits of address are
+ 8 or 0xc) and we have had a not-taken conditional branch
+ to the address following the rfi. */
+ beqlr-
+#endif
+#ifndef CONFIG_PTE_64BIT
+ rlwimi r8,r4,22,20,29 /* insert next 10 bits of address */
+#else
+ rlwimi r8,r4,23,20,28 /* compute pte address */
+#endif
+ rlwinm r0,r3,32-3,24,24 /* _PAGE_RW access -> _PAGE_DIRTY */
+ ori r0,r0,_PAGE_ACCESSED|_PAGE_HASHPTE
+
+ /*
+ * Update the linux PTE atomically. We do the lwarx up-front
+ * because almost always, there won't be a permission violation
+ * and there won't already be an HPTE, and thus we will have
+ * to update the PTE to set _PAGE_HASHPTE. -- paulus.
+ *
+ * If PTE_64BIT is set, the low word is the flags word; use that
+ * word for locking since it contains all the interesting bits.
+ */
+#if (PTE_FLAGS_OFFSET != 0)
+ addi r8,r8,PTE_FLAGS_OFFSET
+#endif
+.Lretry:
+ lwarx r6,0,r8 /* get linux-style pte, flag word */
+ andc. r5,r3,r6 /* check access & ~permission */
+#ifdef CONFIG_SMP
+ bne- .Lhash_page_out /* return if access not permitted */
+#else
+ bnelr-
+#endif
+ or r5,r0,r6 /* set accessed/dirty bits */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+ subf r10,r6,r8 /* create false data dependency */
+ subi r10,r10,PTE_FLAGS_OFFSET
+ lwzx r10,r6,r10 /* Get upper PTE word */
+#else
+ lwz r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
+ stwcx. r5,0,r8 /* attempt to update PTE */
+ bne- .Lretry /* retry if someone got there first */
+
+ mfsrin r3,r4 /* get segment reg for segment */
+#ifndef CONFIG_VMAP_STACK
+ mfctr r0
+ stw r0,_CTR(r11)
+#endif
+ bl create_hpte /* add the hash table entry */
+
+#ifdef CONFIG_SMP
+ eieio
+ lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+ li r0,0
+ stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+#endif
+
+#ifdef CONFIG_VMAP_STACK
+ b fast_hash_page_return
+#else
+ /* Return from the exception */
+ lwz r5,_CTR(r11)
+ mtctr r5
+ lwz r0,GPR0(r11)
+ lwz r8,GPR8(r11)
+ b fast_exception_return
+#endif
+
+#ifdef CONFIG_SMP
+.Lhash_page_out:
+ eieio
+ lis r8, (mmu_hash_lock - PAGE_OFFSET)@ha
+ li r0,0
+ stw r0, (mmu_hash_lock - PAGE_OFFSET)@l(r8)
+ blr
+#endif /* CONFIG_SMP */
+_ASM_NOKPROBE_SYMBOL(hash_page)
+
+/*
+ * Add an entry for a particular page to the hash table.
+ *
+ * add_hash_page(unsigned context, unsigned long va, unsigned long pmdval)
+ *
+ * We assume any necessary modifications to the pte (e.g. setting
+ * the accessed bit) have already been done and that there is actually
+ * a hash table in use (i.e. we're not on a 603).
+ */
+_GLOBAL(add_hash_page)
+ mflr r0
+ stw r0,4(r1)
+
+ /* Convert context and va to VSID */
+ mulli r3,r3,897*16 /* multiply context by context skew */
+ rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
+ mulli r0,r0,0x111 /* multiply by ESID skew */
+ add r3,r3,r0 /* note create_hpte trims to 24 bits */
+
+#ifdef CONFIG_SMP
+ lwz r8,TASK_CPU(r2) /* to go in mmu_hash_lock */
+ oris r8,r8,12
+#endif /* CONFIG_SMP */
+
+ /*
+ * We disable interrupts here, even on UP, because we don't
+ * want to race with hash_page, and because we want the
+ * _PAGE_HASHPTE bit to be a reliable indication of whether
+ * the HPTE exists (or at least whether one did once).
+ * We also turn off the MMU for data accesses so that we
+ * can't take a hash table miss (assuming the code is
+ * covered by a BAT). -- paulus
+ */
+ mfmsr r9
+ rlwinm r0,r9,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear MSR_DR */
+ mtmsr r0
+ isync
+
+#ifdef CONFIG_SMP
+ lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha
+ addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
+10: lwarx r0,0,r6 /* take the mmu_hash_lock */
+ cmpi 0,r0,0
+ bne- 11f
+ stwcx. r8,0,r6
+ beq+ 12f
+11: lwz r0,0(r6)
+ cmpi 0,r0,0
+ beq 10b
+ b 11b
+12: isync
+#endif
+
+ /*
+ * Fetch the linux pte and test and set _PAGE_HASHPTE atomically.
+ * If _PAGE_HASHPTE was already set, we don't replace the existing
+ * HPTE, so we just unlock and return.
+ */
+ mr r8,r5
+#ifndef CONFIG_PTE_64BIT
+ rlwimi r8,r4,22,20,29
+#else
+ rlwimi r8,r4,23,20,28
+ addi r8,r8,PTE_FLAGS_OFFSET
+#endif
+1: lwarx r6,0,r8
+ andi. r0,r6,_PAGE_HASHPTE
+ bne 9f /* if HASHPTE already set, done */
+#ifdef CONFIG_PTE_64BIT
+#ifdef CONFIG_SMP
+ subf r10,r6,r8 /* create false data dependency */
+ subi r10,r10,PTE_FLAGS_OFFSET
+ lwzx r10,r6,r10 /* Get upper PTE word */
+#else
+ lwz r10,-PTE_FLAGS_OFFSET(r8)
+#endif /* CONFIG_SMP */
+#endif /* CONFIG_PTE_64BIT */
+ ori r5,r6,_PAGE_HASHPTE
+ stwcx. r5,0,r8
+ bne- 1b
+
+ bl create_hpte
+
+9:
+#ifdef CONFIG_SMP
+ lis r6, (mmu_hash_lock - PAGE_OFFSET)@ha
+ addi r6, r6, (mmu_hash_lock - PAGE_OFFSET)@l
+ eieio
+ li r0,0
+ stw r0,0(r6) /* clear mmu_hash_lock */
+#endif
+
+ /* reenable interrupts and DR */
+ mtmsr r9
+ isync
+
+ lwz r0,4(r1)
+ mtlr r0
+ blr
+_ASM_NOKPROBE_SYMBOL(add_hash_page)
+
+/*
+ * This routine adds a hardware PTE to the hash table.
+ * It is designed to be called with the MMU either on or off.
+ * r3 contains the VSID, r4 contains the virtual address,
+ * r5 contains the linux PTE, r6 contains the old value of the
+ * linux PTE (before setting _PAGE_HASHPTE). r10 contains the
+ * upper half of the PTE if CONFIG_PTE_64BIT.
+ * On SMP, the caller should have the mmu_hash_lock held.
+ * We assume that the caller has (or will) set the _PAGE_HASHPTE
+ * bit in the linux PTE in memory. The value passed in r6 should
+ * be the old linux PTE value; if it doesn't have _PAGE_HASHPTE set
+ * this routine will skip the search for an existing HPTE.
+ * This procedure modifies r0, r3 - r6, r8, cr0.
+ * -- paulus.
+ *
+ * For speed, 4 of the instructions get patched once the size and
+ * physical address of the hash table are known. These definitions
+ * of Hash_base and Hash_bits below are for the early hash table.
+ */
+Hash_base = early_hash
+Hash_bits = 12 /* e.g. 256kB hash table */
+Hash_msk = (((1 << Hash_bits) - 1) * 64)
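+/*
+ * With the early values above, the table has 1 << Hash_bits = 4096 PTEGs
+ * of 64 bytes each (256kB in total), and Hash_msk = 4095 * 64 = 0x3ffc0,
+ * the byte offset of the last PTEG.
+ */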
+
+/* defines for the PTE format for 32-bit PPCs */
+#define HPTE_SIZE 8
+#define PTEG_SIZE 64
+#define LG_PTEG_SIZE 6
+#define LDPTEu lwzu
+#define LDPTE lwz
+#define STPTE stw
+#define CMPPTE cmpw
+#define PTE_H 0x40
+#define PTE_V 0x80000000
+#define TST_V(r) rlwinm. r,r,0,0,0
+#define SET_V(r) oris r,r,PTE_V@h
+#define CLR_V(r,t) rlwinm r,r,0,1,31
+
+#define HASH_LEFT 31-(LG_PTEG_SIZE+Hash_bits-1)
+#define HASH_RIGHT 31-LG_PTEG_SIZE
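+/*
+ * For the early table (Hash_bits = 12, LG_PTEG_SIZE = 6) these evaluate to
+ * HASH_LEFT = 14 and HASH_RIGHT = 25, i.e. the patched rlwimi/rlwinm below
+ * keep a 12-bit PTEG index scaled by the 64-byte PTEG size.
+ */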
+
+__REF
+_GLOBAL(create_hpte)
+ /* Convert linux-style PTE (r5) to low word of PPC-style PTE (r8) */
+ rlwinm r8,r5,32-9,30,30 /* _PAGE_RW -> PP msb */
+ rlwinm r0,r5,32-6,30,30 /* _PAGE_DIRTY -> PP msb */
+ and r8,r8,r0 /* writable if _RW & _DIRTY */
+ rlwimi r5,r5,32-1,30,30 /* _PAGE_USER -> PP msb */
+ rlwimi r5,r5,32-2,31,31 /* _PAGE_USER -> PP lsb */
+ ori r8,r8,0xe04 /* clear out reserved bits */
+ andc r8,r5,r8 /* PP = user? (rw&dirty? 1: 3): 0 */
+BEGIN_FTR_SECTION
+ rlwinm r8,r8,0,~_PAGE_COHERENT /* clear M (coherence not required) */
+END_FTR_SECTION_IFCLR(CPU_FTR_NEED_COHERENT)
+#ifdef CONFIG_PTE_64BIT
+ /* Put the XPN bits into the PTE */
+ rlwimi r8,r10,8,20,22
+ rlwimi r8,r10,2,29,29
+#endif
+
+ /* Construct the high word of the PPC-style PTE (r5) */
+ rlwinm r5,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
+ rlwimi r5,r4,10,26,31 /* put in API (abbrev page index) */
+ SET_V(r5) /* set V (valid) bit */
+
+ patch_site 0f, patch__hash_page_A0
+ patch_site 1f, patch__hash_page_A1
+ patch_site 2f, patch__hash_page_A2
+ /* Get the address of the primary PTE group in the hash table (r3) */
+0: lis r0, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
+1: rlwimi r0,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
+2: rlwinm r3,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+ xor r3,r3,r0 /* make primary hash */
+ li r0,8 /* PTEs/group */
+
+ /*
+ * Test the _PAGE_HASHPTE bit in the old linux PTE, and skip the search
+ * if it is clear, meaning that the HPTE isn't there already...
+ */
+ andi. r6,r6,_PAGE_HASHPTE
+ beq+ 10f /* no PTE: go look for an empty slot */
+ tlbie r4
+
+ lis r4, (htab_hash_searches - PAGE_OFFSET)@ha
+ lwz r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+ addi r6,r6,1 /* count how many searches we do */
+ stw r6, (htab_hash_searches - PAGE_OFFSET)@l(r4)
+
+ /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
+ mtctr r0
+ addi r4,r3,-HPTE_SIZE
+1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
+ CMPPTE 0,r6,r5
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ .Lfound_slot
+
+ patch_site 0f, patch__hash_page_B
+ /* Search the secondary PTEG for a matching PTE */
+ ori r5,r5,PTE_H /* set H (secondary hash) bit */
+0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
+ xori r4,r4,(-PTEG_SIZE & 0xffff)
+ addi r4,r4,-HPTE_SIZE
+ mtctr r0
+2: LDPTEu r6,HPTE_SIZE(r4)
+ CMPPTE 0,r6,r5
+ bdnzf 2,2b
+ beq+ .Lfound_slot
+ xori r5,r5,PTE_H /* clear H bit again */
+
+ /* Search the primary PTEG for an empty slot */
+10: mtctr r0
+ addi r4,r3,-HPTE_SIZE /* search primary PTEG */
+1: LDPTEu r6,HPTE_SIZE(r4) /* get next PTE */
+ TST_V(r6) /* test valid bit */
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ .Lfound_empty
+
+ /* update counter of times that the primary PTEG is full */
+ lis r4, (primary_pteg_full - PAGE_OFFSET)@ha
+ lwz r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+ addi r6,r6,1
+ stw r6, (primary_pteg_full - PAGE_OFFSET)@l(r4)
+
+ patch_site 0f, patch__hash_page_C
+ /* Search the secondary PTEG for an empty slot */
+ ori r5,r5,PTE_H /* set H (secondary hash) bit */
+0: xoris r4,r3,Hash_msk>>16 /* compute secondary hash */
+ xori r4,r4,(-PTEG_SIZE & 0xffff)
+ addi r4,r4,-HPTE_SIZE
+ mtctr r0
+2: LDPTEu r6,HPTE_SIZE(r4)
+ TST_V(r6)
+ bdnzf 2,2b
+ beq+ .Lfound_empty
+ xori r5,r5,PTE_H /* clear H bit again */
+
+ /*
+ * Choose an arbitrary slot in the primary PTEG to overwrite.
+ * Since both the primary and secondary PTEGs are full, and we
+ * have no information that the PTEs in the primary PTEG are
+ * more important or useful than those in the secondary PTEG,
+ * and we know there is a definite (although small) speed
+ * advantage to putting the PTE in the primary PTEG, we always
+ * put the PTE in the primary PTEG.
+ *
+ * In addition, we skip any slot that is mapping kernel text in
+ * order to avoid a deadlock when not using BAT mappings if
+ * trying to hash in the kernel hash code itself after it has
+ * already taken the hash table lock. This works in conjunction
+ * with pre-faulting of the kernel text.
+ *
+ * If the hash table bucket is full of kernel text entries, we'll
+ * lock up here, but that shouldn't happen.
+ */
+
+1: lis r4, (next_slot - PAGE_OFFSET)@ha /* get next evict slot */
+ lwz r6, (next_slot - PAGE_OFFSET)@l(r4)
+ addi r6,r6,HPTE_SIZE /* search for candidate */
+ andi. r6,r6,7*HPTE_SIZE
+ stw r6,next_slot@l(r4)
+ add r4,r3,r6
+ LDPTE r0,HPTE_SIZE/2(r4) /* get PTE second word */
+ clrrwi r0,r0,12
+ lis r6,etext@h
+ ori r6,r6,etext@l /* get etext */
+ tophys(r6,r6)
+ cmpl cr0,r0,r6 /* compare and try again */
+ blt 1b
+
+#ifndef CONFIG_SMP
+ /* Store PTE in PTEG */
+.Lfound_empty:
+ STPTE r5,0(r4)
+.Lfound_slot:
+ STPTE r8,HPTE_SIZE/2(r4)
+
+#else /* CONFIG_SMP */
+/*
+ * Between the tlbie above and updating the hash table entry below,
+ * another CPU could read the hash table entry and put it in its TLB.
+ * There are 3 cases:
+ * 1. using an empty slot
+ * 2. updating an earlier entry to change permissions (i.e. enable write)
+ * 3. taking over the PTE for an unrelated address
+ *
+ * In each case it doesn't really matter if the other CPUs have the old
+ * PTE in their TLB. So we don't need to bother with another tlbie here,
+ * which is convenient as we've overwritten the register that had the
+ * address. :-) The tlbie above is mainly to make sure that this CPU comes
+ * and gets the new PTE from the hash table.
+ *
+ * We do however have to make sure that the PTE is never in an invalid
+ * state with the V bit set.
+ */
+.Lfound_empty:
+.Lfound_slot:
+ CLR_V(r5,r0) /* clear V (valid) bit in PTE */
+ STPTE r5,0(r4)
+ sync
+ TLBSYNC
+ STPTE r8,HPTE_SIZE/2(r4) /* put in correct RPN, WIMG, PP bits */
+ sync
+ SET_V(r5)
+ STPTE r5,0(r4) /* finally set V bit in PTE */
+#endif /* CONFIG_SMP */
+
+ sync /* make sure pte updates get to memory */
+ blr
+ .previous
+_ASM_NOKPROBE_SYMBOL(create_hpte)
+
+ .section .bss
+ .align 2
+next_slot:
+ .space 4
+primary_pteg_full:
+ .space 4
+htab_hash_searches:
+ .space 4
+ .previous
+
+/*
+ * Flush the entry for a particular page from the hash table.
+ *
+ * flush_hash_pages(unsigned context, unsigned long va, unsigned long pmdval,
+ * int count)
+ *
+ * We assume that there is a hash table in use (Hash != 0).
+ */
+__REF
+_GLOBAL(flush_hash_pages)
+ /*
+ * We disable interrupts here, even on UP, because we want
+ * the _PAGE_HASHPTE bit to be a reliable indication of
+ * whether the HPTE exists (or at least whether one did once).
+ * We also turn off the MMU for data accesses so that we
+ * can't take a hash table miss (assuming the code is
+ * covered by a BAT). -- paulus
+ */
+ mfmsr r10
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear MSR_DR */
+ mtmsr r0
+ isync
+
+ /* First find a PTE in the range that has _PAGE_HASHPTE set */
+#ifndef CONFIG_PTE_64BIT
+ rlwimi r5,r4,22,20,29
+#else
+ rlwimi r5,r4,23,20,28
+#endif
+1: lwz r0,PTE_FLAGS_OFFSET(r5)
+ cmpwi cr1,r6,1
+ andi. r0,r0,_PAGE_HASHPTE
+ bne 2f
+ ble cr1,19f
+ addi r4,r4,0x1000
+ addi r5,r5,PTE_SIZE
+ addi r6,r6,-1
+ b 1b
+
+ /* Convert context and va to VSID */
+2: mulli r3,r3,897*16 /* multiply context by context skew */
+ rlwinm r0,r4,4,28,31 /* get ESID (top 4 bits of va) */
+ mulli r0,r0,0x111 /* multiply by ESID skew */
+ add r3,r3,r0 /* note code below trims to 24 bits */
+
+ /* Construct the high word of the PPC-style PTE (r11) */
+ rlwinm r11,r3,7,1,24 /* put VSID in 0x7fffff80 bits */
+ rlwimi r11,r4,10,26,31 /* put in API (abbrev page index) */
+ SET_V(r11) /* set V (valid) bit */
+
+#ifdef CONFIG_SMP
+ lis r9, (mmu_hash_lock - PAGE_OFFSET)@ha
+ addi r9, r9, (mmu_hash_lock - PAGE_OFFSET)@l
+ tophys (r8, r2)
+ lwz r8, TASK_CPU(r8)
+ oris r8,r8,9
+10: lwarx r0,0,r9
+ cmpi 0,r0,0
+ bne- 11f
+ stwcx. r8,0,r9
+ beq+ 12f
+11: lwz r0,0(r9)
+ cmpi 0,r0,0
+ beq 10b
+ b 11b
+12: isync
+#endif
+
+ /*
+ * Check the _PAGE_HASHPTE bit in the linux PTE. If it is
+ * already clear, we're done (for this pte). If not,
+ * clear it (atomically) and proceed. -- paulus.
+ */
+#if (PTE_FLAGS_OFFSET != 0)
+ addi r5,r5,PTE_FLAGS_OFFSET
+#endif
+33: lwarx r8,0,r5 /* fetch the pte flags word */
+ andi. r0,r8,_PAGE_HASHPTE
+ beq 8f /* done if HASHPTE is already clear */
+ rlwinm r8,r8,0,31,29 /* clear HASHPTE bit */
+ stwcx. r8,0,r5 /* update the pte */
+ bne- 33b
+
+ patch_site 0f, patch__flush_hash_A0
+ patch_site 1f, patch__flush_hash_A1
+ patch_site 2f, patch__flush_hash_A2
+ /* Get the address of the primary PTE group in the hash table (r3) */
+0: lis r8, (Hash_base - PAGE_OFFSET)@h /* base address of hash table */
+1: rlwimi r8,r3,LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* VSID -> hash */
+2: rlwinm r0,r4,20+LG_PTEG_SIZE,HASH_LEFT,HASH_RIGHT /* PI -> hash */
+ xor r8,r0,r8 /* make primary hash */
+
+ /* Search the primary PTEG for a PTE whose 1st (d)word matches r5 */
+ li r0,8 /* PTEs/group */
+ mtctr r0
+ addi r12,r8,-HPTE_SIZE
+1: LDPTEu r0,HPTE_SIZE(r12) /* get next PTE */
+ CMPPTE 0,r0,r11
+ bdnzf 2,1b /* loop while ctr != 0 && !cr0.eq */
+ beq+ 3f
+
+ patch_site 0f, patch__flush_hash_B
+ /* Search the secondary PTEG for a matching PTE */
+ ori r11,r11,PTE_H /* set H (secondary hash) bit */
+ li r0,8 /* PTEs/group */
+0: xoris r12,r8,Hash_msk>>16 /* compute secondary hash */
+ xori r12,r12,(-PTEG_SIZE & 0xffff)
+ addi r12,r12,-HPTE_SIZE
+ mtctr r0
+2: LDPTEu r0,HPTE_SIZE(r12)
+ CMPPTE 0,r0,r11
+ bdnzf 2,2b
+ xori r11,r11,PTE_H /* clear H again */
+ bne- 4f /* should rarely fail to find it */
+
+3: li r0,0
+ STPTE r0,0(r12) /* invalidate entry */
+4: sync
+ tlbie r4 /* in hw tlb too */
+ sync
+
+8: ble cr1,9f /* if all ptes checked */
+81: addi r6,r6,-1
+ addi r5,r5,PTE_SIZE
+ addi r4,r4,0x1000
+ lwz r0,0(r5) /* check next pte */
+ cmpwi cr1,r6,1
+ andi. r0,r0,_PAGE_HASHPTE
+ bne 33b
+ bgt cr1,81b
+
+9:
+#ifdef CONFIG_SMP
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+#endif
+
+19: mtmsr r10
+ isync
+ blr
+ .previous
+EXPORT_SYMBOL(flush_hash_pages)
+_ASM_NOKPROBE_SYMBOL(flush_hash_pages)
+
+/*
+ * Flush an entry from the TLB
+ */
+_GLOBAL(_tlbie)
+#ifdef CONFIG_SMP
+ lwz r8,TASK_CPU(r2)
+ oris r8,r8,11
+ mfmsr r10
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+ eieio
+ tlbie r3
+ sync
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ isync
+#else /* CONFIG_SMP */
+ tlbie r3
+ sync
+#endif /* CONFIG_SMP */
+ blr
+_ASM_NOKPROBE_SYMBOL(_tlbie)
+
+/*
+ * Flush the entire TLB. 603/603e only
+ */
+_GLOBAL(_tlbia)
+#if defined(CONFIG_SMP)
+ lwz r8,TASK_CPU(r2)
+ oris r8,r8,10
+ mfmsr r10
+ rlwinm r0,r10,0,17,15 /* clear bit 16 (MSR_EE) */
+ rlwinm r0,r0,0,28,26 /* clear DR */
+ mtmsr r0
+ isync
+ lis r9,mmu_hash_lock@h
+ ori r9,r9,mmu_hash_lock@l
+ tophys(r9,r9)
+10: lwarx r7,0,r9
+ cmpwi 0,r7,0
+ bne- 10b
+ stwcx. r8,0,r9
+ bne- 10b
+#endif /* CONFIG_SMP */
+ li r5, 32
+ lis r4, KERNELBASE@h
+ mtctr r5
+ sync
+0: tlbie r4
+ addi r4, r4, 0x1000
+ bdnz 0b
+ sync
+#ifdef CONFIG_SMP
+ TLBSYNC
+ li r0,0
+ stw r0,0(r9) /* clear mmu_hash_lock */
+ mtmsr r10
+ isync
+#endif /* CONFIG_SMP */
+ blr
+_ASM_NOKPROBE_SYMBOL(_tlbia)
diff --git a/arch/powerpc/mm/book3s32/mmu.c b/arch/powerpc/mm/book3s32/mmu.c
new file mode 100644
index 000000000..602ab1312
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/mmu.c
@@ -0,0 +1,470 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification. This includes the 6xx, 7xx, 7xxx,
+ * and 8260 implementations but excludes the 8xx and 4xx.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/memblock.h>
+
+#include <asm/prom.h>
+#include <asm/mmu.h>
+#include <asm/machdep.h>
+#include <asm/code-patching.h>
+#include <asm/sections.h>
+
+#include <mm/mmu_decl.h>
+
+u8 __initdata early_hash[SZ_256K] __aligned(SZ_256K) = {0};
+
+struct hash_pte *Hash;
+static unsigned long Hash_size, Hash_mask;
+unsigned long _SDR1;
+static unsigned int hash_mb, hash_mb2;
+
+struct ppc_bat BATS[8][2]; /* 8 pairs of IBAT, DBAT */
+
+struct batrange { /* stores address ranges mapped by BATs */
+ unsigned long start;
+ unsigned long limit;
+ phys_addr_t phys;
+} bat_addrs[8];
+
+/*
+ * Return PA for this VA if it is mapped by a BAT, or 0
+ */
+phys_addr_t v_block_mapped(unsigned long va)
+{
+ int b;
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
+ if (va >= bat_addrs[b].start && va < bat_addrs[b].limit)
+ return bat_addrs[b].phys + (va - bat_addrs[b].start);
+ return 0;
+}
+
+/*
+ * Return VA for a given PA or 0 if not mapped
+ */
+unsigned long p_block_mapped(phys_addr_t pa)
+{
+ int b;
+ for (b = 0; b < ARRAY_SIZE(bat_addrs); ++b)
+ if (pa >= bat_addrs[b].phys
+ && pa < (bat_addrs[b].limit-bat_addrs[b].start)
+ +bat_addrs[b].phys)
+ return bat_addrs[b].start+(pa-bat_addrs[b].phys);
+ return 0;
+}
+
+int __init find_free_bat(void)
+{
+ int b;
+ int n = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+
+ for (b = 0; b < n; b++) {
+ struct ppc_bat *bat = BATS[b];
+
+ if (!(bat[1].batu & 3))
+ return b;
+ }
+ return -1;
+}
+
+/*
+ * This function calculates the size of the largest block usable to map the
+ * beginning of an area based on the start address and size of that area:
+ * - max block size is 256M on 6xx.
+ * - base address must be aligned to the block size. So the maximum block size
+ * is identified by the lowest bit set to 1 in the base address (for instance
+ * if base is 0x16000000, max size is 0x02000000).
+ * - block size has to be a power of two. This is calculated by finding the
+ * highest bit set to 1.
+ */
+unsigned int bat_block_size(unsigned long base, unsigned long top)
+{
+ unsigned int max_size = SZ_256M;
+ unsigned int base_shift = (ffs(base) - 1) & 31;
+ unsigned int block_shift = (fls(top - base) - 1) & 31;
+
+ return min3(max_size, 1U << base_shift, 1U << block_shift);
+}
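+/*
+ * For instance, bat_block_size(0x01400000, 0x10000000) returns SZ_4M: the
+ * base is only 4M aligned (lowest set bit is 0x00400000), the area spans
+ * 0x0ec00000 bytes (largest power of two not above it is 128M), and
+ * min3(256M, 4M, 128M) = 4M.
+ */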
+
+/*
+ * Set up one of the IBAT (block address translation) register pairs.
+ * The parameters are not checked; in particular size must be a power
+ * of 2 between 128k and 256M.
+ */
+static void setibat(int index, unsigned long virt, phys_addr_t phys,
+ unsigned int size, pgprot_t prot)
+{
+ unsigned int bl = (size >> 17) - 1;
+ int wimgxpp;
+ struct ppc_bat *bat = BATS[index];
+ unsigned long flags = pgprot_val(prot);
+
+ if (!cpu_has_feature(CPU_FTR_NEED_COHERENT))
+ flags &= ~_PAGE_COHERENT;
+
+ wimgxpp = (flags & _PAGE_COHERENT) | (_PAGE_EXEC ? BPP_RX : BPP_XX);
+ bat[0].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+ bat[0].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
+ if (flags & _PAGE_USER)
+ bat[0].batu |= 1; /* Vp = 1 */
+}
+
+static void clearibat(int index)
+{
+ struct ppc_bat *bat = BATS[index];
+
+ bat[0].batu = 0;
+ bat[0].batl = 0;
+}
+
+static unsigned long __init __mmu_mapin_ram(unsigned long base, unsigned long top)
+{
+ int idx;
+
+ while ((idx = find_free_bat()) != -1 && base != top) {
+ unsigned int size = bat_block_size(base, top);
+
+ if (size < 128 << 10)
+ break;
+ setbat(idx, PAGE_OFFSET + base, base, size, PAGE_KERNEL_X);
+ base += size;
+ }
+
+ return base;
+}
+
+unsigned long __init mmu_mapin_ram(unsigned long base, unsigned long top)
+{
+ unsigned long done;
+ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+
+ if (__map_without_bats) {
+ pr_debug("RAM mapped without BATs\n");
+ return base;
+ }
+ if (debug_pagealloc_enabled()) {
+ if (base >= border)
+ return base;
+ if (top >= border)
+ top = border;
+ }
+
+ if (!strict_kernel_rwx_enabled() || base >= border || top <= border)
+ return __mmu_mapin_ram(base, top);
+
+ done = __mmu_mapin_ram(base, border);
+ if (done != border)
+ return done;
+
+ return __mmu_mapin_ram(border, top);
+}
+
+static bool is_module_segment(unsigned long addr)
+{
+ if (!IS_ENABLED(CONFIG_MODULES))
+ return false;
+#ifdef MODULES_VADDR
+ if (addr < ALIGN_DOWN(MODULES_VADDR, SZ_256M))
+ return false;
+ if (addr > ALIGN(MODULES_END, SZ_256M) - 1)
+ return false;
+#else
+ if (addr < ALIGN_DOWN(VMALLOC_START, SZ_256M))
+ return false;
+ if (addr > ALIGN(VMALLOC_END, SZ_256M) - 1)
+ return false;
+#endif
+ return true;
+}
+
+void mmu_mark_initmem_nx(void)
+{
+ int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+ int i;
+ unsigned long base = (unsigned long)_stext - PAGE_OFFSET;
+ unsigned long top = ALIGN((unsigned long)_etext - PAGE_OFFSET, SZ_128K);
+ unsigned long border = (unsigned long)__init_begin - PAGE_OFFSET;
+ unsigned long size;
+
+ for (i = 0; i < nb - 1 && base < top;) {
+ size = bat_block_size(base, top);
+ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
+ base += size;
+ }
+ if (base < top) {
+ size = bat_block_size(base, top);
+ if ((top - base) > size) {
+ size <<= 1;
+ if (strict_kernel_rwx_enabled() && base + size > border)
+ pr_warn("Some RW data is getting mapped X. "
+ "Adjust CONFIG_DATA_SHIFT to avoid that.\n");
+ }
+ setibat(i++, PAGE_OFFSET + base, base, size, PAGE_KERNEL_TEXT);
+ base += size;
+ }
+ for (; i < nb; i++)
+ clearibat(i);
+
+ update_bats();
+
+ for (i = TASK_SIZE >> 28; i < 16; i++) {
+ /* Do not set NX on VM space for modules */
+ if (is_module_segment(i << 28))
+ continue;
+
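+ /* 0x10000000 is the N (no-execute) bit of the segment register */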
+ mtsrin(mfsrin(i << 28) | 0x10000000, i << 28);
+ }
+}
+
+void mmu_mark_rodata_ro(void)
+{
+ int nb = mmu_has_feature(MMU_FTR_USE_HIGH_BATS) ? 8 : 4;
+ int i;
+
+ for (i = 0; i < nb; i++) {
+ struct ppc_bat *bat = BATS[i];
+
+ if (bat_addrs[i].start < (unsigned long)__init_begin)
+ bat[1].batl = (bat[1].batl & ~BPP_RW) | BPP_RX;
+ }
+
+ update_bats();
+}
+
+/*
+ * Set up one of the I/D BAT (block address translation) register pairs.
+ * The parameters are not checked; in particular size must be a power
+ * of 2 between 128k and 256M.
+ * On 603+, only set IBAT when _PAGE_EXEC is set
+ */
+void __init setbat(int index, unsigned long virt, phys_addr_t phys,
+ unsigned int size, pgprot_t prot)
+{
+ unsigned int bl;
+ int wimgxpp;
+ struct ppc_bat *bat;
+ unsigned long flags = pgprot_val(prot);
+
+ if (index == -1)
+ index = find_free_bat();
+ if (index == -1) {
+ pr_err("%s: no BAT available for mapping 0x%llx\n", __func__,
+ (unsigned long long)phys);
+ return;
+ }
+ bat = BATS[index];
+
+ if ((flags & _PAGE_NO_CACHE) ||
+ (cpu_has_feature(CPU_FTR_NEED_COHERENT) == 0))
+ flags &= ~_PAGE_COHERENT;
+
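+ /*
+ * The BL (block length) field encodes the size: (size >> 17) - 1
+ * gives 0 for 128kB and 0x7ff for 256MB.
+ */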
+ bl = (size >> 17) - 1;
+ /* Do DBAT first */
+ wimgxpp = flags & (_PAGE_WRITETHRU | _PAGE_NO_CACHE
+ | _PAGE_COHERENT | _PAGE_GUARDED);
+ wimgxpp |= (flags & _PAGE_RW)? BPP_RW: BPP_RX;
+ bat[1].batu = virt | (bl << 2) | 2; /* Vs=1, Vp=0 */
+ bat[1].batl = BAT_PHYS_ADDR(phys) | wimgxpp;
+ if (flags & _PAGE_USER)
+ bat[1].batu |= 1; /* Vp = 1 */
+ if (flags & _PAGE_GUARDED) {
+ /* G bit must be zero in IBATs */
+ flags &= ~_PAGE_EXEC;
+ }
+ if (flags & _PAGE_EXEC)
+ bat[0] = bat[1];
+ else
+ bat[0].batu = bat[0].batl = 0;
+
+ bat_addrs[index].start = virt;
+ bat_addrs[index].limit = virt + ((bl + 1) << 17) - 1;
+ bat_addrs[index].phys = phys;
+}
+
+/*
+ * Preload a translation in the hash table
+ */
+void hash_preload(struct mm_struct *mm, unsigned long ea)
+{
+ pmd_t *pmd;
+
+ if (!Hash)
+ return;
+ pmd = pmd_off(mm, ea);
+ if (!pmd_none(*pmd))
+ add_hash_page(mm->context.id, ea, pmd_val(*pmd));
+}
+
+/*
+ * This is called at the end of handling a user page fault, when the
+ * fault has been handled by updating a PTE in the linux page tables.
+ * We use it to preload an HPTE into the hash table corresponding to
+ * the updated linux PTE.
+ *
+ * This must always be called with the pte lock held.
+ */
+void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep)
+{
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+ /*
+ * We don't need to worry about _PAGE_PRESENT here because we are
+ * called with either mm->page_table_lock held or ptl lock held
+ */
+
+ /* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
+ if (!pte_young(*ptep) || address >= TASK_SIZE)
+ return;
+
+ /* We have to test for regs NULL since init will get here first thing at boot */
+ if (!current->thread.regs)
+ return;
+
+ /* We also avoid filling the hash if not coming from a fault */
+ if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
+ return;
+
+ hash_preload(vma->vm_mm, address);
+}
+
+/*
+ * Initialize the hash table and patch the instructions in hashtable.S.
+ */
+void __init MMU_init_hw(void)
+{
+ unsigned int n_hpteg, lg_n_hpteg;
+
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+
+ if ( ppc_md.progress ) ppc_md.progress("hash:enter", 0x105);
+
+#define LG_HPTEG_SIZE 6 /* 64 bytes per HPTEG */
+#define SDR1_LOW_BITS ((n_hpteg - 1) >> 10)
+#define MIN_N_HPTEG 1024 /* min 64kB hash table */
+
+ /*
+ * Allow 1 HPTE (1/8 HPTEG) for each page of memory.
+ * This is less than the recommended amount, but then
+ * Linux ain't AIX.
+ */
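+ /*
+ * For example, 256MB of RAM with 4kB pages gives 65536 / 8 = 8192 HPTEGs,
+ * i.e. a 512kB hash table once shifted by LG_HPTEG_SIZE below.
+ */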
+ n_hpteg = total_memory / (PAGE_SIZE * 8);
+ if (n_hpteg < MIN_N_HPTEG)
+ n_hpteg = MIN_N_HPTEG;
+ lg_n_hpteg = __ilog2(n_hpteg);
+ if (n_hpteg & (n_hpteg - 1)) {
+ ++lg_n_hpteg; /* round up if not power of 2 */
+ n_hpteg = 1 << lg_n_hpteg;
+ }
+ Hash_size = n_hpteg << LG_HPTEG_SIZE;
+
+ /*
+ * Find some memory for the hash table.
+ */
+ if ( ppc_md.progress ) ppc_md.progress("hash:find piece", 0x322);
+ Hash = memblock_alloc(Hash_size, Hash_size);
+ if (!Hash)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, Hash_size, Hash_size);
+ _SDR1 = __pa(Hash) | SDR1_LOW_BITS;
+
+ pr_info("Total memory = %lldMB; using %ldkB for hash table\n",
+ (unsigned long long)(total_memory >> 20), Hash_size >> 10);
+
+
+ Hash_mask = n_hpteg - 1;
+ hash_mb2 = hash_mb = 32 - LG_HPTEG_SIZE - lg_n_hpteg;
+ if (lg_n_hpteg > 16)
+ hash_mb2 = 16 - LG_HPTEG_SIZE;
+}
+
+void __init MMU_init_hw_patch(void)
+{
+ unsigned int hmask = Hash_mask >> (16 - LG_HPTEG_SIZE);
+ unsigned int hash = (unsigned int)Hash - PAGE_OFFSET;
+
+ if (!mmu_has_feature(MMU_FTR_HPTE_TABLE))
+ return;
+
+ if (ppc_md.progress)
+ ppc_md.progress("hash:patch", 0x345);
+ if (ppc_md.progress)
+ ppc_md.progress("hash:done", 0x205);
+
+ /* WARNING: Make sure nothing can trigger a KASAN check past this point */
+
+ /*
+ * Patch up the instructions in hash_low.S:create_hpte
+ */
+ modify_instruction_site(&patch__hash_page_A0, 0xffff, hash >> 16);
+ modify_instruction_site(&patch__hash_page_A1, 0x7c0, hash_mb << 6);
+ modify_instruction_site(&patch__hash_page_A2, 0x7c0, hash_mb2 << 6);
+ modify_instruction_site(&patch__hash_page_B, 0xffff, hmask);
+ modify_instruction_site(&patch__hash_page_C, 0xffff, hmask);
+
+ /*
+ * Patch up the instructions in hash_low.S:flush_hash_pages
+ */
+ modify_instruction_site(&patch__flush_hash_A0, 0xffff, hash >> 16);
+ modify_instruction_site(&patch__flush_hash_A1, 0x7c0, hash_mb << 6);
+ modify_instruction_site(&patch__flush_hash_A2, 0x7c0, hash_mb2 << 6);
+ modify_instruction_site(&patch__flush_hash_B, 0xffff, hmask);
+}
+
+void setup_initial_memory_limit(phys_addr_t first_memblock_base,
+ phys_addr_t first_memblock_size)
+{
+ /* We don't currently support the first MEMBLOCK not mapping 0
+ * physical on those processors
+ */
+ BUG_ON(first_memblock_base != 0);
+
+ memblock_set_current_limit(min_t(u64, first_memblock_size, SZ_256M));
+}
+
+void __init print_system_hash_info(void)
+{
+ pr_info("Hash_size = 0x%lx\n", Hash_size);
+ if (Hash_mask)
+ pr_info("Hash_mask = 0x%lx\n", Hash_mask);
+}
+
+#ifdef CONFIG_PPC_KUEP
+void __init setup_kuep(bool disabled)
+{
+ pr_info("Activating Kernel Userspace Execution Prevention\n");
+
+ if (disabled)
+ pr_warn("KUEP cannot be disabled yet on 6xx when compiled in\n");
+}
+#endif
+
+#ifdef CONFIG_PPC_KUAP
+void __init setup_kuap(bool disabled)
+{
+ pr_info("Activating Kernel Userspace Access Protection\n");
+
+ if (disabled)
+ pr_warn("KUAP cannot be disabled yet on 6xx when compiled in\n");
+}
+#endif
diff --git a/arch/powerpc/mm/book3s32/mmu_context.c b/arch/powerpc/mm/book3s32/mmu_context.c
new file mode 100644
index 000000000..218996e40
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/mmu_context.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains the routines for handling the MMU on those
+ * PowerPC implementations where the MMU substantially follows the
+ * architecture specification. This includes the 6xx, 7xx, 7xxx,
+ * and 8260 implementations but excludes the 8xx and 4xx.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/export.h>
+
+#include <asm/mmu_context.h>
+
+/*
+ * On 32-bit PowerPC 6xx/7xx/7xxx CPUs, we use a set of 16 VSIDs
+ * (virtual segment identifiers) for each context. Although the
+ * hardware supports 24-bit VSIDs, and thus >1 million contexts,
+ * we only use 32,768 of them. That is ample, since there can be
+ * at most around 30,000 tasks in the system anyway, and it means
+ * that we can use a bitmap to indicate which contexts are in use.
+ * Using a bitmap means that we entirely avoid all of the problems
+ * that we used to have when the context number overflowed,
+ * particularly on SMP systems.
+ * -- paulus.
+ */
+#define NO_CONTEXT ((unsigned long) -1)
+#define LAST_CONTEXT 32767
+#define FIRST_CONTEXT 1
+
+/*
+ * This function defines the mapping from contexts to VSIDs (virtual
+ * segment IDs). We use a skew on both the context and the high 4 bits
+ * of the 32-bit virtual address (the "effective segment ID") in order
+ * to spread out the entries in the MMU hash table. Note, if this
+ * function is changed then arch/powerpc/mm/book3s32/hash_low.S will have to be
+ * changed to correspond.
+ *
+ *
+ * CTX_TO_VSID(ctx, va) (((ctx) * (897 * 16) + ((va) >> 28) * 0x111) \
+ * & 0xffffff)
+ */
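+/*
+ * For example, context 5 and a virtual address in segment 0xb give
+ * VSID (5 * 897 * 16 + 0xb * 0x111) & 0xffffff = 0x1240b.
+ */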
+
+static unsigned long next_mmu_context;
+static unsigned long context_map[LAST_CONTEXT / BITS_PER_LONG + 1];
+
+unsigned long __init_new_context(void)
+{
+ unsigned long ctx = next_mmu_context;
+
+ while (test_and_set_bit(ctx, context_map)) {
+ ctx = find_next_zero_bit(context_map, LAST_CONTEXT+1, ctx);
+ if (ctx > LAST_CONTEXT)
+ ctx = 0;
+ }
+ next_mmu_context = (ctx + 1) & LAST_CONTEXT;
+
+ return ctx;
+}
+EXPORT_SYMBOL_GPL(__init_new_context);
+
+/*
+ * Set up the context for a new address space.
+ */
+int init_new_context(struct task_struct *t, struct mm_struct *mm)
+{
+ mm->context.id = __init_new_context();
+
+ return 0;
+}
+
+/*
+ * Free a context ID. Make sure to call this with preempt disabled!
+ */
+void __destroy_context(unsigned long ctx)
+{
+ clear_bit(ctx, context_map);
+}
+EXPORT_SYMBOL_GPL(__destroy_context);
+
+/*
+ * We're finished using the context for an address space.
+ */
+void destroy_context(struct mm_struct *mm)
+{
+ preempt_disable();
+ if (mm->context.id != NO_CONTEXT) {
+ __destroy_context(mm->context.id);
+ mm->context.id = NO_CONTEXT;
+ }
+ preempt_enable();
+}
+
+/*
+ * Initialize the context management stuff.
+ */
+void __init mmu_context_init(void)
+{
+ /* Reserve context 0 for kernel use */
+ context_map[0] = (1 << FIRST_CONTEXT) - 1;
+ next_mmu_context = FIRST_CONTEXT;
+}
diff --git a/arch/powerpc/mm/book3s32/tlb.c b/arch/powerpc/mm/book3s32/tlb.c
new file mode 100644
index 000000000..b6c7427da
--- /dev/null
+++ b/arch/powerpc/mm/book3s32/tlb.c
@@ -0,0 +1,171 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * This file contains the routines for TLB flushing.
+ * On machines where the MMU uses a hash table to store virtual to
+ * physical translations, these routines flush entries from the
+ * hash table also.
+ * -- paulus
+ *
+ * Derived from arch/ppc/mm/init.c:
+ * Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
+ *
+ * Modifications by Paul Mackerras (PowerMac) (paulus@cs.anu.edu.au)
+ * and Cort Dougan (PReP) (cort@cs.nmt.edu)
+ * Copyright (C) 1996 Paul Mackerras
+ *
+ * Derived from "arch/i386/mm/init.c"
+ * Copyright (C) 1991, 1992, 1993, 1994 Linus Torvalds
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/init.h>
+#include <linux/highmem.h>
+#include <linux/pagemap.h>
+#include <linux/export.h>
+
+#include <asm/tlbflush.h>
+#include <asm/tlb.h>
+
+#include <mm/mmu_decl.h>
+
+/*
+ * Called when unmapping pages to flush entries from the TLB/hash table.
+ */
+void flush_hash_entry(struct mm_struct *mm, pte_t *ptep, unsigned long addr)
+{
+ unsigned long ptephys;
+
+ if (Hash) {
+ ptephys = __pa(ptep) & PAGE_MASK;
+ flush_hash_pages(mm->context.id, addr, ptephys, 1);
+ }
+}
+EXPORT_SYMBOL(flush_hash_entry);
+
+/*
+ * Called at the end of a mmu_gather operation to make sure the
+ * TLB flush is completely done.
+ */
+void tlb_flush(struct mmu_gather *tlb)
+{
+ if (!Hash) {
+ /*
+ * 603 needs to flush the whole TLB here since
+ * it doesn't use a hash table.
+ */
+ _tlbia();
+ }
+}
+
+/*
+ * TLB flushing:
+ *
+ * - flush_tlb_mm(mm) flushes the specified mm context TLB's
+ * - flush_tlb_page(vma, vmaddr) flushes one page
+ * - flush_tlb_range(vma, start, end) flushes a range of pages
+ * - flush_tlb_kernel_range(start, end) flushes kernel pages
+ *
+ * since the hardware hash table functions as an extension of the
+ * tlb as far as the linux tables are concerned, flush it too.
+ * -- Cort
+ */
+
+static void flush_range(struct mm_struct *mm, unsigned long start,
+ unsigned long end)
+{
+ pmd_t *pmd;
+ unsigned long pmd_end;
+ int count;
+ unsigned int ctx = mm->context.id;
+
+ start &= PAGE_MASK;
+ if (!Hash) {
+ if (end - start <= PAGE_SIZE)
+ _tlbie(start);
+ else
+ _tlbia();
+ return;
+ }
+ if (start >= end)
+ return;
+ end = (end - 1) | ~PAGE_MASK;
+ pmd = pmd_off(mm, start);
+ for (;;) {
+ pmd_end = ((start + PGDIR_SIZE) & PGDIR_MASK) - 1;
+ if (pmd_end > end)
+ pmd_end = end;
+ if (!pmd_none(*pmd)) {
+ count = ((pmd_end - start) >> PAGE_SHIFT) + 1;
+ flush_hash_pages(ctx, start, pmd_val(*pmd), count);
+ }
+ if (pmd_end == end)
+ break;
+ start = pmd_end + 1;
+ ++pmd;
+ }
+}
+
+/*
+ * Flush kernel TLB entries in the given range
+ */
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ flush_range(&init_mm, start, end);
+}
+EXPORT_SYMBOL(flush_tlb_kernel_range);
+
+/*
+ * Flush all the (user) entries for the address space described by mm.
+ */
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ struct vm_area_struct *mp;
+
+ if (!Hash) {
+ _tlbia();
+ return;
+ }
+
+ /*
+ * It is safe to go down the mm's list of vmas when called
+ * from dup_mmap, holding mmap_lock. It would also be safe from
+ * unmap_region or exit_mmap, but not from vmtruncate on SMP -
+ * but it seems dup_mmap is the only SMP case which gets here.
+ */
+ for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
+ flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
+}
+EXPORT_SYMBOL(flush_tlb_mm);
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long vmaddr)
+{
+ struct mm_struct *mm;
+ pmd_t *pmd;
+
+ if (!Hash) {
+ _tlbie(vmaddr);
+ return;
+ }
+ mm = (vmaddr < TASK_SIZE)? vma->vm_mm: &init_mm;
+ pmd = pmd_off(mm, vmaddr);
+ if (!pmd_none(*pmd))
+ flush_hash_pages(mm->context.id, vmaddr, pmd_val(*pmd), 1);
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+/*
+ * For each address in the range, find the pte for the address
+ * and check _PAGE_HASHPTE bit; if it is set, find and destroy
+ * the corresponding HPTE.
+ */
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ flush_range(vma->vm_mm, start, end);
+}
+EXPORT_SYMBOL(flush_tlb_range);
+
+void __init early_init_mmu(void)
+{
+}