From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 arch/parisc/include/asm/mmu_context.h | 100 ++++++++++++++++++++++++++++++++++
 1 file changed, 100 insertions(+)
 create mode 100644 arch/parisc/include/asm/mmu_context.h

diff --git a/arch/parisc/include/asm/mmu_context.h b/arch/parisc/include/asm/mmu_context.h
new file mode 100644
index 000000000..c9187fe83
--- /dev/null
+++ b/arch/parisc/include/asm/mmu_context.h
@@ -0,0 +1,100 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __PARISC_MMU_CONTEXT_H
+#define __PARISC_MMU_CONTEXT_H
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <asm-generic/mm_hooks.h>
+
+/* on PA-RISC, we actually have enough contexts to justify an allocator
+ * for them. prumpf */
+
+extern unsigned long alloc_sid(void);
+extern void free_sid(unsigned long);
+
+#define init_new_context init_new_context
+static inline int
+init_new_context(struct task_struct *tsk, struct mm_struct *mm)
+{
+	BUG_ON(atomic_read(&mm->mm_users) != 1);
+
+	mm->context.space_id = alloc_sid();
+	return 0;
+}
+
+#define destroy_context destroy_context
+static inline void
+destroy_context(struct mm_struct *mm)
+{
+	free_sid(mm->context.space_id);
+	mm->context.space_id = 0;
+}
+
+static inline unsigned long __space_to_prot(mm_context_t context)
+{
+#if SPACEID_SHIFT == 0
+	return context.space_id << 1;
+#else
+	return context.space_id >> (SPACEID_SHIFT - 1);
+#endif
+}
+
+static inline void load_context(mm_context_t context)
+{
+	mtsp(context.space_id, SR_USER);
+	mtctl(__space_to_prot(context), 8);
+}
+
+static inline void switch_mm_irqs_off(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	if (prev != next) {
+#ifdef CONFIG_TLB_PTLOCK
+		/* put physical address of page_table_lock in cr28 (tr4)
+		   for TLB faults */
+		spinlock_t *pgd_lock = &next->page_table_lock;
+		mtctl(__pa(__ldcw_align(&pgd_lock->rlock.raw_lock)), 28);
+#endif
+		mtctl(__pa(next->pgd), 25);
+		load_context(next->context);
+	}
+}
+
+static inline void switch_mm(struct mm_struct *prev,
+		struct mm_struct *next, struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	if (prev == next)
+		return;
+
+	local_irq_save(flags);
+	switch_mm_irqs_off(prev, next, tsk);
+	local_irq_restore(flags);
+}
+#define switch_mm_irqs_off switch_mm_irqs_off
+
+#define activate_mm activate_mm
+static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
+{
+	/*
+	 * Activate_mm is our one chance to allocate a space id
+	 * for a new mm created in the exec path. There's also
+	 * some lazy tlb stuff, which is currently dead code, but
+	 * we only allocate a space id if one hasn't been allocated
+	 * already, so we should be OK.
+	 */
+
+	BUG_ON(next == &init_mm); /* Should never happen */
+
+	if (next->context.space_id == 0)
+		next->context.space_id = alloc_sid();
+
+	switch_mm(prev,next,current);
+}
+
+#include <asm-generic/mmu_context.h>
+
+#endif
--
cgit v1.2.3