Diffstat (limited to 'arch/loongarch/mm')
-rw-r--r--  arch/loongarch/mm/Makefile        12
-rw-r--r--  arch/loongarch/mm/cache.c        205
-rw-r--r--  arch/loongarch/mm/extable.c       63
-rw-r--r--  arch/loongarch/mm/fault.c        270
-rw-r--r--  arch/loongarch/mm/hugetlbpage.c   75
-rw-r--r--  arch/loongarch/mm/init.c         250
-rw-r--r--  arch/loongarch/mm/ioremap.c       28
-rw-r--r--  arch/loongarch/mm/kasan_init.c   294
-rw-r--r--  arch/loongarch/mm/maccess.c       10
-rw-r--r--  arch/loongarch/mm/mmap.c         146
-rw-r--r--  arch/loongarch/mm/page.S          84
-rw-r--r--  arch/loongarch/mm/pgtable.c      146
-rw-r--r--  arch/loongarch/mm/tlb.c          320
-rw-r--r--  arch/loongarch/mm/tlbex.S        524
14 files changed, 2427 insertions, 0 deletions
diff --git a/arch/loongarch/mm/Makefile b/arch/loongarch/mm/Makefile
new file mode 100644
index 0000000000..e4d1e581db
--- /dev/null
+++ b/arch/loongarch/mm/Makefile
@@ -0,0 +1,12 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch-specific parts of the memory manager.
+#
+
+obj-y += init.o cache.o tlb.o tlbex.o extable.o \
+ fault.o ioremap.o maccess.o mmap.o pgtable.o page.o
+
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+KASAN_SANITIZE_kasan_init.o := n
diff --git a/arch/loongarch/mm/cache.c b/arch/loongarch/mm/cache.c
new file mode 100644
index 0000000000..6be04d36ca
--- /dev/null
+++ b/arch/loongarch/mm/cache.c
@@ -0,0 +1,205 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2003, 06, 07 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ */
+#include <linux/cacheinfo.h>
+#include <linux/export.h>
+#include <linux/fs.h>
+#include <linux/highmem.h>
+#include <linux/kernel.h>
+#include <linux/linkage.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/syscalls.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+#include <asm/numa.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+
+void cache_error_setup(void)
+{
+ extern char __weak except_vec_cex;
+ set_merr_handler(0x0, &except_vec_cex, 0x80);
+}
+
+/*
+ * LoongArch maintains ICache/DCache coherency in hardware,
+ * so we only need an "ibar" here to avoid instruction hazards.
+ */
+void local_flush_icache_range(unsigned long start, unsigned long end)
+{
+ asm volatile ("\tibar 0\n"::);
+}
+EXPORT_SYMBOL(local_flush_icache_range);
+
+static void flush_cache_leaf(unsigned int leaf)
+{
+ int i, j, nr_nodes;
+ uint64_t addr = CSR_DMW0_BASE;
+ struct cache_desc *cdesc = current_cpu_data.cache_leaves + leaf;
+
+ nr_nodes = cache_private(cdesc) ? 1 : loongson_sysconf.nr_nodes;
+
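+ /*
+ * Walk every way of every set of this leaf: the way is encoded in the
+ * low address bits (addr++ per way) and the set advances by one line
+ * size; shared caches are flushed once per node by stepping into each
+ * node's address space.
+ */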
+ do {
+ for (i = 0; i < cdesc->sets; i++) {
+ for (j = 0; j < cdesc->ways; j++) {
+ flush_cache_line(leaf, addr);
+ addr++;
+ }
+
+ addr -= cdesc->ways;
+ addr += cdesc->linesz;
+ }
+ addr += (1ULL << NODE_ADDRSPACE_SHIFT);
+ } while (--nr_nodes > 0);
+}
+
+asmlinkage __visible void __flush_cache_all(void)
+{
+ int leaf;
+ struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+ unsigned int cache_present = current_cpu_data.cache_leaves_present;
+
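+ /*
+ * If the last-level cache is inclusive, flushing it alone is
+ * sufficient; otherwise every cache leaf must be flushed.
+ */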
+ leaf = cache_present - 1;
+ if (cache_inclusive(cdesc + leaf)) {
+ flush_cache_leaf(leaf);
+ return;
+ }
+
+ for (leaf = 0; leaf < cache_present; leaf++)
+ flush_cache_leaf(leaf);
+}
+
+#define L1IUPRE (1 << 0)
+#define L1IUUNIFY (1 << 1)
+#define L1DPRE (1 << 2)
+
+#define LXIUPRE (1 << 0)
+#define LXIUUNIFY (1 << 1)
+#define LXIUPRIV (1 << 2)
+#define LXIUINCL (1 << 3)
+#define LXDPRE (1 << 4)
+#define LXDPRIV (1 << 5)
+#define LXDINCL (1 << 6)
+
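+/*
+ * Decode one cache leaf: CPUCFG17+leaf describes the ways/sets/line size,
+ * while the CPUCFG16 bits passed in cfg0 say whether a non-L1 cache is
+ * private and/or inclusive (L1 caches are always marked private).
+ */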
+#define populate_cache_properties(cfg0, cdesc, level, leaf) \
+do { \
+ unsigned int cfg1; \
+ \
+ cfg1 = read_cpucfg(LOONGARCH_CPUCFG17 + leaf); \
+ if (level == 1) { \
+ cdesc->flags |= CACHE_PRIVATE; \
+ } else { \
+ if (cfg0 & LXIUPRIV) \
+ cdesc->flags |= CACHE_PRIVATE; \
+ if (cfg0 & LXIUINCL) \
+ cdesc->flags |= CACHE_INCLUSIVE; \
+ } \
+ cdesc->level = level; \
+ cdesc->flags |= CACHE_PRESENT; \
+ cdesc->ways = ((cfg1 & CPUCFG_CACHE_WAYS_M) >> CPUCFG_CACHE_WAYS) + 1; \
+ cdesc->sets = 1 << ((cfg1 & CPUCFG_CACHE_SETS_M) >> CPUCFG_CACHE_SETS); \
+ cdesc->linesz = 1 << ((cfg1 & CPUCFG_CACHE_LSIZE_M) >> CPUCFG_CACHE_LSIZE); \
+ cdesc++; leaf++; \
+} while (0)
+
+void cpu_cache_init(void)
+{
+ unsigned int leaf = 0, level = 1;
+ unsigned int config = read_cpucfg(LOONGARCH_CPUCFG16);
+ struct cache_desc *cdesc = current_cpu_data.cache_leaves;
+
+ if (config & L1IUPRE) {
+ if (config & L1IUUNIFY)
+ cdesc->type = CACHE_TYPE_UNIFIED;
+ else
+ cdesc->type = CACHE_TYPE_INST;
+ populate_cache_properties(config, cdesc, level, leaf);
+ }
+
+ if (config & L1DPRE) {
+ cdesc->type = CACHE_TYPE_DATA;
+ populate_cache_properties(config, cdesc, level, leaf);
+ }
+
+ config = config >> 3;
+ for (level = 2; level <= CACHE_LEVEL_MAX; level++) {
+ if (!config)
+ break;
+
+ if (config & LXIUPRE) {
+ if (config & LXIUUNIFY)
+ cdesc->type = CACHE_TYPE_UNIFIED;
+ else
+ cdesc->type = CACHE_TYPE_INST;
+ populate_cache_properties(config, cdesc, level, leaf);
+ }
+
+ if (config & LXDPRE) {
+ cdesc->type = CACHE_TYPE_DATA;
+ populate_cache_properties(config, cdesc, level, leaf);
+ }
+
+ config = config >> 7;
+ }
+
+ BUG_ON(leaf > CACHE_LEAVES_MAX);
+
+ current_cpu_data.cache_leaves_present = leaf;
+ current_cpu_data.options |= LOONGARCH_CPU_PREFETCH;
+}
+
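+/*
+ * Base page protections indexed by VM_{READ,WRITE,EXEC,SHARED}. Note that
+ * private writable mappings deliberately lack _PAGE_WRITE, so the first
+ * write faults and is handled as copy-on-write.
+ */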
+static const pgprot_t protection_map[16] = {
+ [VM_NONE] = __pgprot(_CACHE_CC | _PAGE_USER |
+ _PAGE_PROTNONE | _PAGE_NO_EXEC |
+ _PAGE_NO_READ),
+ [VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED] = __pgprot(_CACHE_CC | _PAGE_USER |
+ _PAGE_PROTNONE | _PAGE_NO_EXEC |
+ _PAGE_NO_READ),
+ [VM_SHARED | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC),
+ [VM_SHARED | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC | _PAGE_WRITE),
+ [VM_SHARED | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_NO_EXEC | _PAGE_WRITE),
+ [VM_SHARED | VM_EXEC] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED | VM_EXEC | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT),
+ [VM_SHARED | VM_EXEC | VM_WRITE] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_WRITE),
+ [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ] = __pgprot(_CACHE_CC | _PAGE_VALID |
+ _PAGE_USER | _PAGE_PRESENT |
+ _PAGE_WRITE)
+};
+DECLARE_VM_GET_PAGE_PROT
diff --git a/arch/loongarch/mm/extable.c b/arch/loongarch/mm/extable.c
new file mode 100644
index 0000000000..9ab69872dc
--- /dev/null
+++ b/arch/loongarch/mm/extable.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/bitfield.h>
+#include <linux/extable.h>
+#include <linux/uaccess.h>
+#include <asm/asm-extable.h>
+#include <asm/branch.h>
+
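+/*
+ * ex->fixup stores the fixup target as an offset relative to the fixup
+ * field itself, so the absolute address is &ex->fixup + ex->fixup.
+ */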
+static inline unsigned long
+get_ex_fixup(const struct exception_table_entry *ex)
+{
+ return ((unsigned long)&ex->fixup + ex->fixup);
+}
+
+static inline void regs_set_gpr(struct pt_regs *regs,
+ unsigned int offset, unsigned long val)
+{
+ if (offset && offset <= MAX_REG_OFFSET)
+ *(unsigned long *)((unsigned long)regs + offset) = val;
+}
+
+static bool ex_handler_fixup(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ regs->csr_era = get_ex_fixup(ex);
+
+ return true;
+}
+
+static bool ex_handler_uaccess_err_zero(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ int reg_err = FIELD_GET(EX_DATA_REG_ERR, ex->data);
+ int reg_zero = FIELD_GET(EX_DATA_REG_ZERO, ex->data);
+
+ regs_set_gpr(regs, reg_err * sizeof(unsigned long), -EFAULT);
+ regs_set_gpr(regs, reg_zero * sizeof(unsigned long), 0);
+ regs->csr_era = get_ex_fixup(ex);
+
+ return true;
+}
+
+bool fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *ex;
+
+ ex = search_exception_tables(exception_era(regs));
+ if (!ex)
+ return false;
+
+ switch (ex->type) {
+ case EX_TYPE_FIXUP:
+ return ex_handler_fixup(ex, regs);
+ case EX_TYPE_UACCESS_ERR_ZERO:
+ return ex_handler_uaccess_err_zero(ex, regs);
+ case EX_TYPE_BPF:
+ return ex_handler_bpf(ex, regs);
+ }
+
+ BUG();
+}
diff --git a/arch/loongarch/mm/fault.c b/arch/loongarch/mm/fault.c
new file mode 100644
index 0000000000..1fc2f6813e
--- /dev/null
+++ b/arch/loongarch/mm/fault.c
@@ -0,0 +1,270 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 - 2000 by Ralf Baechle
+ */
+#include <linux/context_tracking.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/ratelimit.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kdebug.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <linux/kfence.h>
+
+#include <asm/branch.h>
+#include <asm/exception.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+
+int show_unhandled_signals = 1;
+
+static void __kprobes no_context(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ const int field = sizeof(unsigned long) * 2;
+
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ if (kfence_handle_page_fault(address, write, regs))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+
+ pr_alert("CPU %d Unable to handle kernel paging request at "
+ "virtual address %0*lx, era == %0*lx, ra == %0*lx\n",
+ raw_smp_processor_id(), field, address, field, regs->csr_era,
+ field, regs->regs[1]);
+ die("Oops", regs);
+}
+
+static void __kprobes do_out_of_memory(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ /*
+ * We ran out of memory, call the OOM killer, and return to userspace
+ * (which will retry the fault, or kill us if we got oom-killed).
+ */
+ if (!user_mode(regs)) {
+ no_context(regs, write, address);
+ return;
+ }
+ pagefault_out_of_memory();
+}
+
+static void __kprobes do_sigbus(struct pt_regs *regs,
+ unsigned long write, unsigned long address, int si_code)
+{
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs)) {
+ no_context(regs, write, address);
+ return;
+ }
+
+ /*
+ * Send a sigbus, regardless of whether we were in kernel
+ * or user mode.
+ */
+ current->thread.csr_badvaddr = address;
+ current->thread.trap_nr = read_csr_excode();
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)address);
+}
+
+static void __kprobes do_sigsegv(struct pt_regs *regs,
+ unsigned long write, unsigned long address, int si_code)
+{
+ const int field = sizeof(unsigned long) * 2;
+ static DEFINE_RATELIMIT_STATE(ratelimit_state, 5 * HZ, 10);
+
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs)) {
+ no_context(regs, write, address);
+ return;
+ }
+
+ /* User mode accesses just cause a SIGSEGV */
+ current->thread.csr_badvaddr = address;
+ if (!write)
+ current->thread.error_code = 1;
+ else
+ current->thread.error_code = 2;
+ current->thread.trap_nr = read_csr_excode();
+
+ if (show_unhandled_signals &&
+ unhandled_signal(current, SIGSEGV) && __ratelimit(&ratelimit_state)) {
+ pr_info("do_page_fault(): sending SIGSEGV to %s for invalid %s %0*lx\n",
+ current->comm,
+ write ? "write access to" : "read access from",
+ field, address);
+ pr_info("era = %0*lx in", field,
+ (unsigned long) regs->csr_era);
+ print_vma_addr(KERN_CONT " ", regs->csr_era);
+ pr_cont("\n");
+ pr_info("ra = %0*lx in", field,
+ (unsigned long) regs->regs[1]);
+ print_vma_addr(KERN_CONT " ", regs->regs[1]);
+ pr_cont("\n");
+ }
+ force_sig_fault(SIGSEGV, si_code, (void __user *)address);
+}
+
+/*
+ * This routine handles page faults. It determines the address,
+ * and the problem, and then passes it off to one of the appropriate
+ * routines.
+ */
+static void __kprobes __do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ int si_code = SEGV_MAPERR;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+ struct task_struct *tsk = current;
+ struct mm_struct *mm = tsk->mm;
+ struct vm_area_struct *vma = NULL;
+ vm_fault_t fault;
+
+ if (kprobe_page_fault(regs, current->thread.trap_nr))
+ return;
+
+ /*
+ * We fault-in kernel-space virtual memory on-demand. The
+ * 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (address & __UA_LIMIT) {
+ if (!user_mode(regs))
+ no_context(regs, write, address);
+ else
+ do_sigsegv(regs, write, address, si_code);
+ return;
+ }
+
+ /*
+ * If we're in an interrupt or have no user
+ * context, we must not take the fault..
+ */
+ if (faulthandler_disabled() || !mm) {
+ do_sigsegv(regs, write, address, si_code);
+ return;
+ }
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
+retry:
+ vma = lock_mm_and_find_vma(mm, address, regs);
+ if (unlikely(!vma))
+ goto bad_area_nosemaphore;
+ goto good_area;
+
+/*
+ * Something tried to access memory that isn't in our memory map..
+ * Fix it, but check if it's kernel or user first..
+ */
+bad_area:
+ mmap_read_unlock(mm);
+bad_area_nosemaphore:
+ do_sigsegv(regs, write, address, si_code);
+ return;
+
+/*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it..
+ */
+good_area:
+ si_code = SEGV_ACCERR;
+
+ if (write) {
+ flags |= FAULT_FLAG_WRITE;
+ if (!(vma->vm_flags & VM_WRITE))
+ goto bad_area;
+ } else {
+ if (!(vma->vm_flags & VM_READ) && address != exception_era(regs))
+ goto bad_area;
+ if (!(vma->vm_flags & VM_EXEC) && address == exception_era(regs))
+ goto bad_area;
+ }
+
+ /*
+ * If for any reason at all we couldn't handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, address, flags, regs);
+
+ if (fault_signal_pending(fault, regs)) {
+ if (!user_mode(regs))
+ no_context(regs, write, address);
+ return;
+ }
+
+ /* The fault is fully completed (including releasing mmap lock) */
+ if (fault & VM_FAULT_COMPLETED)
+ return;
+
+ if (unlikely(fault & VM_FAULT_RETRY)) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ mmap_read_unlock(mm);
+ if (fault & VM_FAULT_OOM) {
+ do_out_of_memory(regs, write, address);
+ return;
+ } else if (fault & VM_FAULT_SIGSEGV) {
+ do_sigsegv(regs, write, address, si_code);
+ return;
+ } else if (fault & (VM_FAULT_SIGBUS|VM_FAULT_HWPOISON|VM_FAULT_HWPOISON_LARGE)) {
+ do_sigbus(regs, write, address, si_code);
+ return;
+ }
+ BUG();
+ }
+
+ mmap_read_unlock(mm);
+}
+
+asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
+ unsigned long write, unsigned long address)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ /* Enable interrupts if they were enabled in the parent context */
+ if (likely(regs->csr_prmd & CSR_PRMD_PIE))
+ local_irq_enable();
+
+ __do_page_fault(regs, write, address);
+
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
diff --git a/arch/loongarch/mm/hugetlbpage.c b/arch/loongarch/mm/hugetlbpage.c
new file mode 100644
index 0000000000..1e76fcb830
--- /dev/null
+++ b/arch/loongarch/mm/hugetlbpage.c
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/pagemap.h>
+#include <linux/err.h>
+#include <linux/sysctl.h>
+#include <asm/mman.h>
+#include <asm/tlb.h>
+#include <asm/tlbflush.h>
+
+pte_t *huge_pte_alloc(struct mm_struct *mm, struct vm_area_struct *vma,
+ unsigned long addr, unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pte_t *pte = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ p4d = p4d_alloc(mm, pgd, addr);
+ pud = pud_alloc(mm, p4d, addr);
+ if (pud)
+ pte = (pte_t *)pmd_alloc(mm, pud, addr);
+
+ return pte;
+}
+
+pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr,
+ unsigned long sz)
+{
+ pgd_t *pgd;
+ p4d_t *p4d;
+ pud_t *pud;
+ pmd_t *pmd = NULL;
+
+ pgd = pgd_offset(mm, addr);
+ if (pgd_present(*pgd)) {
+ p4d = p4d_offset(pgd, addr);
+ if (p4d_present(*p4d)) {
+ pud = pud_offset(p4d, addr);
+ if (pud_present(*pud))
+ pmd = pmd_offset(pud, addr);
+ }
+ }
+ return (pte_t *) pmd;
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return (pmd_val(pmd) & _PAGE_HUGE) != 0;
+}
+
+int pud_huge(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_HUGE) != 0;
+}
+
+uint64_t pmd_to_entrylo(unsigned long pmd_val)
+{
+ uint64_t val;
+ /* PMD as PTE. Must be huge page */
+ if (!pmd_huge(__pmd(pmd_val)))
+ panic("%s", __func__);
+
+ val = pmd_val ^ _PAGE_HUGE;
+ val |= ((val & _PAGE_HGLOBAL) >>
+ (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT));
+
+ return val;
+}
diff --git a/arch/loongarch/mm/init.c b/arch/loongarch/mm/init.c
new file mode 100644
index 0000000000..4dd53427f6
--- /dev/null
+++ b/arch/loongarch/mm/init.c
@@ -0,0 +1,250 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/types.h>
+#include <linux/pagemap.h>
+#include <linux/memblock.h>
+#include <linux/memremap.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/highmem.h>
+#include <linux/swap.h>
+#include <linux/proc_fs.h>
+#include <linux/pfn.h>
+#include <linux/hardirq.h>
+#include <linux/gfp.h>
+#include <linux/hugetlb.h>
+#include <linux/mmzone.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/pgtable.h>
+#include <asm/pgalloc.h>
+#include <asm/tlb.h>
+
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)] __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
+void copy_user_highpage(struct page *to, struct page *from,
+ unsigned long vaddr, struct vm_area_struct *vma)
+{
+ void *vfrom, *vto;
+
+ vfrom = kmap_local_page(from);
+ vto = kmap_local_page(to);
+ copy_page(vto, vfrom);
+ kunmap_local(vfrom);
+ kunmap_local(vto);
+ /* Make sure this page is cleared on other CPUs too before using it */
+ smp_wmb();
+}
+
+int __ref page_is_ram(unsigned long pfn)
+{
+ unsigned long addr = PFN_PHYS(pfn);
+
+ return memblock_is_memory(addr) && !memblock_is_reserved(addr);
+}
+
+#ifndef CONFIG_NUMA
+void __init paging_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = MAX_DMA_PFN;
+#endif
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(max_zone_pfns);
+}
+
+void __init mem_init(void)
+{
+ max_mapnr = max_low_pfn;
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+
+ memblock_free_all();
+}
+#endif /* !CONFIG_NUMA */
+
+void __ref free_initmem(void)
+{
+ free_initmem_default(POISON_FREE_INITMEM);
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ int ret;
+
+ ret = __add_pages(nid, start_pfn, nr_pages, params);
+
+ if (ret)
+ pr_warn("%s: Problem encountered in __add_pages() as ret=%d\n",
+ __func__, ret);
+
+ return ret;
+}
+
+void arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+ struct page *page = pfn_to_page(start_pfn);
+
+ /* With altmap the first mapped page is offset from @start */
+ if (altmap)
+ page += vmem_altmap_offset(altmap);
+ __remove_pages(start_pfn, nr_pages, altmap);
+}
+
+#ifdef CONFIG_NUMA
+int memory_add_physaddr_to_nid(u64 start)
+{
+ return pa_to_nid(start);
+}
+EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
+#endif
+#endif
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node,
+ unsigned long addr, unsigned long next)
+{
+ pmd_t entry;
+
+ entry = pfn_pmd(virt_to_pfn(p), PAGE_KERNEL);
+ pmd_val(entry) |= _PAGE_HUGE | _PAGE_HGLOBAL;
+ set_pmd_at(&init_mm, addr, pmd, entry);
+}
+
+int __meminit vmemmap_check_pmd(pmd_t *pmd, int node,
+ unsigned long addr, unsigned long next)
+{
+ int huge = pmd_val(*pmd) & _PAGE_HUGE;
+
+ if (huge)
+ vmemmap_verify((pte_t *)pmd, node, addr, next);
+
+ return huge;
+}
+
+int __meminit vmemmap_populate(unsigned long start, unsigned long end,
+ int node, struct vmem_altmap *altmap)
+{
+#if CONFIG_PGTABLE_LEVELS == 2
+ return vmemmap_populate_basepages(start, end, node, NULL);
+#else
+ return vmemmap_populate_hugepages(start, end, node, NULL);
+#endif
+}
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+void vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
+{
+}
+#endif
+#endif
+
+pte_t * __init populate_kernel_pte(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (p4d_none(*p4d)) {
+ pud = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pud)
+ panic("%s: Failed to allocate memory\n", __func__);
+ p4d_populate(&init_mm, p4d, pud);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init(pud);
+#endif
+ }
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+ pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pmd)
+ panic("%s: Failed to allocate memory\n", __func__);
+ pud_populate(&init_mm, pud, pmd);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_init(pmd);
+#endif
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd)) {
+ pte_t *pte;
+
+ pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ if (!pte)
+ panic("%s: Failed to allocate memory\n", __func__);
+ pmd_populate_kernel(&init_mm, pmd, pte);
+ }
+
+ return pte_offset_kernel(pmd, addr);
+}
+
+void __init __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t flags)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *ptep;
+
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+ ptep = populate_kernel_pte(addr);
+ if (!pte_none(*ptep)) {
+ pte_ERROR(*ptep);
+ return;
+ }
+
+ if (pgprot_val(flags))
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, flags));
+ else {
+ pte_clear(&init_mm, addr, ptep);
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+ }
+}
+
+/*
+ * Align swapper_pg_dir to 64K, which allows its address to be loaded
+ * with a single LUI instruction in the TLB handlers. If we used
+ * __aligned(64K), its size would get rounded up to the alignment
+ * size and waste space. So we place it in its own section and align
+ * it in the linker script.
+ */
+pgd_t swapper_pg_dir[_PTRS_PER_PGD] __section(".bss..swapper_pg_dir");
+
+pgd_t invalid_pg_dir[_PTRS_PER_PGD] __page_aligned_bss;
+#ifndef __PAGETABLE_PUD_FOLDED
+pud_t invalid_pud_table[PTRS_PER_PUD] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pud_table);
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+pmd_t invalid_pmd_table[PTRS_PER_PMD] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pmd_table);
+#endif
+pte_t invalid_pte_table[PTRS_PER_PTE] __page_aligned_bss;
+EXPORT_SYMBOL(invalid_pte_table);
diff --git a/arch/loongarch/mm/ioremap.c b/arch/loongarch/mm/ioremap.c
new file mode 100644
index 0000000000..70ca730198
--- /dev/null
+++ b/arch/loongarch/mm/ioremap.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/io.h>
+#include <asm-generic/early_ioremap.h>
+
+void __init __iomem *early_ioremap(u64 phys_addr, unsigned long size)
+{
+ return ((void __iomem *)TO_CACHE(phys_addr));
+}
+
+void __init early_iounmap(void __iomem *addr, unsigned long size)
+{
+
+}
+
+void *early_memremap_ro(resource_size_t phys_addr, unsigned long size)
+{
+ return early_memremap(phys_addr, size);
+}
+
+void *early_memremap_prot(resource_size_t phys_addr, unsigned long size,
+ unsigned long prot_val)
+{
+ return early_memremap(phys_addr, size);
+}
diff --git a/arch/loongarch/mm/kasan_init.c b/arch/loongarch/mm/kasan_init.c
new file mode 100644
index 0000000000..cc3e81fe01
--- /dev/null
+++ b/arch/loongarch/mm/kasan_init.c
@@ -0,0 +1,294 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+#define pr_fmt(fmt) "kasan: " fmt
+#include <linux/kasan.h>
+#include <linux/memblock.h>
+#include <linux/sched/task.h>
+
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+#include <asm-generic/sections.h>
+
+static pgd_t kasan_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
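+/*
+ * After the early stage, a shadow page table entry that still points at
+ * the shared kasan_early_shadow_* tables has not been populated yet, so
+ * the "none" checks below compare against those tables rather than zero.
+ */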
+#ifdef __PAGETABLE_PUD_FOLDED
+#define __p4d_none(early, p4d) (0)
+#else
+#define __p4d_none(early, p4d) (early ? (p4d_val(p4d) == 0) : \
+(__pa(p4d_val(p4d)) == (unsigned long)__pa(kasan_early_shadow_pud)))
+#endif
+
+#ifdef __PAGETABLE_PMD_FOLDED
+#define __pud_none(early, pud) (0)
+#else
+#define __pud_none(early, pud) (early ? (pud_val(pud) == 0) : \
+(__pa(pud_val(pud)) == (unsigned long)__pa(kasan_early_shadow_pmd)))
+#endif
+
+#define __pmd_none(early, pmd) (early ? (pmd_val(pmd) == 0) : \
+(__pa(pmd_val(pmd)) == (unsigned long)__pa(kasan_early_shadow_pte)))
+
+#define __pte_none(early, pte) (early ? pte_none(pte) : \
+((pte_val(pte) & _PFN_MASK) == (unsigned long)__pa(kasan_early_shadow_page)))
+
+bool kasan_early_stage = true;
+
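+/*
+ * There is no single linear shadow offset: the shadow address is derived
+ * per segment (XKPRANGE cached/uncached and XKVRANGE each have their own
+ * offset). Until KASAN is ready, everything maps to the early shadow page.
+ */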
+void *kasan_mem_to_shadow(const void *addr)
+{
+ if (!kasan_arch_is_ready()) {
+ return (void *)(kasan_early_shadow_page);
+ } else {
+ unsigned long maddr = (unsigned long)addr;
+ unsigned long xrange = (maddr >> XRANGE_SHIFT) & 0xffff;
+ unsigned long offset = 0;
+
+ maddr &= XRANGE_SHADOW_MASK;
+ switch (xrange) {
+ case XKPRANGE_CC_SEG:
+ offset = XKPRANGE_CC_SHADOW_OFFSET;
+ break;
+ case XKPRANGE_UC_SEG:
+ offset = XKPRANGE_UC_SHADOW_OFFSET;
+ break;
+ case XKVRANGE_VC_SEG:
+ offset = XKVRANGE_VC_SHADOW_OFFSET;
+ break;
+ default:
+ WARN_ON(1);
+ return NULL;
+ }
+
+ return (void *)((maddr >> KASAN_SHADOW_SCALE_SHIFT) + offset);
+ }
+}
+
+const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long addr = (unsigned long)shadow_addr;
+
+ if (unlikely(addr > KASAN_SHADOW_END) ||
+ unlikely(addr < KASAN_SHADOW_START)) {
+ WARN_ON(1);
+ return NULL;
+ }
+
+ if (addr >= XKVRANGE_VC_SHADOW_OFFSET)
+ return (void *)(((addr - XKVRANGE_VC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKVRANGE_VC_START);
+ else if (addr >= XKPRANGE_UC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_UC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_UC_START);
+ else if (addr >= XKPRANGE_CC_SHADOW_OFFSET)
+ return (void *)(((addr - XKPRANGE_CC_SHADOW_OFFSET) << KASAN_SHADOW_SCALE_SHIFT) + XKPRANGE_CC_START);
+ else {
+ WARN_ON(1);
+ return NULL;
+ }
+}
+
+/*
+ * Allocate memory for the shadow memory page tables.
+ */
+static phys_addr_t __init kasan_alloc_zeroed_page(int node)
+{
+ void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
+ __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
+ if (!p)
+ panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
+ __func__, PAGE_SIZE, PAGE_SIZE, node, __pa(MAX_DMA_ADDRESS));
+
+ return __pa(p);
+}
+
+static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node, bool early)
+{
+ if (__pmd_none(early, READ_ONCE(*pmdp))) {
+ phys_addr_t pte_phys = early ?
+ __pa_symbol(kasan_early_shadow_pte) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pte_phys), kasan_early_shadow_pte, sizeof(kasan_early_shadow_pte));
+ pmd_populate_kernel(NULL, pmdp, (pte_t *)__va(pte_phys));
+ }
+
+ return pte_offset_kernel(pmdp, addr);
+}
+
+static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node, bool early)
+{
+ if (__pud_none(early, READ_ONCE(*pudp))) {
+ phys_addr_t pmd_phys = early ?
+ __pa_symbol(kasan_early_shadow_pmd) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pmd_phys), kasan_early_shadow_pmd, sizeof(kasan_early_shadow_pmd));
+ pud_populate(&init_mm, pudp, (pmd_t *)__va(pmd_phys));
+ }
+
+ return pmd_offset(pudp, addr);
+}
+
+static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node, bool early)
+{
+ if (__p4d_none(early, READ_ONCE(*p4dp))) {
+ phys_addr_t pud_phys = early ?
+ __pa_symbol(kasan_early_shadow_pud) : kasan_alloc_zeroed_page(node);
+ if (!early)
+ memcpy(__va(pud_phys), kasan_early_shadow_pud, sizeof(kasan_early_shadow_pud));
+ p4d_populate(&init_mm, p4dp, (pud_t *)__va(pud_phys));
+ }
+
+ return pud_offset(p4dp, addr);
+}
+
+static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);
+
+ do {
+ phys_addr_t page_phys = early ?
+ __pa_symbol(kasan_early_shadow_page)
+ : kasan_alloc_zeroed_page(node);
+ next = addr + PAGE_SIZE;
+ set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
+ } while (ptep++, addr = next, addr != end && __pte_none(early, READ_ONCE(*ptep)));
+}
+
+static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);
+
+ do {
+ next = pmd_addr_end(addr, end);
+ kasan_pte_populate(pmdp, addr, next, node, early);
+ } while (pmdp++, addr = next, addr != end && __pmd_none(early, READ_ONCE(*pmdp)));
+}
+
+static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);
+
+ do {
+ next = pud_addr_end(addr, end);
+ kasan_pmd_populate(pudp, addr, next, node, early);
+ } while (pudp++, addr = next, addr != end);
+}
+
+static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
+ unsigned long end, int node, bool early)
+{
+ unsigned long next;
+ p4d_t *p4dp = p4d_offset(pgdp, addr);
+
+ do {
+ next = p4d_addr_end(addr, end);
+ kasan_pud_populate(p4dp, addr, next, node, early);
+ } while (p4dp++, addr = next, addr != end);
+}
+
+static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
+ int node, bool early)
+{
+ unsigned long next;
+ pgd_t *pgdp;
+
+ pgdp = pgd_offset_k(addr);
+
+ do {
+ next = pgd_addr_end(addr, end);
+ kasan_p4d_populate(pgdp, addr, next, node, early);
+ } while (pgdp++, addr = next, addr != end);
+
+}
+
+/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
+static void __init kasan_map_populate(unsigned long start, unsigned long end,
+ int node)
+{
+ kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
+}
+
+asmlinkage void __init kasan_early_init(void)
+{
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_START, PGDIR_SIZE));
+ BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
+}
+
+static inline void kasan_set_pgd(pgd_t *pgdp, pgd_t pgdval)
+{
+ WRITE_ONCE(*pgdp, pgdval);
+}
+
+static void __init clear_pgds(unsigned long start, unsigned long end)
+{
+ /*
+ * Remove references to kasan page tables from
+ * swapper_pg_dir. pgd_clear() can't be used here
+ * because it is a no-op on 2- and 3-level page table setups.
+ */
+ for (; start < end; start += PGDIR_SIZE)
+ kasan_set_pgd((pgd_t *)pgd_offset_k(start), __pgd(0));
+}
+
+void __init kasan_init(void)
+{
+ u64 i;
+ phys_addr_t pa_start, pa_end;
+
+ /*
+ * The PGD entries were populated with invalid_pmd_table or
+ * invalid_pud_table in pagetable_init(), depending on how many
+ * page table levels are in use. We have to clear the pgd entries
+ * covering the KASAN shadow memory here because their values are
+ * non-zero: pgd_none() would return false, and the populate calls
+ * below would then never create any new pgd entries at all.
+ */
+ memcpy(kasan_pg_dir, swapper_pg_dir, sizeof(kasan_pg_dir));
+ csr_write64(__pa_symbol(kasan_pg_dir), LOONGARCH_CSR_PGDH);
+ local_flush_tlb_all();
+
+ clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);
+
+ /* Maps everything to a single page of zeroes */
+ kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE, true);
+
+ kasan_populate_early_shadow(kasan_mem_to_shadow((void *)VMALLOC_START),
+ kasan_mem_to_shadow((void *)KFENCE_AREA_END));
+
+ kasan_early_stage = false;
+
+ /* Populate the linear mapping */
+ for_each_mem_range(i, &pa_start, &pa_end) {
+ void *start = (void *)phys_to_virt(pa_start);
+ void *end = (void *)phys_to_virt(pa_end);
+
+ if (start >= end)
+ break;
+
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
+ (unsigned long)kasan_mem_to_shadow(end), NUMA_NO_NODE);
+ }
+
+ /* Populate modules mapping */
+ kasan_map_populate((unsigned long)kasan_mem_to_shadow((void *)MODULES_VADDR),
+ (unsigned long)kasan_mem_to_shadow((void *)MODULES_END), NUMA_NO_NODE);
+ /*
+ * KAsan may reuse the contents of kasan_early_shadow_pte directly, so we
+ * should make sure that it maps the zero page read-only.
+ */
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_early_shadow_pte[i],
+ pfn_pte(__phys_to_pfn(__pa_symbol(kasan_early_shadow_page)), PAGE_KERNEL_RO));
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ csr_write64(__pa_symbol(swapper_pg_dir), LOONGARCH_CSR_PGDH);
+ local_flush_tlb_all();
+
+ /* At this point kasan is fully initialized. Enable error messages */
+ init_task.kasan_depth = 0;
+ pr_info("KernelAddressSanitizer initialized.\n");
+}
diff --git a/arch/loongarch/mm/maccess.c b/arch/loongarch/mm/maccess.c
new file mode 100644
index 0000000000..58173842c6
--- /dev/null
+++ b/arch/loongarch/mm/maccess.c
@@ -0,0 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#include <linux/uaccess.h>
+#include <linux/kernel.h>
+
+bool copy_from_kernel_nofault_allowed(const void *unsafe_src, size_t size)
+{
+ /* highest bit set means kernel space */
+ return (unsigned long)unsafe_src >> (BITS_PER_LONG - 1);
+}
diff --git a/arch/loongarch/mm/mmap.c b/arch/loongarch/mm/mmap.c
new file mode 100644
index 0000000000..a9630a81b3
--- /dev/null
+++ b/arch/loongarch/mm/mmap.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/memblock.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+
+#define SHM_ALIGN_MASK (SHMLBA - 1)
+
+#define COLOUR_ALIGN(addr, pgoff) \
+ ((((addr) + SHM_ALIGN_MASK) & ~SHM_ALIGN_MASK) \
+ + (((pgoff) << PAGE_SHIFT) & SHM_ALIGN_MASK))
+
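+/*
+ * COLOUR_ALIGN() rounds addr up to an SHMLBA boundary and then adds the
+ * cache colour implied by the file offset, so shared mappings of the
+ * same page end up with the same virtual colour.
+ */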
+enum mmap_allocation_direction {UP, DOWN};
+
+static unsigned long arch_get_unmapped_area_common(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags, enum mmap_allocation_direction dir)
+{
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ unsigned long addr = addr0;
+ int do_color_align;
+ struct vm_unmapped_area_info info;
+
+ if (unlikely(len > TASK_SIZE))
+ return -ENOMEM;
+
+ if (flags & MAP_FIXED) {
+ /* Even MAP_FIXED mappings must reside within TASK_SIZE */
+ if (TASK_SIZE - len < addr)
+ return -EINVAL;
+
+ /*
+ * We do not accept a shared mapping if it would violate
+ * cache aliasing constraints.
+ */
+ if ((flags & MAP_SHARED) &&
+ ((addr - (pgoff << PAGE_SHIFT)) & SHM_ALIGN_MASK))
+ return -EINVAL;
+ return addr;
+ }
+
+ do_color_align = 0;
+ if (filp || (flags & MAP_SHARED))
+ do_color_align = 1;
+
+ /* requesting a specific address */
+ if (addr) {
+ if (do_color_align)
+ addr = COLOUR_ALIGN(addr, pgoff);
+ else
+ addr = PAGE_ALIGN(addr);
+
+ vma = find_vma(mm, addr);
+ if (TASK_SIZE - len >= addr &&
+ (!vma || addr + len <= vm_start_gap(vma)))
+ return addr;
+ }
+
+ info.length = len;
+ info.align_mask = do_color_align ? (PAGE_MASK & SHM_ALIGN_MASK) : 0;
+ info.align_offset = pgoff << PAGE_SHIFT;
+
+ if (dir == DOWN) {
+ info.flags = VM_UNMAPPED_AREA_TOPDOWN;
+ info.low_limit = PAGE_SIZE;
+ info.high_limit = mm->mmap_base;
+ addr = vm_unmapped_area(&info);
+
+ if (!(addr & ~PAGE_MASK))
+ return addr;
+
+ /*
+ * A failed mmap() very likely causes application failure,
+ * so fall back to the bottom-up function here. This scenario
+ * can happen with large stack limits and large mmap()
+ * allocations.
+ */
+ }
+
+ info.flags = 0;
+ info.low_limit = mm->mmap_base;
+ info.high_limit = TASK_SIZE;
+ return vm_unmapped_area(&info);
+}
+
+unsigned long arch_get_unmapped_area(struct file *filp, unsigned long addr0,
+ unsigned long len, unsigned long pgoff, unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, UP);
+}
+
+/*
+ * There is no need to export this but sched.h declares the function as
+ * extern so making it static here results in an error.
+ */
+unsigned long arch_get_unmapped_area_topdown(struct file *filp,
+ unsigned long addr0, unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ return arch_get_unmapped_area_common(filp,
+ addr0, len, pgoff, flags, DOWN);
+}
+
+int __virt_addr_valid(volatile void *kaddr)
+{
+ unsigned long vaddr = (unsigned long)kaddr;
+
+ if ((vaddr < PAGE_OFFSET) || (vaddr >= vm_map_base))
+ return 0;
+
+ return pfn_valid(PFN_DOWN(PHYSADDR(kaddr)));
+}
+EXPORT_SYMBOL_GPL(__virt_addr_valid);
+
+/*
+ * You really shouldn't be using read() or write() on /dev/mem. This might go
+ * away in the future.
+ */
+int valid_phys_addr_range(phys_addr_t addr, size_t size)
+{
+ /*
+ * Check whether addr is covered by a memory region without the
+ * MEMBLOCK_NOMAP attribute, and whether that region covers the
+ * entire range. In theory, this could lead to false negatives
+ * if the range is covered by distinct but adjacent memory regions
+ * that only differ in other attributes. However, few such
+ * attributes have been defined, and it is debatable whether it
+ * follows that /dev/mem read() calls should be able to traverse
+ * such boundaries.
+ */
+ return memblock_is_region_memory(addr, size) && memblock_is_map_memory(addr);
+}
+
+/*
+ * Do not allow /dev/mem mappings beyond the supported physical range.
+ */
+int valid_mmap_phys_addr_range(unsigned long pfn, size_t size)
+{
+ return !(((pfn << PAGE_SHIFT) + size) & ~(GENMASK_ULL(cpu_pabits, 0)));
+}
diff --git a/arch/loongarch/mm/page.S b/arch/loongarch/mm/page.S
new file mode 100644
index 0000000000..7ad76551d3
--- /dev/null
+++ b/arch/loongarch/mm/page.S
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/page.h>
+#include <asm/regdef.h>
+
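+/*
+ * void clear_page(void *page): a0 holds the page address; t0 is set to
+ * the end of the page and 128 bytes (16 double-words) are zeroed per
+ * loop iteration.
+ */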
+ .align 5
+SYM_FUNC_START(clear_page)
+ lu12i.w t0, 1 << (PAGE_SHIFT - 12)
+ add.d t0, t0, a0
+1:
+ st.d zero, a0, 0
+ st.d zero, a0, 8
+ st.d zero, a0, 16
+ st.d zero, a0, 24
+ st.d zero, a0, 32
+ st.d zero, a0, 40
+ st.d zero, a0, 48
+ st.d zero, a0, 56
+ addi.d a0, a0, 128
+ st.d zero, a0, -64
+ st.d zero, a0, -56
+ st.d zero, a0, -48
+ st.d zero, a0, -40
+ st.d zero, a0, -32
+ st.d zero, a0, -24
+ st.d zero, a0, -16
+ st.d zero, a0, -8
+ bne t0, a0, 1b
+
+ jr ra
+SYM_FUNC_END(clear_page)
+EXPORT_SYMBOL(clear_page)
+
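+/*
+ * void copy_page(void *to, void *from): a0 = destination, a1 = source;
+ * 128 bytes are copied per iteration, with loads for the next block
+ * interleaved ahead of the stores to hide load latency.
+ */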
+.align 5
+SYM_FUNC_START(copy_page)
+ lu12i.w t8, 1 << (PAGE_SHIFT - 12)
+ add.d t8, t8, a0
+1:
+ ld.d t0, a1, 0
+ ld.d t1, a1, 8
+ ld.d t2, a1, 16
+ ld.d t3, a1, 24
+ ld.d t4, a1, 32
+ ld.d t5, a1, 40
+ ld.d t6, a1, 48
+ ld.d t7, a1, 56
+
+ st.d t0, a0, 0
+ st.d t1, a0, 8
+ ld.d t0, a1, 64
+ ld.d t1, a1, 72
+ st.d t2, a0, 16
+ st.d t3, a0, 24
+ ld.d t2, a1, 80
+ ld.d t3, a1, 88
+ st.d t4, a0, 32
+ st.d t5, a0, 40
+ ld.d t4, a1, 96
+ ld.d t5, a1, 104
+ st.d t6, a0, 48
+ st.d t7, a0, 56
+ ld.d t6, a1, 112
+ ld.d t7, a1, 120
+ addi.d a0, a0, 128
+ addi.d a1, a1, 128
+
+ st.d t0, a0, -64
+ st.d t1, a0, -56
+ st.d t2, a0, -48
+ st.d t3, a0, -40
+ st.d t4, a0, -32
+ st.d t5, a0, -24
+ st.d t6, a0, -16
+ st.d t7, a0, -8
+
+ bne t8, a0, 1b
+ jr ra
+SYM_FUNC_END(copy_page)
+EXPORT_SYMBOL(copy_page)
diff --git a/arch/loongarch/mm/pgtable.c b/arch/loongarch/mm/pgtable.c
new file mode 100644
index 0000000000..2aae72e638
--- /dev/null
+++ b/arch/loongarch/mm/pgtable.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <asm/pgalloc.h>
+#include <asm/pgtable.h>
+#include <asm/tlbflush.h>
+
+struct page *dmw_virt_to_page(unsigned long kaddr)
+{
+ return pfn_to_page(virt_to_pfn(kaddr));
+}
+EXPORT_SYMBOL(dmw_virt_to_page);
+
+struct page *tlb_virt_to_page(unsigned long kaddr)
+{
+ return pfn_to_page(pte_pfn(*virt_to_kpte(kaddr)));
+}
+EXPORT_SYMBOL(tlb_virt_to_page);
+
+pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *init, *ret = NULL;
+ struct ptdesc *ptdesc = pagetable_alloc(GFP_KERNEL & ~__GFP_HIGHMEM, 0);
+
+ if (ptdesc) {
+ ret = (pgd_t *)ptdesc_address(ptdesc);
+ init = pgd_offset(&init_mm, 0UL);
+ pgd_init(ret);
+ memcpy(ret + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(pgd_alloc);
+
+void pgd_init(void *addr)
+{
+ unsigned long *p, *end;
+ unsigned long entry;
+
+#if !defined(__PAGETABLE_PUD_FOLDED)
+ entry = (unsigned long)invalid_pud_table;
+#elif !defined(__PAGETABLE_PMD_FOLDED)
+ entry = (unsigned long)invalid_pmd_table;
+#else
+ entry = (unsigned long)invalid_pte_table;
+#endif
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PGD;
+
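+ /*
+ * Point every PGD entry at the invalid lower-level table; the loop is
+ * manually unrolled to write eight entries per iteration.
+ */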
+ do {
+ p[0] = entry;
+ p[1] = entry;
+ p[2] = entry;
+ p[3] = entry;
+ p[4] = entry;
+ p += 8;
+ p[-3] = entry;
+ p[-2] = entry;
+ p[-1] = entry;
+ } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pgd_init);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+void pmd_init(void *addr)
+{
+ unsigned long *p, *end;
+ unsigned long pagetable = (unsigned long)invalid_pte_table;
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PMD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pmd_init);
+#endif
+
+#ifndef __PAGETABLE_PUD_FOLDED
+void pud_init(void *addr)
+{
+ unsigned long *p, *end;
+ unsigned long pagetable = (unsigned long)invalid_pmd_table;
+
+ p = (unsigned long *)addr;
+ end = p + PTRS_PER_PUD;
+
+ do {
+ p[0] = pagetable;
+ p[1] = pagetable;
+ p[2] = pagetable;
+ p[3] = pagetable;
+ p[4] = pagetable;
+ p += 8;
+ p[-3] = pagetable;
+ p[-2] = pagetable;
+ p[-1] = pagetable;
+ } while (p != end);
+}
+EXPORT_SYMBOL_GPL(pud_init);
+#endif
+
+pmd_t mk_pmd(struct page *page, pgprot_t prot)
+{
+ pmd_t pmd;
+
+ pmd_val(pmd) = (page_to_pfn(page) << PFN_PTE_SHIFT) | pgprot_val(prot);
+
+ return pmd;
+}
+
+void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+ flush_tlb_all();
+}
+
+void __init pagetable_init(void)
+{
+ /* Initialize the entire pgd. */
+ pgd_init(swapper_pg_dir);
+ pgd_init(invalid_pg_dir);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init(invalid_pud_table);
+#endif
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_init(invalid_pmd_table);
+#endif
+}
diff --git a/arch/loongarch/mm/tlb.c b/arch/loongarch/mm/tlb.c
new file mode 100644
index 0000000000..2c0a411f23
--- /dev/null
+++ b/arch/loongarch/mm/tlb.c
@@ -0,0 +1,320 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <linux/mm.h>
+#include <linux/hugetlb.h>
+#include <linux/export.h>
+
+#include <asm/cpu.h>
+#include <asm/bootinfo.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/tlb.h>
+
+void local_flush_tlb_all(void)
+{
+ invtlb_all(INVTLB_CURRENT_ALL, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_all);
+
+void local_flush_tlb_user(void)
+{
+ invtlb_all(INVTLB_CURRENT_GFALSE, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_user);
+
+void local_flush_tlb_kernel(void)
+{
+ invtlb_all(INVTLB_CURRENT_GTRUE, 0, 0);
+}
+EXPORT_SYMBOL(local_flush_tlb_kernel);
+
+/*
+ * All entries common to a mm share an asid. To effectively flush
+ * these entries, we just bump the asid.
+ */
+void local_flush_tlb_mm(struct mm_struct *mm)
+{
+ int cpu;
+
+ preempt_disable();
+
+ cpu = smp_processor_id();
+
+ if (asid_valid(mm, cpu))
+ drop_mmu_context(mm, cpu);
+ else
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+
+ preempt_enable();
+}
+
+void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ int cpu = smp_processor_id();
+
+ if (asid_valid(mm, cpu)) {
+ unsigned long size, flags;
+
+ local_irq_save(flags);
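+ /* Each TLB entry maps an even/odd pair of pages, hence the 2 * PAGE_SIZE granule */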
+ start = round_down(start, PAGE_SIZE << 1);
+ end = round_up(end, PAGE_SIZE << 1);
+ size = (end - start) >> (PAGE_SHIFT + 1);
+ if (size <= (current_cpu_data.tlbsizestlbsets ?
+ current_cpu_data.tlbsize / 8 :
+ current_cpu_data.tlbsize / 2)) {
+ int asid = cpu_asid(cpu, mm);
+
+ while (start < end) {
+ invtlb(INVTLB_ADDR_GFALSE_AND_ASID, asid, start);
+ start += (PAGE_SIZE << 1);
+ }
+ } else {
+ drop_mmu_context(mm, cpu);
+ }
+ local_irq_restore(flags);
+ } else {
+ cpumask_clear_cpu(cpu, mm_cpumask(mm));
+ }
+}
+
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ unsigned long size, flags;
+
+ local_irq_save(flags);
+ size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
+ size = (size + 1) >> 1;
+ if (size <= (current_cpu_data.tlbsizestlbsets ?
+ current_cpu_data.tlbsize / 8 :
+ current_cpu_data.tlbsize / 2)) {
+
+ start &= (PAGE_MASK << 1);
+ end += ((PAGE_SIZE << 1) - 1);
+ end &= (PAGE_MASK << 1);
+
+ while (start < end) {
+ invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, start);
+ start += (PAGE_SIZE << 1);
+ }
+ } else {
+ local_flush_tlb_kernel();
+ }
+ local_irq_restore(flags);
+}
+
+void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ int cpu = smp_processor_id();
+
+ if (asid_valid(vma->vm_mm, cpu)) {
+ int newpid;
+
+ newpid = cpu_asid(cpu, vma->vm_mm);
+ page &= (PAGE_MASK << 1);
+ invtlb(INVTLB_ADDR_GFALSE_AND_ASID, newpid, page);
+ } else {
+ cpumask_clear_cpu(cpu, mm_cpumask(vma->vm_mm));
+ }
+}
+
+/*
+ * This one is only used for pages with the global bit set so we don't care
+ * much about the ASID.
+ */
+void local_flush_tlb_one(unsigned long page)
+{
+ page &= (PAGE_MASK << 1);
+ invtlb_addr(INVTLB_ADDR_GTRUE_OR_ASID, 0, page);
+}
+
+static void __update_hugetlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+#ifdef CONFIG_HUGETLB_PAGE
+ int idx;
+ unsigned long lo;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ address &= (PAGE_MASK << 1);
+ write_csr_entryhi(address);
+ tlb_probe();
+ idx = read_csr_tlbidx();
+ write_csr_pagesize(PS_HUGE_SIZE);
+ lo = pmd_to_entrylo(pte_val(*ptep));
+ write_csr_entrylo0(lo);
+ write_csr_entrylo1(lo + (HPAGE_SIZE >> 1));
+
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+ write_csr_pagesize(PS_DEFAULT_SIZE);
+
+ local_irq_restore(flags);
+#endif
+}
+
+void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
+{
+ int idx;
+ unsigned long flags;
+
+ if (cpu_has_ptw)
+ return;
+
+ /*
+ * Handle a debugger faulting pages in on behalf of the debuggee.
+ */
+ if (current->active_mm != vma->vm_mm)
+ return;
+
+ if (pte_val(*ptep) & _PAGE_HUGE) {
+ __update_hugetlb(vma, address, ptep);
+ return;
+ }
+
+ local_irq_save(flags);
+
+ if ((unsigned long)ptep & sizeof(pte_t))
+ ptep--;
+
+ address &= (PAGE_MASK << 1);
+ write_csr_entryhi(address);
+ tlb_probe();
+ idx = read_csr_tlbidx();
+ write_csr_pagesize(PS_DEFAULT_SIZE);
+ write_csr_entrylo0(pte_val(*ptep++));
+ write_csr_entrylo1(pte_val(*ptep));
+ if (idx < 0)
+ tlb_write_random();
+ else
+ tlb_write_indexed();
+
+ local_irq_restore(flags);
+}
+
+static void setup_ptwalker(void)
+{
+ unsigned long pwctl0, pwctl1;
+ unsigned long pgd_i = 0, pgd_w = 0;
+ unsigned long pud_i = 0, pud_w = 0;
+ unsigned long pmd_i = 0, pmd_w = 0;
+ unsigned long pte_i = 0, pte_w = 0;
+
+ pgd_i = PGDIR_SHIFT;
+ pgd_w = PAGE_SHIFT - 3;
+#if CONFIG_PGTABLE_LEVELS > 3
+ pud_i = PUD_SHIFT;
+ pud_w = PAGE_SHIFT - 3;
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ pmd_i = PMD_SHIFT;
+ pmd_w = PAGE_SHIFT - 3;
+#endif
+ pte_i = PAGE_SHIFT;
+ pte_w = PAGE_SHIFT - 3;
+
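+ /*
+ * PWCTL0/PWCTL1 describe the page table layout to the page table
+ * walker: the starting bit (_i) and index width (_w) of each level's
+ * field in the virtual address. PGDH and PGDL hold the kernel and
+ * user page directory roots respectively.
+ */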
+ pwctl0 = pte_i | pte_w << 5 | pmd_i << 10 | pmd_w << 15 | pud_i << 20 | pud_w << 25;
+ pwctl1 = pgd_i | pgd_w << 6;
+
+ if (cpu_has_ptw)
+ pwctl1 |= CSR_PWCTL1_PTW;
+
+ csr_write64(pwctl0, LOONGARCH_CSR_PWCTL0);
+ csr_write64(pwctl1, LOONGARCH_CSR_PWCTL1);
+ csr_write64((long)swapper_pg_dir, LOONGARCH_CSR_PGDH);
+ csr_write64((long)invalid_pg_dir, LOONGARCH_CSR_PGDL);
+ csr_write64((long)smp_processor_id(), LOONGARCH_CSR_TMID);
+}
+
+static void output_pgtable_bits_defines(void)
+{
+#define pr_define(fmt, ...) \
+ pr_debug("#define " fmt, ##__VA_ARGS__)
+
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+
+ pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
+ pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
+ pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
+ pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
+ pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
+ pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
+ pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
+ pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
+ pr_define("PFN_PTE_SHIFT %d\n", PFN_PTE_SHIFT);
+ pr_debug("\n");
+}
+
+#ifdef CONFIG_NUMA
+unsigned long pcpu_handlers[NR_CPUS];
+#endif
+extern long exception_handlers[VECSIZE * 128 / sizeof(long)];
+
+static void setup_tlb_handler(int cpu)
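+/*
+ * The whole physical address space is reachable through the direct
+ * mapping windows, so early_ioremap() simply returns the cached
+ * direct-mapped address via TO_CACHE() and early_iounmap() has
+ * nothing to undo.
+ */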
+{
+ setup_ptwalker();
+ local_flush_tlb_all();
+
+ /* The tlb handlers are generated only once */
+ if (cpu == 0) {
+ memcpy((void *)tlbrentry, handle_tlb_refill, 0x80);
+ local_flush_icache_range(tlbrentry, tlbrentry + 0x80);
+ if (!cpu_has_ptw) {
+ set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load, VECSIZE);
+ set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load, VECSIZE);
+ set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store, VECSIZE);
+ set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify, VECSIZE);
+ } else {
+ set_handler(EXCCODE_TLBI * VECSIZE, handle_tlb_load_ptw, VECSIZE);
+ set_handler(EXCCODE_TLBL * VECSIZE, handle_tlb_load_ptw, VECSIZE);
+ set_handler(EXCCODE_TLBS * VECSIZE, handle_tlb_store_ptw, VECSIZE);
+ set_handler(EXCCODE_TLBM * VECSIZE, handle_tlb_modify_ptw, VECSIZE);
+ }
+ set_handler(EXCCODE_TLBNR * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBNX * VECSIZE, handle_tlb_protect, VECSIZE);
+ set_handler(EXCCODE_TLBPE * VECSIZE, handle_tlb_protect, VECSIZE);
+ }
+#ifdef CONFIG_NUMA
+ else {
+ void *addr;
+ struct page *page;
+ const int vec_sz = sizeof(exception_handlers);
+
+ if (pcpu_handlers[cpu])
+ return;
+
+ page = alloc_pages_node(cpu_to_node(cpu), GFP_ATOMIC, get_order(vec_sz));
+ if (!page)
+ return;
+
+ addr = page_address(page);
+ pcpu_handlers[cpu] = (unsigned long)addr;
+ memcpy((void *)addr, (void *)eentry, vec_sz);
+ local_flush_icache_range((unsigned long)addr, (unsigned long)addr + vec_sz);
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_EENTRY);
+ csr_write64(pcpu_handlers[cpu], LOONGARCH_CSR_MERRENTRY);
+ csr_write64(pcpu_handlers[cpu] + 80*VECSIZE, LOONGARCH_CSR_TLBRENTRY);
+ }
+#endif
+}
+
+void tlb_init(int cpu)
+{
+ write_csr_pagesize(PS_DEFAULT_SIZE);
+ write_csr_stlbpgsize(PS_DEFAULT_SIZE);
+ write_csr_tlbrefill_pagesize(PS_DEFAULT_SIZE);
+
+ setup_tlb_handler(cpu);
+ output_pgtable_bits_defines();
+}
diff --git a/arch/loongarch/mm/tlbex.S b/arch/loongarch/mm/tlbex.S
new file mode 100644
index 0000000000..d5d682f3d2
--- /dev/null
+++ b/arch/loongarch/mm/tlbex.S
@@ -0,0 +1,524 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+#define INVTLB_ADDR_GFALSE_AND_ASID 5
+
+#define PTRS_PER_PGD_BITS (PAGE_SHIFT - 3)
+#define PTRS_PER_PUD_BITS (PAGE_SHIFT - 3)
+#define PTRS_PER_PMD_BITS (PAGE_SHIFT - 3)
+#define PTRS_PER_PTE_BITS (PAGE_SHIFT - 3)
+
+ .macro tlb_do_page_fault, write
+ SYM_CODE_START(tlb_do_page_fault_\write)
+ SAVE_ALL
+ csrrd a2, LOONGARCH_CSR_BADV
+ move a0, sp
+ REG_S a2, sp, PT_BVADDR
+ li.w a1, \write
+ bl do_page_fault
+ RESTORE_ALL_AND_RET
+ SYM_CODE_END(tlb_do_page_fault_\write)
+ .endm
+
+ tlb_do_page_fault 0
+ tlb_do_page_fault 1
+
+SYM_CODE_START(handle_tlb_protect)
+ BACKUP_T0T1
+ SAVE_ALL
+ move a0, sp
+ move a1, zero
+ csrrd a2, LOONGARCH_CSR_BADV
+ REG_S a2, sp, PT_BVADDR
+ la_abs t0, do_page_fault
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+SYM_CODE_END(handle_tlb_protect)
+
+SYM_CODE_START(handle_tlb_load)
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ csrrd t0, LOONGARCH_CSR_BADV
+ bltz t0, vmalloc_load
+ csrrd t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_load:
+ /* Get PGD offset in bytes */
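+ /* (bstrpick extracts the index field; alsl.d adds index << 3, since each entry is 8 bytes) */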
+ bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+ alsl.d t1, ra, t1, 3
+#if CONFIG_PGTABLE_LEVELS > 3
+ ld.d t1, t1, 0
+ bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ alsl.d t1, ra, t1, 3
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ ld.d t1, t1, 0
+ bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ alsl.d t1, ra, t1, 3
+#endif
+ ld.d ra, t1, 0
+
+ /*
+ * For huge tlb entries, pmde doesn't contain an address but
+ * instead contains the tlb pte. Check the PAGE_HUGE bit and
+ * see if we need to jump to huge tlb processing.
+ */
+ rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ bltz ra, tlb_huge_update_load
+
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ alsl.d t1, t0, ra, _PTE_T_LOG2
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_load:
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
+ andi ra, t0, _PAGE_PRESENT
+ beqz ra, nopage_tlb_load
+
+ ori t0, t0, _PAGE_VALID
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beqz t0, smp_pgtable_change_load
+#else
+ st.d t0, t1, 0
+#endif
+ tlbsrch
+ bstrins.d t1, zero, 3, 3
+ ld.d t0, t1, 0
+ ld.d t1, t1, 8
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ csrwr t1, LOONGARCH_CSR_TLBELO1
+ tlbwr
+
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+
+#ifdef CONFIG_64BIT
+vmalloc_load:
+ la_abs t1, swapper_pg_dir
+ b vmalloc_done_load
+#endif
+
+ /* This is the entry point of a huge page. */
+tlb_huge_update_load:
+#ifdef CONFIG_SMP
+ ll.d ra, t1, 0
+#endif
+ andi t0, ra, _PAGE_PRESENT
+ beqz t0, nopage_tlb_load
+
+#ifdef CONFIG_SMP
+ ori t0, ra, _PAGE_VALID
+ sc.d t0, t1, 0
+ beqz t0, tlb_huge_update_load
+ ori t0, ra, _PAGE_VALID
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ ori t0, ra, _PAGE_VALID
+ st.d t0, t1, 0
+#endif
+ csrrd ra, LOONGARCH_CSR_ASID
+ csrrd t1, LOONGARCH_CSR_BADV
+ andi ra, ra, CSR_ASID_ASID
+ invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
+
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the size
+ * of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page size is configured into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ /* Huge page: Move Global bit */
+ xori t0, t0, _PAGE_HUGE
+ lu12i.w t1, _PAGE_HGLOBAL >> 12
+ and t1, t0, t1
+ srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ or t0, t0, t1
+
+ move ra, t0
+ csrwr ra, LOONGARCH_CSR_TLBELO0
+
+ /* Convert to entrylo1 */
+ addi.d t1, zero, 1
+ slli.d t1, t1, (HPAGE_SHIFT - 1)
+ add.d t0, t0, t1
+ csrwr t0, LOONGARCH_CSR_TLBELO1
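+
+ /*
+ * Worked example (assuming the default 16KB base pages): HPAGE_SHIFT
+ * is 25, so the 32MB huge page is installed as two 16MB halves, and
+ * entrylo1 maps the half starting 1 << (HPAGE_SHIFT - 1) bytes above
+ * the half in entrylo0.
+ */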
+
+ /* Set huge page tlb entry size */
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ tlbfill
+
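+ /* Reset default page size */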
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+
+nopage_tlb_load:
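+ /* The PTE was not present: hand the fault to the C handler as a read fault */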
+ dbar 0x700
+ csrrd ra, EXCEPTION_KS2
+ la_abs t0, tlb_do_page_fault_0
+ jr t0
+SYM_CODE_END(handle_tlb_load)
+
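+/*
+ * _ptw variant, installed when the CPU provides a hardware page table
+ * walker: the software PTE fix-up fast path is not needed, so go
+ * straight to the common page fault trampoline.
+ */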
+SYM_CODE_START(handle_tlb_load_ptw)
+ csrwr t0, LOONGARCH_CSR_KS0
+ csrwr t1, LOONGARCH_CSR_KS1
+ la_abs t0, tlb_do_page_fault_0
+ jr t0
+SYM_CODE_END(handle_tlb_load_ptw)
+
+SYM_CODE_START(handle_tlb_store)
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ csrrd t0, LOONGARCH_CSR_BADV
+ bltz t0, vmalloc_store
+ csrrd t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_store:
+ /* Get PGD offset in bytes */
+ bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+ alsl.d t1, ra, t1, 3
+#if CONFIG_PGTABLE_LEVELS > 3
+ ld.d t1, t1, 0
+ bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ alsl.d t1, ra, t1, 3
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ ld.d t1, t1, 0
+ bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ alsl.d t1, ra, t1, 3
+#endif
+ ld.d ra, t1, 0
+
+ /*
+ * For huge TLB entries, pmde doesn't contain a page table
+ * address but the TLB PTE itself. Check the _PAGE_HUGE bit to
+ * see if we need to jump to huge TLB processing.
+ */
+ rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ bltz ra, tlb_huge_update_store
+
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ alsl.d t1, t0, ra, _PTE_T_LOG2
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_store:
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
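+ /*
+ * A store requires the PTE to be both present and writable; the
+ * andi/xori pair below leaves zero only when both bits are set.
+ */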
+ andi ra, t0, _PAGE_PRESENT | _PAGE_WRITE
+ xori ra, ra, _PAGE_PRESENT | _PAGE_WRITE
+ bnez ra, nopage_tlb_store
+
+ ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beqz t0, smp_pgtable_change_store
+#else
+ st.d t0, t1, 0
+#endif
+ tlbsrch
+ bstrins.d t1, zero, 3, 3
+ ld.d t0, t1, 0
+ ld.d t1, t1, 8
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ csrwr t1, LOONGARCH_CSR_TLBELO1
+ tlbwr
+
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+
+#ifdef CONFIG_64BIT
+vmalloc_store:
+ la_abs t1, swapper_pg_dir
+ b vmalloc_done_store
+#endif
+
+ /* This is the entry point for huge page handling. */
+tlb_huge_update_store:
+#ifdef CONFIG_SMP
+ ll.d ra, t1, 0
+#endif
+ andi t0, ra, _PAGE_PRESENT | _PAGE_WRITE
+ xori t0, t0, _PAGE_PRESENT | _PAGE_WRITE
+ bnez t0, nopage_tlb_store
+
+#ifdef CONFIG_SMP
+ ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ sc.d t0, t1, 0
+ beqz t0, tlb_huge_update_store
+ ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ st.d t0, t1, 0
+#endif
+ csrrd ra, LOONGARCH_CSR_ASID
+ csrrd t1, LOONGARCH_CSR_BADV
+ andi ra, ra, CSR_ASID_ASID
+ invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
+
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the
+ * size of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page is therefore written into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ /* Huge page: Move Global bit */
+ xori t0, t0, _PAGE_HUGE
+ lu12i.w t1, _PAGE_HGLOBAL >> 12
+ and t1, t0, t1
+ srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ or t0, t0, t1
+
+ move ra, t0
+ csrwr ra, LOONGARCH_CSR_TLBELO0
+
+ /* Convert to entrylo1 */
+ addi.d t1, zero, 1
+ slli.d t1, t1, (HPAGE_SHIFT - 1)
+ add.d t0, t0, t1
+ csrwr t0, LOONGARCH_CSR_TLBELO1
+
+ /* Set huge page tlb entry size */
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ tlbfill
+
+ /* Reset default page size */
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+
+nopage_tlb_store:
+ dbar 0x700
+ csrrd ra, EXCEPTION_KS2
+ la_abs t0, tlb_do_page_fault_1
+ jr t0
+SYM_CODE_END(handle_tlb_store)
+
+SYM_CODE_START(handle_tlb_store_ptw)
+ csrwr t0, LOONGARCH_CSR_KS0
+ csrwr t1, LOONGARCH_CSR_KS1
+ la_abs t0, tlb_do_page_fault_1
+ jr t0
+SYM_CODE_END(handle_tlb_store_ptw)
+
+SYM_CODE_START(handle_tlb_modify)
+ csrwr t0, EXCEPTION_KS0
+ csrwr t1, EXCEPTION_KS1
+ csrwr ra, EXCEPTION_KS2
+
+ /*
+ * The vmalloc handling is not in the hotpath.
+ */
+ csrrd t0, LOONGARCH_CSR_BADV
+ bltz t0, vmalloc_modify
+ csrrd t1, LOONGARCH_CSR_PGDL
+
+vmalloc_done_modify:
+ /* Get PGD offset in bytes */
+ bstrpick.d ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
+ alsl.d t1, ra, t1, 3
+#if CONFIG_PGTABLE_LEVELS > 3
+ ld.d t1, t1, 0
+ bstrpick.d ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
+ alsl.d t1, ra, t1, 3
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ ld.d t1, t1, 0
+ bstrpick.d ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
+ alsl.d t1, ra, t1, 3
+#endif
+ ld.d ra, t1, 0
+
+ /*
+ * For huge TLB entries, pmde doesn't contain a page table
+ * address but the TLB PTE itself. Check the _PAGE_HUGE bit to
+ * see if we need to jump to huge TLB processing.
+ */
+ rotri.d ra, ra, _PAGE_HUGE_SHIFT + 1
+ bltz ra, tlb_huge_update_modify
+
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ bstrpick.d t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
+ alsl.d t1, t0, ra, _PTE_T_LOG2
+
+#ifdef CONFIG_SMP
+smp_pgtable_change_modify:
+ ll.d t0, t1, 0
+#else
+ ld.d t0, t1, 0
+#endif
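+ /*
+ * A TLB modify exception means the page is already present, so
+ * only write permission needs to be checked before setting the
+ * valid/dirty bits.
+ */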
+ andi ra, t0, _PAGE_WRITE
+ beqz ra, nopage_tlb_modify
+
+ ori t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#ifdef CONFIG_SMP
+ sc.d t0, t1, 0
+ beqz t0, smp_pgtable_change_modify
+#else
+ st.d t0, t1, 0
+#endif
+ tlbsrch
+ bstrins.d t1, zero, 3, 3
+ ld.d t0, t1, 0
+ ld.d t1, t1, 8
+ csrwr t0, LOONGARCH_CSR_TLBELO0
+ csrwr t1, LOONGARCH_CSR_TLBELO1
+ tlbwr
+
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+
+#ifdef CONFIG_64BIT
+vmalloc_modify:
+ la_abs t1, swapper_pg_dir
+ b vmalloc_done_modify
+#endif
+
+ /* This is the entry point for huge page handling. */
+tlb_huge_update_modify:
+#ifdef CONFIG_SMP
+ ll.d ra, t1, 0
+#endif
+ andi t0, ra, _PAGE_WRITE
+ beqz t0, nopage_tlb_modify
+
+#ifdef CONFIG_SMP
+ ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ sc.d t0, t1, 0
+ beqz t0, tlb_huge_update_modify
+ ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+#else
+ rotri.d ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
+ ori t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
+ st.d t0, t1, 0
+#endif
+ csrrd ra, LOONGARCH_CSR_ASID
+ csrrd t1, LOONGARCH_CSR_BADV
+ andi ra, ra, CSR_ASID_ASID
+ invtlb INVTLB_ADDR_GFALSE_AND_ASID, ra, t1
+
+ /*
+ * A huge PTE describes an area the size of the
+ * configured huge page size. This is twice the
+ * size of the large TLB entry we intend to use.
+ * A TLB entry half the size of the configured
+ * huge page is therefore written into entrylo0
+ * and entrylo1 to cover the contiguous huge PTE
+ * address space.
+ */
+ /* Huge page: Move Global bit */
+ xori t0, t0, _PAGE_HUGE
+ lu12i.w t1, _PAGE_HGLOBAL >> 12
+ and t1, t0, t1
+ srli.d t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
+ or t0, t0, t1
+
+ move ra, t0
+ csrwr ra, LOONGARCH_CSR_TLBELO0
+
+ /* Convert to entrylo1 */
+ addi.d t1, zero, 1
+ slli.d t1, t1, (HPAGE_SHIFT - 1)
+ add.d t0, t0, t1
+ csrwr t0, LOONGARCH_CSR_TLBELO1
+
+ /* Set huge page tlb entry size */
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ tlbfill
+
+ /* Reset default page size */
+ addu16i.d t0, zero, (CSR_TLBIDX_PS >> 16)
+ addu16i.d t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
+ csrxchg t1, t0, LOONGARCH_CSR_TLBIDX
+
+ csrrd t0, EXCEPTION_KS0
+ csrrd t1, EXCEPTION_KS1
+ csrrd ra, EXCEPTION_KS2
+ ertn
+
+nopage_tlb_modify:
+ dbar 0x700
+ csrrd ra, EXCEPTION_KS2
+ la_abs t0, tlb_do_page_fault_1
+ jr t0
+SYM_CODE_END(handle_tlb_modify)
+
+SYM_CODE_START(handle_tlb_modify_ptw)
+ csrwr t0, LOONGARCH_CSR_KS0
+ csrwr t1, LOONGARCH_CSR_KS1
+ la_abs t0, tlb_do_page_fault_1
+ jr t0
+SYM_CODE_END(handle_tlb_modify_ptw)
+
+SYM_CODE_START(handle_tlb_refill)
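+ /*
+ * Fast TLB refill path: CSR.PGD yields the page table root matching
+ * the bad address, each lddir walks one directory level, and the two
+ * ldpte load the even/odd PTE pair before tlbfill installs the entry.
+ * Only t0 is clobbered, preserved via CSR.TLBRSAVE.
+ */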
+ csrwr t0, LOONGARCH_CSR_TLBRSAVE
+ csrrd t0, LOONGARCH_CSR_PGD
+ lddir t0, t0, 3
+#if CONFIG_PGTABLE_LEVELS > 3
+ lddir t0, t0, 2
+#endif
+#if CONFIG_PGTABLE_LEVELS > 2
+ lddir t0, t0, 1
+#endif
+ ldpte t0, 0
+ ldpte t0, 1
+ tlbfill
+ csrrd t0, LOONGARCH_CSR_TLBRSAVE
+ ertn
+SYM_CODE_END(handle_tlb_refill)