author     Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
committer  Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-07 18:49:45 +0000
commit     2c3c1048746a4622d8c89a29670120dc8fab93c4 (patch)
tree       848558de17fb3008cdf4d861b01ac7781903ce39 /arch/loongarch/kernel
parent     Initial commit. (diff)
download   linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.tar.xz
           linux-2c3c1048746a4622d8c89a29670120dc8fab93c4.zip

Adding upstream version 6.1.76. (upstream/6.1.76)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/loongarch/kernel')
-rw-r--r--  arch/loongarch/kernel/.gitignore         |    2
-rw-r--r--  arch/loongarch/kernel/Makefile           |   36
-rw-r--r--  arch/loongarch/kernel/access-helper.h    |   13
-rw-r--r--  arch/loongarch/kernel/acpi.c             |  299
-rw-r--r--  arch/loongarch/kernel/asm-offsets.c      |  259
-rw-r--r--  arch/loongarch/kernel/cacheinfo.c        |   86
-rw-r--r--  arch/loongarch/kernel/cpu-probe.c        |  299
-rw-r--r--  arch/loongarch/kernel/crash_dump.c       |   23
-rw-r--r--  arch/loongarch/kernel/dma.c              |   30
-rw-r--r--  arch/loongarch/kernel/efi-header.S       |   99
-rw-r--r--  arch/loongarch/kernel/efi.c              |  103
-rw-r--r--  arch/loongarch/kernel/elf.c              |   25
-rw-r--r--  arch/loongarch/kernel/entry.S            |   89
-rw-r--r--  arch/loongarch/kernel/env.c              |   76
-rw-r--r--  arch/loongarch/kernel/fpu.S              |  251
-rw-r--r--  arch/loongarch/kernel/genex.S            |   95
-rw-r--r--  arch/loongarch/kernel/head.S             |  130
-rw-r--r--  arch/loongarch/kernel/idle.c             |   16
-rw-r--r--  arch/loongarch/kernel/image-vars.h       |   27
-rw-r--r--  arch/loongarch/kernel/inst.c             |   40
-rw-r--r--  arch/loongarch/kernel/io.c               |   94
-rw-r--r--  arch/loongarch/kernel/irq.c              |  137
-rw-r--r--  arch/loongarch/kernel/machine_kexec.c    |  304
-rw-r--r--  arch/loongarch/kernel/mem.c              |   63
-rw-r--r--  arch/loongarch/kernel/module-sections.c  |  170
-rw-r--r--  arch/loongarch/kernel/module.c           |  458
-rw-r--r--  arch/loongarch/kernel/numa.c             |  465
-rw-r--r--  arch/loongarch/kernel/perf_event.c       |  887
-rw-r--r--  arch/loongarch/kernel/perf_regs.c        |   53
-rw-r--r--  arch/loongarch/kernel/proc.c             |  128
-rw-r--r--  arch/loongarch/kernel/process.c          |  353
-rw-r--r--  arch/loongarch/kernel/ptrace.c           |  433
-rw-r--r--  arch/loongarch/kernel/relocate_kernel.S  |  112
-rw-r--r--  arch/loongarch/kernel/reset.c            |   74
-rw-r--r--  arch/loongarch/kernel/setup.c            |  437
-rw-r--r--  arch/loongarch/kernel/signal.c           |  566
-rw-r--r--  arch/loongarch/kernel/smp.c              |  666
-rw-r--r--  arch/loongarch/kernel/stacktrace.c       |   78
-rw-r--r--  arch/loongarch/kernel/switch.S           |   37
-rw-r--r--  arch/loongarch/kernel/syscall.c          |   63
-rw-r--r--  arch/loongarch/kernel/sysrq.c            |   65
-rw-r--r--  arch/loongarch/kernel/time.c             |  222
-rw-r--r--  arch/loongarch/kernel/topology.c         |   53
-rw-r--r--  arch/loongarch/kernel/traps.c            |  738
-rw-r--r--  arch/loongarch/kernel/unwind_guess.c     |   73
-rw-r--r--  arch/loongarch/kernel/unwind_prologue.c  |  187
-rw-r--r--  arch/loongarch/kernel/vdso.c             |  143
-rw-r--r--  arch/loongarch/kernel/vmlinux.lds.S      |  129
48 files changed, 9186 insertions(+), 0 deletions(-)
diff --git a/arch/loongarch/kernel/.gitignore b/arch/loongarch/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/loongarch/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
new file mode 100644
index 000000000..42be56427
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile
@@ -0,0 +1,36 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch kernel.
+#
+
+extra-y := vmlinux.lds
+
+obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
+ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o
+
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_EFI) += efi.o
+
+obj-$(CONFIG_CPU_HAS_FPU) += fpu.o
+
+obj-$(CONFIG_MODULES) += module.o module-sections.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+
+obj-$(CONFIG_PROC_FS) += proc.o
+
+obj-$(CONFIG_SMP) += smp.o
+
+obj-$(CONFIG_NUMA) += numa.o
+
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+
+obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
+obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
+
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/access-helper.h b/arch/loongarch/kernel/access-helper.h
new file mode 100644
index 000000000..4a35ca81b
--- /dev/null
+++ b/arch/loongarch/kernel/access-helper.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/uaccess.h>
+
+static inline int __get_inst(u32 *i, u32 *p, bool user)
+{
+ return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
+}
+
+static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
+{
+ return user ? get_user(*a, (unsigned long __user *)p) : get_kernel_nofault(*a, p);
+}
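
These helpers give exception code a single call that fetches an instruction or an address from either user or kernel space. As a rough illustration (a hypothetical caller, not part of this patch; it assumes pt_regs and user_mode() from <asm/ptrace.h>), a trap handler could use them like this:

	/* Hypothetical example only -- not part of this patch. */
	static int fetch_trapping_instruction(struct pt_regs *regs, u32 *insn)
	{
		/* regs->csr_era is the PC of the trapping instruction;
		 * user_mode(regs) selects get_user() vs get_kernel_nofault(). */
		return __get_inst(insn, (u32 *)regs->csr_era, user_mode(regs));
	}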
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
new file mode 100644
index 000000000..8319cc409
--- /dev/null
+++ b/arch/loongarch/kernel/acpi.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/serial_core.h>
+#include <asm/io.h>
+#include <asm/numa.h>
+#include <asm/loongson.h>
+
+int acpi_disabled;
+EXPORT_SYMBOL(acpi_disabled);
+int acpi_noirq;
+int acpi_pci_disabled;
+EXPORT_SYMBOL(acpi_pci_disabled);
+int acpi_strict = 1; /* We have no workarounds on LoongArch */
+int num_processors;
+int disabled_cpus;
+
+u64 acpi_saved_sp;
+
+#define MAX_CORE_PIC 256
+
+#define PREFIX "ACPI: "
+
+void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
+{
+
+ if (!phys || !size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+ if (!memblock_is_memory(phys))
+ return ioremap(phys, size);
+ else
+ return ioremap_cache(phys, size);
+}
+
+#ifdef CONFIG_SMP
+static int set_processor_mask(u32 id, u32 flags)
+{
+
+ int cpu, cpuid = id;
+
+ if (num_processors >= nr_cpu_ids) {
+ pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
+ " processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+
+ return -ENODEV;
+
+ }
+ if (cpuid == loongson_sysconf.boot_cpu_id)
+ cpu = 0;
+ else
+ cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+ if (flags & ACPI_MADT_ENABLED) {
+ num_processors++;
+ set_cpu_possible(cpu, true);
+ set_cpu_present(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
+ } else
+ disabled_cpus++;
+
+ return cpu;
+}
+#endif
+
+static int __init
+acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_core_pic *processor = NULL;
+
+ processor = (struct acpi_madt_core_pic *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(&header->common);
+#ifdef CONFIG_SMP
+ set_processor_mask(processor->core_id, processor->flags);
+#endif
+
+ return 0;
+}
+
+static int __init
+acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
+{
+ static int core = 0;
+ struct acpi_madt_eio_pic *eiointc = NULL;
+
+ eiointc = (struct acpi_madt_eio_pic *)header;
+ if (BAD_MADT_ENTRY(eiointc, end))
+ return -EINVAL;
+
+ core = eiointc->node * CORES_PER_EIO_NODE;
+ set_bit(core, &(loongson_sysconf.cores_io_master));
+
+ return 0;
+}
+
+static void __init acpi_process_madt(void)
+{
+#ifdef CONFIG_SMP
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ __cpu_number_map[i] = -1;
+ __cpu_logical_map[i] = -1;
+ }
+#endif
+ acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+ acpi_parse_processor, MAX_CORE_PIC);
+
+ acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
+ acpi_parse_eio_master, MAX_IO_PICS);
+
+ loongson_sysconf.nr_cpus = num_processors;
+}
+
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * If acpi_disabled, bail out
+ */
+ if (acpi_disabled)
+ return;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ if (acpi_table_init()) {
+ disable_acpi();
+ return;
+ }
+
+ loongson_sysconf.boot_cpu_id = read_csr_cpuid();
+
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+ */
+ acpi_process_madt();
+
+ /* Do not enable ACPI SPCR console by default */
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+static __init int setup_node(int pxm)
+{
+ return acpi_map_pxm_to_node(pxm);
+}
+
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
+unsigned int numa_distance_cnt;
+
+static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
+{
+ return slit->locality_count;
+}
+
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ node_distances[from][to] = distance;
+}
+
+/* Callback for Proximity Domain -> CPUID mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+ if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+ bad_srat();
+ return;
+ }
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ return;
+ pxm = pa->proximity_domain_lo;
+ if (acpi_srat_revision >= 2) {
+ pxm |= (pa->proximity_domain_hi[0] << 8);
+ pxm |= (pa->proximity_domain_hi[1] << 16);
+ pxm |= (pa->proximity_domain_hi[2] << 24);
+ }
+ node = setup_node(pxm);
+ if (node < 0) {
+ pr_err("SRAT: Too many proximity domains %x\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ if (pa->apic_id >= CONFIG_NR_CPUS) {
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
+ pxm, pa->apic_id, node);
+ return;
+ }
+
+ early_numa_add_cpu(pa->apic_id, node);
+
+ set_cpuid_to_node(pa->apic_id, node);
+ node_set(node, numa_nodes_parsed);
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+void __init acpi_numa_arch_fixup(void) {}
+#endif
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ memblock_reserve(addr, size);
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+
+#include <acpi/processor.h>
+
+static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+#ifdef CONFIG_ACPI_NUMA
+ int nid;
+
+ nid = acpi_get_node(handle);
+ if (nid != NUMA_NO_NODE) {
+ set_cpuid_to_node(physid, nid);
+ node_set(nid, numa_nodes_parsed);
+ set_cpu_numa_node(cpu, nid);
+ cpumask_set_cpu(cpu, cpumask_of_node(nid));
+ }
+#endif
+ return 0;
+}
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
+{
+ int cpu;
+
+ cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
+ if (cpu < 0) {
+ pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ return cpu;
+ }
+
+ acpi_map_cpu2node(handle, cpu, physid);
+
+ *pcpu = cpu;
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+#ifdef CONFIG_ACPI_NUMA
+ set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
+#endif
+ set_cpu_present(cpu, false);
+ num_processors--;
+
+ pr_info("cpu%d hot remove!\n", cpu);
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
new file mode 100644
index 000000000..bdd88eda9
--- /dev/null
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -0,0 +1,259 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/cpu-info.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+void output_ptreg_defines(void)
+{
+ COMMENT("LoongArch pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_CRMD, pt_regs, csr_crmd);
+ OFFSET(PT_PRMD, pt_regs, csr_prmd);
+ OFFSET(PT_EUEN, pt_regs, csr_euen);
+ OFFSET(PT_ECFG, pt_regs, csr_ecfg);
+ OFFSET(PT_ESTAT, pt_regs, csr_estat);
+ OFFSET(PT_ERA, pt_regs, csr_era);
+ OFFSET(PT_BVADDR, pt_regs, csr_badvaddr);
+ OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+}
+
+void output_task_defines(void)
+{
+ COMMENT("LoongArch task_struct offsets.");
+ OFFSET(TASK_STATE, task_struct, __state);
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
+}
+
+void output_thread_info_defines(void)
+{
+ COMMENT("LoongArch thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
+ BLANK();
+}
+
+void output_thread_defines(void)
+{
+ COMMENT("LoongArch specific thread_struct offsets.");
+ OFFSET(THREAD_REG01, task_struct, thread.reg01);
+ OFFSET(THREAD_REG03, task_struct, thread.reg03);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG24, task_struct, thread.reg24);
+ OFFSET(THREAD_REG25, task_struct, thread.reg25);
+ OFFSET(THREAD_REG26, task_struct, thread.reg26);
+ OFFSET(THREAD_REG27, task_struct, thread.reg27);
+ OFFSET(THREAD_REG28, task_struct, thread.reg28);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
+ OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
+ OFFSET(THREAD_CSRCRMD, task_struct,
+ thread.csr_crmd);
+ OFFSET(THREAD_CSRPRMD, task_struct,
+ thread.csr_prmd);
+ OFFSET(THREAD_CSREUEN, task_struct,
+ thread.csr_euen);
+ OFFSET(THREAD_CSRECFG, task_struct,
+ thread.csr_ecfg);
+
+ OFFSET(THREAD_SCR0, task_struct, thread.scr0);
+ OFFSET(THREAD_SCR1, task_struct, thread.scr1);
+ OFFSET(THREAD_SCR2, task_struct, thread.scr2);
+ OFFSET(THREAD_SCR3, task_struct, thread.scr3);
+
+ OFFSET(THREAD_EFLAGS, task_struct, thread.eflags);
+
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
+ OFFSET(THREAD_BVADDR, task_struct, \
+ thread.csr_badvaddr);
+ OFFSET(THREAD_ECODE, task_struct, \
+ thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
+ BLANK();
+}
+
+void output_thread_fpu_defines(void)
+{
+ OFFSET(THREAD_FPR0, loongarch_fpu, fpr[0]);
+ OFFSET(THREAD_FPR1, loongarch_fpu, fpr[1]);
+ OFFSET(THREAD_FPR2, loongarch_fpu, fpr[2]);
+ OFFSET(THREAD_FPR3, loongarch_fpu, fpr[3]);
+ OFFSET(THREAD_FPR4, loongarch_fpu, fpr[4]);
+ OFFSET(THREAD_FPR5, loongarch_fpu, fpr[5]);
+ OFFSET(THREAD_FPR6, loongarch_fpu, fpr[6]);
+ OFFSET(THREAD_FPR7, loongarch_fpu, fpr[7]);
+ OFFSET(THREAD_FPR8, loongarch_fpu, fpr[8]);
+ OFFSET(THREAD_FPR9, loongarch_fpu, fpr[9]);
+ OFFSET(THREAD_FPR10, loongarch_fpu, fpr[10]);
+ OFFSET(THREAD_FPR11, loongarch_fpu, fpr[11]);
+ OFFSET(THREAD_FPR12, loongarch_fpu, fpr[12]);
+ OFFSET(THREAD_FPR13, loongarch_fpu, fpr[13]);
+ OFFSET(THREAD_FPR14, loongarch_fpu, fpr[14]);
+ OFFSET(THREAD_FPR15, loongarch_fpu, fpr[15]);
+ OFFSET(THREAD_FPR16, loongarch_fpu, fpr[16]);
+ OFFSET(THREAD_FPR17, loongarch_fpu, fpr[17]);
+ OFFSET(THREAD_FPR18, loongarch_fpu, fpr[18]);
+ OFFSET(THREAD_FPR19, loongarch_fpu, fpr[19]);
+ OFFSET(THREAD_FPR20, loongarch_fpu, fpr[20]);
+ OFFSET(THREAD_FPR21, loongarch_fpu, fpr[21]);
+ OFFSET(THREAD_FPR22, loongarch_fpu, fpr[22]);
+ OFFSET(THREAD_FPR23, loongarch_fpu, fpr[23]);
+ OFFSET(THREAD_FPR24, loongarch_fpu, fpr[24]);
+ OFFSET(THREAD_FPR25, loongarch_fpu, fpr[25]);
+ OFFSET(THREAD_FPR26, loongarch_fpu, fpr[26]);
+ OFFSET(THREAD_FPR27, loongarch_fpu, fpr[27]);
+ OFFSET(THREAD_FPR28, loongarch_fpu, fpr[28]);
+ OFFSET(THREAD_FPR29, loongarch_fpu, fpr[29]);
+ OFFSET(THREAD_FPR30, loongarch_fpu, fpr[30]);
+ OFFSET(THREAD_FPR31, loongarch_fpu, fpr[31]);
+
+ OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
+ OFFSET(THREAD_FCC, loongarch_fpu, fcc);
+ BLANK();
+}
+
+void output_mm_defines(void)
+{
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
+}
+
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ BLANK();
+}
+
+void output_signal_defines(void)
+{
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_SMP
+void output_smpboot_defines(void)
+{
+ COMMENT("Linux smp cpu boot offsets.");
+ OFFSET(CPU_BOOT_STACK, secondary_data, stack);
+ OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
+ BLANK();
+}
+#endif
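
For context: kbuild compiles asm-offsets.c to assembly and turns the OFFSET()/DEFINE() markers into include/generated/asm-offsets.h, so the entries above become plain #defines that assembly sources such as entry.S and fpu.S consume. A rough sketch of the generated header (numeric values are illustrative only and depend on the real structure layout):

	/* Illustrative excerpt of include/generated/asm-offsets.h */
	#define PT_R1 8      /* offsetof(struct pt_regs, regs[1]) */
	#define PT_R3 24     /* offsetof(struct pt_regs, regs[3]) */
	#define PT_SIZE 304  /* sizeof(struct pt_regs) -- illustrative value */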
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
new file mode 100644
index 000000000..c7988f757
--- /dev/null
+++ b/arch/loongarch/kernel/cacheinfo.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch cacheinfo support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cacheinfo.h>
+#include <linux/topology.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-info.h>
+
+int init_cache_level(unsigned int cpu)
+{
+ int cache_present = current_cpu_data.cache_leaves_present;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ this_cpu_ci->num_levels =
+ current_cpu_data.cache_leaves[cache_present - 1].level;
+ this_cpu_ci->num_leaves = cache_present;
+
+ return 0;
+}
+
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE)
+ && !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
+}
+
+static void cache_cpumap_setup(unsigned int cpu)
+{
+ unsigned int index;
+ struct cacheinfo *this_leaf, *sib_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < this_cpu_ci->num_leaves; index++) {
+ unsigned int i;
+
+ this_leaf = this_cpu_ci->info_list + index;
+ /* skip if shared_cpu_map is already populated */
+ if (!cpumask_empty(&this_leaf->shared_cpu_map))
+ continue;
+
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ for_each_online_cpu(i) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+ if (i == cpu || !sib_cpu_ci->info_list ||
+ (cpu_to_node(i) != cpu_to_node(cpu)))
+ continue;
+
+ sib_leaf = sib_cpu_ci->info_list + index;
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ }
+ }
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ int i, cache_present = current_cpu_data.cache_leaves_present;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves;
+
+ for (i = 0; i < cache_present; i++) {
+ cd = cdesc + i;
+
+ this_leaf->type = cd->type;
+ this_leaf->level = cd->level;
+ this_leaf->coherency_line_size = cd->linesz;
+ this_leaf->number_of_sets = cd->sets;
+ this_leaf->ways_of_associativity = cd->ways;
+ this_leaf->size = cd->linesz * cd->sets * cd->ways;
+ this_leaf->priv = &cd->flags;
+ this_leaf++;
+ }
+
+ cache_cpumap_setup(cpu);
+ this_cpu_ci->cpu_map_populated = true;
+
+ return 0;
+}
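
As a quick check of the size computation in populate_cache_leaves() above (hypothetical values, not taken from real hardware): a leaf with 64-byte lines, 256 sets and 16 ways is reported as 64 * 256 * 16 = 262144 bytes (256 KiB).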
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
new file mode 100644
index 000000000..5adf0f736
--- /dev/null
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -0,0 +1,299 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpu-features.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable-bits.h>
+#include <asm/setup.h>
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
+{
+ unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+ fcsr = c->fpu_csr0;
+ mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
+ sr = read_csr_euen();
+ enable_fpu();
+
+ fcsr0 = fcsr & mask;
+ write_fcsr(LOONGARCH_FCSR0, fcsr0);
+ fcsr0 = read_fcsr(LOONGARCH_FCSR0);
+
+ fcsr1 = fcsr | ~mask;
+ write_fcsr(LOONGARCH_FCSR0, fcsr1);
+ fcsr1 = read_fcsr(LOONGARCH_FCSR0);
+
+ write_fcsr(LOONGARCH_FCSR0, fcsr);
+
+ write_csr_euen(sr);
+
+ c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+/* MAP BASE */
+unsigned long vm_map_base;
+EXPORT_SYMBOL(vm_map_base);
+
+static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+{
+#ifdef __NEED_ADDRBITS_PROBE
+ c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
+ c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
+ vm_map_base = 0UL - (1UL << c->vabits);
+#endif
+}
+
+static void set_isa(struct cpuinfo_loongarch *c, unsigned int isa)
+{
+ switch (isa) {
+ case LOONGARCH_CPU_ISA_LA64:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA64;
+ fallthrough;
+ case LOONGARCH_CPU_ISA_LA32S:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA32S;
+ fallthrough;
+ case LOONGARCH_CPU_ISA_LA32R:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA32R;
+ break;
+ }
+}
+
+static void cpu_probe_common(struct cpuinfo_loongarch *c)
+{
+ unsigned int config;
+ unsigned long asid_mask;
+
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+
+ elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG1);
+ if (config & CPUCFG1_UAL) {
+ c->options |= LOONGARCH_CPU_UAL;
+ elf_hwcap |= HWCAP_LOONGARCH_UAL;
+ }
+ if (config & CPUCFG1_CRC32) {
+ c->options |= LOONGARCH_CPU_CRC32;
+ elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+ }
+
+
+ config = read_cpucfg(LOONGARCH_CPUCFG2);
+ if (config & CPUCFG2_LAM) {
+ c->options |= LOONGARCH_CPU_LAM;
+ elf_hwcap |= HWCAP_LOONGARCH_LAM;
+ }
+ if (config & CPUCFG2_FP) {
+ c->options |= LOONGARCH_CPU_FPU;
+ elf_hwcap |= HWCAP_LOONGARCH_FPU;
+ }
+ if (config & CPUCFG2_COMPLEX) {
+ c->options |= LOONGARCH_CPU_COMPLEX;
+ elf_hwcap |= HWCAP_LOONGARCH_COMPLEX;
+ }
+ if (config & CPUCFG2_CRYPTO) {
+ c->options |= LOONGARCH_CPU_CRYPTO;
+ elf_hwcap |= HWCAP_LOONGARCH_CRYPTO;
+ }
+ if (config & CPUCFG2_LVZP) {
+ c->options |= LOONGARCH_CPU_LVZ;
+ elf_hwcap |= HWCAP_LOONGARCH_LVZ;
+ }
+
+ config = read_cpucfg(LOONGARCH_CPUCFG6);
+ if (config & CPUCFG6_PMP)
+ c->options |= LOONGARCH_CPU_PMP;
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
+
+ config = csr_read32(LOONGARCH_CSR_ASID);
+ config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
+ asid_mask = GENMASK(config - 1, 0);
+ set_cpu_asid_mask(c, asid_mask);
+
+ config = read_csr_prcfg1();
+ c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
+ c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
+
+ config = read_csr_prcfg3();
+ switch (config & CSR_CONF3_TLBTYPE) {
+ case 0:
+ c->tlbsizemtlb = 0;
+ c->tlbsizestlbsets = 0;
+ c->tlbsizestlbways = 0;
+ c->tlbsize = 0;
+ break;
+ case 1:
+ c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+ c->tlbsizestlbsets = 0;
+ c->tlbsizestlbways = 0;
+ c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+ break;
+ case 2:
+ c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+ c->tlbsizestlbsets = 1 << ((config & CSR_CONF3_STLBIDX) >> CSR_CONF3_STLBIDX_SHIFT);
+ c->tlbsizestlbways = ((config & CSR_CONF3_STLBWAYS) >> CSR_CONF3_STLBWAYS_SHIFT) + 1;
+ c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+ break;
+ default:
+ pr_warn("Warning: unknown TLB type\n");
+ }
+}
+
+#define MAX_NAME_LEN 32
+#define VENDOR_OFFSET 0
+#define CPUNAME_OFFSET 9
+
+static char cpu_full_name[MAX_NAME_LEN] = " - ";
+
+static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
+{
+ uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
+ uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+
+ if (!__cpu_full_name[cpu])
+ __cpu_full_name[cpu] = cpu_full_name;
+
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ switch (c->processor_id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ c->cputype = CPU_LOONGSON32;
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ __cpu_family[cpu] = "Loongson-32bit";
+ pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
+ break;
+ case PRID_SERIES_LA264:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+ break;
+ case PRID_SERIES_LA364:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+ break;
+ case PRID_SERIES_LA464:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+ break;
+ case PRID_SERIES_LA664:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+ break;
+ default: /* Default to 64 bit */
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
+ }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_family[NR_CPUS];
+const char *__cpu_full_name[NR_CPUS];
+const char *__elf_platform;
+
+static void cpu_report(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_family_string());
+ if (c->options & LOONGARCH_CPU_FPU)
+ pr_info("FPU%d revision is: %08x\n", smp_processor_id(), c->fpu_vers);
+}
+
+void cpu_probe(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ /*
+ * Set a default ELF platform, cpu probe may later
+ * overwrite it with a more precise value
+ */
+ set_elf_platform(cpu, "loongarch");
+
+ c->cputype = CPU_UNKNOWN;
+ c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
+ c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3;
+
+ c->fpu_csr0 = FPU_CSR_RN;
+ c->fpu_mask = FPU_CSR_RSVD;
+
+ cpu_probe_common(c);
+
+ per_cpu_trap_init(cpu);
+
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
+ }
+
+ BUG_ON(!__cpu_family[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ cpu_probe_addrbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vabits) - 1);
+#endif
+
+ cpu_report();
+}
diff --git a/arch/loongarch/kernel/crash_dump.c b/arch/loongarch/kernel/crash_dump.c
new file mode 100644
index 000000000..e559307c1
--- /dev/null
+++ b/arch/loongarch/kernel/crash_dump.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/crash_dump.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ memunmap(vaddr);
+
+ return csize;
+}
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
new file mode 100644
index 000000000..7a9c6a9dd
--- /dev/null
+++ b/arch/loongarch/kernel/dma.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/dma-direct.h>
+
+void acpi_arch_dma_setup(struct device *dev)
+{
+ int ret;
+ u64 mask, end = 0;
+ const struct bus_dma_region *map = NULL;
+
+ ret = acpi_dma_get_range(dev, &map);
+ if (!ret && map) {
+ const struct bus_dma_region *r = map;
+
+ for (end = 0; r->size; r++) {
+ if (r->dma_start + r->size - 1 > end)
+ end = r->dma_start + r->size - 1;
+ }
+
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
+ }
+
+}
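
To make the mask computation above concrete (hypothetical numbers): if the highest reachable DMA address in the range map is end = 0xffffffff, then ilog2(end) = 31 and DMA_BIT_MASK(31 + 1) covers 32 bits, so both the coherent and streaming DMA masks are capped at 32 bits.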
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
new file mode 100644
index 000000000..8c1d229a2
--- /dev/null
+++ b/arch/loongarch/kernel/efi-header.S
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __EFI_PE_HEADER
+ .long PE_MAGIC
+.Lcoff_header:
+ .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
+ .short .Lsection_count /* NumberOfSections */
+ .long 0 /* TimeDateStamp */
+ .long 0 /* PointerToSymbolTable */
+ .long 0 /* NumberOfSymbols */
+ .short .Lsection_table - .Loptional_header /* SizeOfOptionalHeader */
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
+
+.Loptional_header:
+ .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
+ .byte 0x02 /* MajorLinkerVersion */
+ .byte 0x14 /* MinorLinkerVersion */
+ .long __inittext_end - .Lefi_header_end /* SizeOfCode */
+ .long _end - __initdata_begin /* SizeOfInitializedData */
+ .long 0 /* SizeOfUninitializedData */
+ .long __efistub_efi_pe_entry - _head /* AddressOfEntryPoint */
+ .long .Lefi_header_end - _head /* BaseOfCode */
+
+.Lextra_header_fields:
+ .quad 0 /* ImageBase */
+ .long PECOFF_SEGMENT_ALIGN /* SectionAlignment */
+ .long PECOFF_FILE_ALIGN /* FileAlignment */
+ .short 0 /* MajorOperatingSystemVersion */
+ .short 0 /* MinorOperatingSystemVersion */
+ .short LINUX_EFISTUB_MAJOR_VERSION /* MajorImageVersion */
+ .short LINUX_EFISTUB_MINOR_VERSION /* MinorImageVersion */
+ .short 0 /* MajorSubsystemVersion */
+ .short 0 /* MinorSubsystemVersion */
+ .long 0 /* Win32VersionValue */
+
+ .long _end - _head /* SizeOfImage */
+
+ /* Everything before the kernel image is considered part of the header */
+ .long .Lefi_header_end - _head /* SizeOfHeaders */
+ .long 0 /* CheckSum */
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION /* Subsystem */
+ .short 0 /* DllCharacteristics */
+ .quad 0 /* SizeOfStackReserve */
+ .quad 0 /* SizeOfStackCommit */
+ .quad 0 /* SizeOfHeapReserve */
+ .quad 0 /* SizeOfHeapCommit */
+ .long 0 /* LoaderFlags */
+ .long (.Lsection_table - .) / 8 /* NumberOfRvaAndSizes */
+
+ .quad 0 /* ExportTable */
+ .quad 0 /* ImportTable */
+ .quad 0 /* ResourceTable */
+ .quad 0 /* ExceptionTable */
+ .quad 0 /* CertificationTable */
+ .quad 0 /* BaseRelocationTable */
+
+ /* Section table */
+.Lsection_table:
+ .ascii ".text\0\0\0"
+ .long __inittext_end - .Lefi_header_end /* VirtualSize */
+ .long .Lefi_header_end - _head /* VirtualAddress */
+ .long __inittext_end - .Lefi_header_end /* SizeOfRawData */
+ .long .Lefi_header_end - _head /* PointerToRawData */
+
+ .long 0 /* PointerToRelocations */
+ .long 0 /* PointerToLineNumbers */
+ .short 0 /* NumberOfRelocations */
+ .short 0 /* NumberOfLineNumbers */
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE /* Characteristics */
+
+ .ascii ".data\0\0\0"
+ .long _end - __initdata_begin /* VirtualSize */
+ .long __initdata_begin - _head /* VirtualAddress */
+ .long _edata - __initdata_begin /* SizeOfRawData */
+ .long __initdata_begin - _head /* PointerToRawData */
+
+ .long 0 /* PointerToRelocations */
+ .long 0 /* PointerToLineNumbers */
+ .short 0 /* NumberOfRelocations */
+ .short 0 /* NumberOfLineNumbers */
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE /* Characteristics */
+
+ .set .Lsection_count, (. - .Lsection_table) / 40
+
+ .balign 0x10000 /* PECOFF_SEGMENT_ALIGN */
+.Lefi_header_end:
+ .endm
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
new file mode 100644
index 000000000..a31329971
--- /dev/null
+++ b/arch/loongarch/kernel/efi.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI initialization
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/memblock.h>
+#include <linux/reboot.h>
+#include <linux/uaccess.h>
+
+#include <asm/early_ioremap.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+
+static unsigned long efi_nr_tables;
+static unsigned long efi_config_table;
+
+static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR;
+
+static efi_system_table_t *efi_systab;
+static efi_config_table_type_t arch_tables[] __initdata = {
+ {LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" },
+ {},
+};
+
+void __init efi_runtime_init(void)
+{
+ if (!efi_enabled(EFI_BOOT))
+ return;
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return;
+ }
+
+ efi.runtime = (efi_runtime_services_t *)efi_systab->runtime;
+ efi.runtime_version = (unsigned int)efi.runtime->hdr.revision;
+
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+}
+
+void __init efi_init(void)
+{
+ int size;
+ void *config_tables;
+ struct efi_boot_memmap *tbl;
+
+ if (!efi_system_table)
+ return;
+
+ efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab));
+ if (!efi_systab) {
+ pr_err("Can't find EFI system table.\n");
+ return;
+ }
+
+ efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
+
+ set_bit(EFI_64BIT, &efi.flags);
+ efi_nr_tables = efi_systab->nr_tables;
+ efi_config_table = (unsigned long)efi_systab->tables;
+
+ size = sizeof(efi_config_table_t);
+ config_tables = early_memremap(efi_config_table, efi_nr_tables * size);
+ efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables);
+ early_memunmap(config_tables, efi_nr_tables * size);
+
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+ if (screen_info.orig_video_isVGA == VIDEO_TYPE_EFI)
+ memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
+
+ if (boot_memmap == EFI_INVALID_TABLE_ADDR)
+ return;
+
+ tbl = early_memremap_ro(boot_memmap, sizeof(*tbl));
+ if (tbl) {
+ struct efi_memory_map_data data;
+
+ data.phys_map = boot_memmap + sizeof(*tbl);
+ data.size = tbl->map_size;
+ data.desc_size = tbl->desc_size;
+ data.desc_version = tbl->desc_ver;
+
+ if (efi_memmap_init_early(&data) < 0)
+ panic("Unable to map EFI memory map.\n");
+
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
new file mode 100644
index 000000000..0fa81ced2
--- /dev/null
+++ b/arch/loongarch/kernel/elf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-info.h>
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ return 0;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ struct arch_elf_state *state)
+{
+ return 0;
+}
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
new file mode 100644
index 000000000..d53b631c9
--- /dev/null
+++ b/arch/loongarch/kernel/entry.S
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+ .text
+ .cfi_sections .debug_frame
+ .align 5
+SYM_FUNC_START(handle_syscall)
+ csrrd t0, PERCPU_BASE_KS
+ la.abs t1, kernelsp
+ add.d t1, t1, t0
+ move t2, sp
+ ld.d sp, t1, 0
+
+ addi.d sp, sp, -PT_SIZE
+ cfi_st t2, PT_R3
+ cfi_rel_offset sp, PT_R3
+ st.d zero, sp, PT_R0
+ csrrd t2, LOONGARCH_CSR_PRMD
+ st.d t2, sp, PT_PRMD
+ csrrd t2, LOONGARCH_CSR_CRMD
+ st.d t2, sp, PT_CRMD
+ csrrd t2, LOONGARCH_CSR_EUEN
+ st.d t2, sp, PT_EUEN
+ csrrd t2, LOONGARCH_CSR_ECFG
+ st.d t2, sp, PT_ECFG
+ csrrd t2, LOONGARCH_CSR_ESTAT
+ st.d t2, sp, PT_ESTAT
+ cfi_st ra, PT_R1
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ csrrd ra, LOONGARCH_CSR_ERA
+ st.d ra, sp, PT_ERA
+ cfi_rel_offset ra, PT_ERA
+
+ cfi_st tp, PT_R2
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
+
+ SAVE_STATIC
+
+ move u0, t0
+ li.d tp, ~_THREAD_MASK
+ and tp, tp, sp
+
+ move a0, sp
+ bl do_syscall
+
+ RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_syscall)
+
+SYM_CODE_START(ret_from_fork)
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, sp
+ bl syscall_exit_to_user_mode
+ RESTORE_STATIC
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_fork)
+
+SYM_CODE_START(ret_from_kernel_thread)
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jirl ra, s0, 0
+ move a0, sp
+ bl syscall_exit_to_user_mode
+ RESTORE_STATIC
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_kernel_thread)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
new file mode 100644
index 000000000..6d56a463b
--- /dev/null
+++ b/arch/loongarch/kernel/env.c
@@ -0,0 +1,76 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <asm/early_ioremap.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+
+u64 efi_system_table;
+struct loongson_system_configuration loongson_sysconf;
+EXPORT_SYMBOL(loongson_sysconf);
+
+void __init init_environ(void)
+{
+ int efi_boot = fw_arg0;
+ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE);
+
+ if (efi_boot)
+ set_bit(EFI_BOOT, &efi.flags);
+ else
+ clear_bit(EFI_BOOT, &efi.flags);
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+ early_memunmap(cmdline, COMMAND_LINE_SIZE);
+
+ efi_system_table = fw_arg2;
+}
+
+static int __init init_cpu_fullname(void)
+{
+ int cpu;
+
+ if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ __cpu_full_name[cpu] = loongson_sysconf.cpuname;
+ }
+ return 0;
+}
+arch_initcall(init_cpu_fullname);
+
+static ssize_t boardinfo_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf,
+ "BIOS Information\n"
+ "Vendor\t\t\t: %s\n"
+ "Version\t\t\t: %s\n"
+ "ROM Size\t\t: %d KB\n"
+ "Release Date\t\t: %s\n\n"
+ "Board Information\n"
+ "Manufacturer\t\t: %s\n"
+ "Board Name\t\t: %s\n"
+ "Family\t\t\t: LOONGSON64\n\n",
+ b_info.bios_vendor, b_info.bios_version,
+ b_info.bios_size, b_info.bios_release_date,
+ b_info.board_vendor, b_info.board_name);
+}
+
+static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
+ boardinfo_show, NULL);
+
+static int __init boardinfo_init(void)
+{
+ struct kobject *loongson_kobj;
+
+ loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+
+ return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+}
+late_initcall(boardinfo_init);
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
new file mode 100644
index 000000000..576b3370a
--- /dev/null
+++ b/arch/loongarch/kernel/fpu.S
@@ -0,0 +1,251 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Lu Zeng <zenglu@loongson.cn>
+ * Pei Huang <huangpei@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+
+#define FPU_REG_WIDTH 8
+#define LSX_REG_WIDTH 16
+#define LASX_REG_WIDTH 32
+
+ .macro EX insn, reg, src, offs
+.ex\@: \insn \reg, \src, \offs
+ .section __ex_table,"a"
+ PTR .ex\@, fault
+ .previous
+ .endm
+
+ .macro sc_save_fp base
+ EX fst.d $f0, \base, (0 * FPU_REG_WIDTH)
+ EX fst.d $f1, \base, (1 * FPU_REG_WIDTH)
+ EX fst.d $f2, \base, (2 * FPU_REG_WIDTH)
+ EX fst.d $f3, \base, (3 * FPU_REG_WIDTH)
+ EX fst.d $f4, \base, (4 * FPU_REG_WIDTH)
+ EX fst.d $f5, \base, (5 * FPU_REG_WIDTH)
+ EX fst.d $f6, \base, (6 * FPU_REG_WIDTH)
+ EX fst.d $f7, \base, (7 * FPU_REG_WIDTH)
+ EX fst.d $f8, \base, (8 * FPU_REG_WIDTH)
+ EX fst.d $f9, \base, (9 * FPU_REG_WIDTH)
+ EX fst.d $f10, \base, (10 * FPU_REG_WIDTH)
+ EX fst.d $f11, \base, (11 * FPU_REG_WIDTH)
+ EX fst.d $f12, \base, (12 * FPU_REG_WIDTH)
+ EX fst.d $f13, \base, (13 * FPU_REG_WIDTH)
+ EX fst.d $f14, \base, (14 * FPU_REG_WIDTH)
+ EX fst.d $f15, \base, (15 * FPU_REG_WIDTH)
+ EX fst.d $f16, \base, (16 * FPU_REG_WIDTH)
+ EX fst.d $f17, \base, (17 * FPU_REG_WIDTH)
+ EX fst.d $f18, \base, (18 * FPU_REG_WIDTH)
+ EX fst.d $f19, \base, (19 * FPU_REG_WIDTH)
+ EX fst.d $f20, \base, (20 * FPU_REG_WIDTH)
+ EX fst.d $f21, \base, (21 * FPU_REG_WIDTH)
+ EX fst.d $f22, \base, (22 * FPU_REG_WIDTH)
+ EX fst.d $f23, \base, (23 * FPU_REG_WIDTH)
+ EX fst.d $f24, \base, (24 * FPU_REG_WIDTH)
+ EX fst.d $f25, \base, (25 * FPU_REG_WIDTH)
+ EX fst.d $f26, \base, (26 * FPU_REG_WIDTH)
+ EX fst.d $f27, \base, (27 * FPU_REG_WIDTH)
+ EX fst.d $f28, \base, (28 * FPU_REG_WIDTH)
+ EX fst.d $f29, \base, (29 * FPU_REG_WIDTH)
+ EX fst.d $f30, \base, (30 * FPU_REG_WIDTH)
+ EX fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+ .endm
+
+ .macro sc_restore_fp base
+ EX fld.d $f0, \base, (0 * FPU_REG_WIDTH)
+ EX fld.d $f1, \base, (1 * FPU_REG_WIDTH)
+ EX fld.d $f2, \base, (2 * FPU_REG_WIDTH)
+ EX fld.d $f3, \base, (3 * FPU_REG_WIDTH)
+ EX fld.d $f4, \base, (4 * FPU_REG_WIDTH)
+ EX fld.d $f5, \base, (5 * FPU_REG_WIDTH)
+ EX fld.d $f6, \base, (6 * FPU_REG_WIDTH)
+ EX fld.d $f7, \base, (7 * FPU_REG_WIDTH)
+ EX fld.d $f8, \base, (8 * FPU_REG_WIDTH)
+ EX fld.d $f9, \base, (9 * FPU_REG_WIDTH)
+ EX fld.d $f10, \base, (10 * FPU_REG_WIDTH)
+ EX fld.d $f11, \base, (11 * FPU_REG_WIDTH)
+ EX fld.d $f12, \base, (12 * FPU_REG_WIDTH)
+ EX fld.d $f13, \base, (13 * FPU_REG_WIDTH)
+ EX fld.d $f14, \base, (14 * FPU_REG_WIDTH)
+ EX fld.d $f15, \base, (15 * FPU_REG_WIDTH)
+ EX fld.d $f16, \base, (16 * FPU_REG_WIDTH)
+ EX fld.d $f17, \base, (17 * FPU_REG_WIDTH)
+ EX fld.d $f18, \base, (18 * FPU_REG_WIDTH)
+ EX fld.d $f19, \base, (19 * FPU_REG_WIDTH)
+ EX fld.d $f20, \base, (20 * FPU_REG_WIDTH)
+ EX fld.d $f21, \base, (21 * FPU_REG_WIDTH)
+ EX fld.d $f22, \base, (22 * FPU_REG_WIDTH)
+ EX fld.d $f23, \base, (23 * FPU_REG_WIDTH)
+ EX fld.d $f24, \base, (24 * FPU_REG_WIDTH)
+ EX fld.d $f25, \base, (25 * FPU_REG_WIDTH)
+ EX fld.d $f26, \base, (26 * FPU_REG_WIDTH)
+ EX fld.d $f27, \base, (27 * FPU_REG_WIDTH)
+ EX fld.d $f28, \base, (28 * FPU_REG_WIDTH)
+ EX fld.d $f29, \base, (29 * FPU_REG_WIDTH)
+ EX fld.d $f30, \base, (30 * FPU_REG_WIDTH)
+ EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+ .endm
+
+ .macro sc_save_fcc base, tmp0, tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.d \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.d \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.d \tmp1, \tmp0, 31, 24
+ movcf2gr \tmp0, $fcc4
+ bstrins.d \tmp1, \tmp0, 39, 32
+ movcf2gr \tmp0, $fcc5
+ bstrins.d \tmp1, \tmp0, 47, 40
+ movcf2gr \tmp0, $fcc6
+ bstrins.d \tmp1, \tmp0, 55, 48
+ movcf2gr \tmp0, $fcc7
+ bstrins.d \tmp1, \tmp0, 63, 56
+ EX st.d \tmp1, \base, 0
+ .endm
+
+ .macro sc_restore_fcc base, tmp0, tmp1
+ EX ld.d \tmp0, \base, 0
+ bstrpick.d \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.d \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.d \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.d \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ bstrpick.d \tmp1, \tmp0, 39, 32
+ movgr2cf $fcc4, \tmp1
+ bstrpick.d \tmp1, \tmp0, 47, 40
+ movgr2cf $fcc5, \tmp1
+ bstrpick.d \tmp1, \tmp0, 55, 48
+ movgr2cf $fcc6, \tmp1
+ bstrpick.d \tmp1, \tmp0, 63, 56
+ movgr2cf $fcc7, \tmp1
+ .endm
+
+ .macro sc_save_fcsr base, tmp0
+ movfcsr2gr \tmp0, fcsr0
+ EX st.w \tmp0, \base, 0
+ .endm
+
+ .macro sc_restore_fcsr base, tmp0
+ EX ld.w \tmp0, \base, 0
+ movgr2fcsr fcsr0, \tmp0
+ .endm
+
+/*
+ * Save a thread's fp context.
+ */
+SYM_FUNC_START(_save_fp)
+ fpu_save_csr a0 t1
+ fpu_save_double a0 t1 # clobbers t1
+ fpu_save_cc a0 t1 t2 # clobbers t1, t2
+ jr ra
+SYM_FUNC_END(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+SYM_FUNC_START(_restore_fp)
+ fpu_restore_double a0 t1 # clobbers t1
+ fpu_restore_csr a0 t1
+ fpu_restore_cc a0 t1 t2 # clobbers t1, t2
+ jr ra
+SYM_FUNC_END(_restore_fp)
+
+/*
+ * Load the FPU with signaling NaNs. The bit pattern we use has the
+ * property that it represents a signaling NaN whether it is interpreted
+ * as single or as double precision.
+ *
+ * The value to initialize fcsr0 to comes in $a0.
+ */
+
+SYM_FUNC_START(_init_fpu)
+ li.w t1, CSR_EUEN_FPEN
+ csrxchg t1, t1, LOONGARCH_CSR_EUEN
+
+ movgr2fcsr fcsr0, a0
+
+ li.w t1, -1 # SNaN
+
+ movgr2fr.d $f0, t1
+ movgr2fr.d $f1, t1
+ movgr2fr.d $f2, t1
+ movgr2fr.d $f3, t1
+ movgr2fr.d $f4, t1
+ movgr2fr.d $f5, t1
+ movgr2fr.d $f6, t1
+ movgr2fr.d $f7, t1
+ movgr2fr.d $f8, t1
+ movgr2fr.d $f9, t1
+ movgr2fr.d $f10, t1
+ movgr2fr.d $f11, t1
+ movgr2fr.d $f12, t1
+ movgr2fr.d $f13, t1
+ movgr2fr.d $f14, t1
+ movgr2fr.d $f15, t1
+ movgr2fr.d $f16, t1
+ movgr2fr.d $f17, t1
+ movgr2fr.d $f18, t1
+ movgr2fr.d $f19, t1
+ movgr2fr.d $f20, t1
+ movgr2fr.d $f21, t1
+ movgr2fr.d $f22, t1
+ movgr2fr.d $f23, t1
+ movgr2fr.d $f24, t1
+ movgr2fr.d $f25, t1
+ movgr2fr.d $f26, t1
+ movgr2fr.d $f27, t1
+ movgr2fr.d $f28, t1
+ movgr2fr.d $f29, t1
+ movgr2fr.d $f30, t1
+ movgr2fr.d $f31, t1
+
+ jr ra
+SYM_FUNC_END(_init_fpu)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_save_fp_context)
+ sc_save_fcc a1 t1 t2
+ sc_save_fcsr a2 t1
+ sc_save_fp a0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_fp_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_restore_fp_context)
+ sc_restore_fp a0
+ sc_restore_fcc a1 t1 t2
+ sc_restore_fcsr a2 t1
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_fp_context)
+
+SYM_FUNC_START(fault)
+ li.w a0, -EFAULT # failure
+ jr ra
+SYM_FUNC_END(fault)
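
A note on the sc_save_fcc/sc_restore_fcc macros above: each of the eight 1-bit condition-flag registers is packed into its own byte of a single 64-bit word (fcc0 in bits 7:0, fcc1 in bits 15:8, and so on). For example, fcc0 = 1 and fcc3 = 1 with the other flags clear would be saved as 0x0000000001000001.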
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
new file mode 100644
index 000000000..75e5be807
--- /dev/null
+++ b/arch/loongarch/kernel/genex.S
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+ .align 5
+SYM_FUNC_START(__arch_cpu_idle)
+ /* start of rollback region */
+ LONG_L t0, tp, TI_FLAGS
+ nop
+ andi t0, t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+ idle 0
+ /* end of rollback region */
+1: jr ra
+SYM_FUNC_END(__arch_cpu_idle)
+
+SYM_FUNC_START(handle_vint)
+ BACKUP_T0T1
+ SAVE_ALL
+ la.abs t1, __arch_cpu_idle
+ LONG_L t0, sp, PT_ERA
+ /* 32 byte rollback region */
+ ori t0, t0, 0x1f
+ xori t0, t0, 0x1f
+ bne t0, t1, 1f
+ LONG_S t0, sp, PT_ERA
+1: move a0, sp
+ move a1, sp
+ la.abs t0, do_vint
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+SYM_FUNC_END(handle_vint)
+
+SYM_FUNC_START(except_vec_cex)
+ b cache_parity_error
+SYM_FUNC_END(except_vec_cex)
+
+ .macro build_prep_badv
+ csrrd t0, LOONGARCH_CSR_BADV
+ PTR_S t0, sp, PT_BVADDR
+ .endm
+
+ .macro build_prep_fcsr
+ movfcsr2gr a1, fcsr0
+ .endm
+
+ .macro build_prep_none
+ .endm
+
+ .macro BUILD_HANDLER exception handler prep
+ .align 5
+ SYM_FUNC_START(handle_\exception)
+ BACKUP_T0T1
+ SAVE_ALL
+ build_prep_\prep
+ move a0, sp
+ la.abs t0, do_\handler
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+ SYM_FUNC_END(handle_\exception)
+ .endm
+
+ BUILD_HANDLER ade ade badv
+ BUILD_HANDLER ale ale badv
+ BUILD_HANDLER bp bp none
+ BUILD_HANDLER fpe fpe fcsr
+ BUILD_HANDLER fpu fpu none
+ BUILD_HANDLER lsx lsx none
+ BUILD_HANDLER lasx lasx none
+ BUILD_HANDLER lbt lbt none
+ BUILD_HANDLER ri ri none
+ BUILD_HANDLER watch watch none
+ BUILD_HANDLER reserved reserved none /* others */
+
+SYM_FUNC_START(handle_sys)
+ la.abs t0, handle_syscall
+ jr t0
+SYM_FUNC_END(handle_sys)
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
new file mode 100644
index 000000000..84970e266
--- /dev/null
+++ b/arch/loongarch/kernel/head.S
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/bug.h>
+#include <asm/regdef.h>
+#include <asm/loongarch.h>
+#include <asm/stackframe.h>
+
+#ifdef CONFIG_EFI_STUB
+
+#include "efi-header.S"
+
+ __HEAD
+
+_head:
+ .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .org 0x8
+ .dword kernel_entry /* Kernel entry point */
+ .dword _end - _text /* Kernel image effective size */
+ .quad 0 /* Kernel image load offset from start of RAM */
+ .org 0x3c /* 0x20 ~ 0x3b reserved */
+ .long pe_header - _head /* Offset to the PE header */
+
+pe_header:
+ __EFI_PE_HEADER
+
+SYM_DATA(kernel_asize, .long _end - _text);
+SYM_DATA(kernel_fsize, .long _edata - _text);
+SYM_DATA(kernel_offset, .long kernel_offset - _text);
+
+#endif
+
+ __REF
+
+ .align 12
+
+SYM_CODE_START(kernel_entry) # kernel entry point
+
+ /* Config direct window and set PG */
+ li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ la.abs t0, 0f
+ jr t0
+0:
+ /* Enable PG */
+ li.w t0, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr t0, LOONGARCH_CSR_CRMD
+ li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr t0, LOONGARCH_CSR_PRMD
+ li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr t0, LOONGARCH_CSR_EUEN
+
+ la.pcrel t0, __bss_start # clear .bss
+ st.d zero, t0, 0
+ la.pcrel t1, __bss_stop - LONGSIZE
+1:
+ addi.d t0, t0, LONGSIZE
+ st.d zero, t0, 0
+ bne t0, t1, 1b
+
+ la.pcrel t0, fw_arg0
+ st.d a0, t0, 0 # firmware arguments
+ la.pcrel t0, fw_arg1
+ st.d a1, t0, 0
+ la.pcrel t0, fw_arg2
+ st.d a2, t0, 0
+
+ /* KSave3 used for percpu base, initialized as 0 */
+ csrwr zero, PERCPU_BASE_KS
+ /* GPR21 used for percpu base (runtime), initialized as 0 */
+ move u0, zero
+
+ la.pcrel tp, init_thread_union
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
+ PTR_ADD sp, sp, tp
+ set_saved_sp sp, t0, t1
+
+ bl start_kernel
+ ASM_BUG()
+
+SYM_CODE_END(kernel_entry)
+
+#ifdef CONFIG_SMP
+
+/*
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * function after setting up the stack and tp registers.
+ */
+SYM_CODE_START(smpboot_entry)
+ li.d t0, CSR_DMW0_INIT # UC, PLV0
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ li.d t0, CSR_DMW1_INIT # CA, PLV0
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+
+ la.abs t0, 0f
+ jr t0
+0:
+ /* Enable PG */
+ li.w t0, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr t0, LOONGARCH_CSR_CRMD
+ li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr t0, LOONGARCH_CSR_PRMD
+ li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr t0, LOONGARCH_CSR_EUEN
+
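+	/* Fetch this CPU's boot stack and thread info from cpuboot_data */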
+ la.abs t0, cpuboot_data
+ ld.d sp, t0, CPU_BOOT_STACK
+ ld.d tp, t0, CPU_BOOT_TINFO
+
+ bl start_secondary
+ ASM_BUG()
+
+SYM_CODE_END(smpboot_entry)
+
+#endif /* CONFIG_SMP */
+
+SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
new file mode 100644
index 000000000..1a65d0527
--- /dev/null
+++ b/arch/loongarch/kernel/idle.c
@@ -0,0 +1,16 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch idle loop support.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+#include <asm/cpu.h>
+#include <asm/idle.h>
+
+void __cpuidle arch_cpu_idle(void)
+{
+ raw_local_irq_enable();
+ __arch_cpu_idle(); /* idle instruction needs irq enabled */
+}
diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h
new file mode 100644
index 000000000..88f5d8170
--- /dev/null
+++ b/arch/loongarch/kernel/image-vars.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __LOONGARCH_KERNEL_IMAGE_VARS_H
+#define __LOONGARCH_KERNEL_IMAGE_VARS_H
+
+#ifdef CONFIG_EFI_STUB
+
+__efistub_memcmp = memcmp;
+__efistub_memchr = memchr;
+__efistub_strcat = strcat;
+__efistub_strcmp = strcmp;
+__efistub_strlen = strlen;
+__efistub_strncat = strncat;
+__efistub_strnstr = strnstr;
+__efistub_strnlen = strnlen;
+__efistub_strrchr = strrchr;
+__efistub_kernel_entry = kernel_entry;
+__efistub_kernel_asize = kernel_asize;
+__efistub_kernel_fsize = kernel_fsize;
+__efistub_kernel_offset = kernel_offset;
+__efistub_screen_info = screen_info;
+
+#endif
+
+#endif /* __LOONGARCH_KERNEL_IMAGE_VARS_H */
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
new file mode 100644
index 000000000..b1df0ec34
--- /dev/null
+++ b/arch/loongarch/kernel/inst.c
@@ -0,0 +1,40 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/inst.h>
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
+{
+ union loongarch_instruction insn;
+
+ insn.reg1i20_format.opcode = lu32id_op;
+ insn.reg1i20_format.rd = rd;
+ insn.reg1i20_format.immediate = imm;
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ insn.reg2i12_format.opcode = lu52id_op;
+ insn.reg2i12_format.rd = rd;
+ insn.reg2i12_format.rj = rj;
+ insn.reg2i12_format.immediate = imm;
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, unsigned long pc, unsigned long dest)
+{
+ union loongarch_instruction insn;
+
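+	/* The jirl immediate is a 16-bit offset counted in 4-byte instruction words */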
+ insn.reg2i16_format.opcode = jirl_op;
+ insn.reg2i16_format.rd = rd;
+ insn.reg2i16_format.rj = rj;
+ insn.reg2i16_format.immediate = (dest - pc) >> 2;
+
+ return insn.word;
+}
diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c
new file mode 100644
index 000000000..cb85bda5a
--- /dev/null
+++ b/arch/loongarch/kernel/io.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ *(u64 *)to = __raw_readq(from);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(*(u64 *)from, to);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
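+	/* Replicate the fill byte into every byte lane of a 64-bit word */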
+ u64 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+ qc |= qc << 32;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(qc, dst);
+ dst += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
new file mode 100644
index 000000000..0524bf116
--- /dev/null
+++ b/arch/loongarch/kernel/irq.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/kernel_stat.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/loongson.h>
+#include <asm/setup.h>
+
+DEFINE_PER_CPU(unsigned long, irq_stack);
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+struct acpi_vector_group pch_group[MAX_IO_PICS];
+struct acpi_vector_group msi_group[MAX_IO_PICS];
+/*
+ * 'What should we do if we get a hw irq event on an illegal vector?'
+ * Each architecture has to answer this for itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ pr_warn("Unexpected IRQ # %d\n", irq);
+}
+
+atomic_t irq_err_count;
+
+asmlinkage void spurious_interrupt(void)
+{
+ atomic_inc(&irq_err_count);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ return 0;
+}
+
+static int __init early_pci_mcfg_parse(struct acpi_table_header *header)
+{
+ struct acpi_table_mcfg *mcfg;
+ struct acpi_mcfg_allocation *mptr;
+ int i, n;
+
+ if (header->length < sizeof(struct acpi_table_mcfg))
+ return -EINVAL;
+
+ n = (header->length - sizeof(struct acpi_table_mcfg)) /
+ sizeof(struct acpi_mcfg_allocation);
+ mcfg = (struct acpi_table_mcfg *)header;
+ mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
+
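+	/* The node id of each MCFG entry is encoded in bits [47:44] of its base address */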
+ for (i = 0; i < n; i++, mptr++) {
+ msi_group[i].pci_segment = mptr->pci_segment;
+ pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
+ }
+
+ return 0;
+}
+
+static void __init init_vec_parent_group(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ msi_group[i].pci_segment = -1;
+ msi_group[i].node = -1;
+ pch_group[i].node = -1;
+ }
+
+ acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
+}
+
+static int __init get_ipi_irq(void)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+ if (d)
+ return irq_create_mapping(d, EXCCODE_IPI - EXCCODE_INT_START);
+
+ return -EINVAL;
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+#ifdef CONFIG_SMP
+ int r, ipi_irq;
+ static int ipi_dummy_dev;
+#endif
+ unsigned int order = get_order(IRQ_STACK_SIZE);
+ struct page *page;
+
+ clear_csr_ecfg(ECFG0_IM);
+ clear_csr_estat(ESTATF_IP);
+
+ init_vec_parent_group();
+ irqchip_init();
+#ifdef CONFIG_SMP
+ ipi_irq = get_ipi_irq();
+ if (ipi_irq < 0)
+ panic("IPI IRQ mapping failed\n");
+ irq_set_percpu_devid(ipi_irq);
+ r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
+ if (r < 0)
+ panic("IPI IRQ request failed\n");
+#endif
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
+
+ for_each_possible_cpu(i) {
+ page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
+
+ per_cpu(irq_stack, i) = (unsigned long)page_address(page);
+ pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i,
+ per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
+ }
+
+ set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
+}
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
new file mode 100644
index 000000000..2dcb9e003
--- /dev/null
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * machine_kexec.c for kexec
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/libfdt.h>
+#include <linux/mm.h>
+#include <linux/of_fdt.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+/* 0x100000 ~ 0x200000 is safe */
+#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL)
+#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL)
+
+static unsigned long reboot_code_buffer;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void (*relocated_kexec_smp_wait)(void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
+static unsigned long efi_boot;
+static unsigned long cmdline_ptr;
+static unsigned long systable_ptr;
+static unsigned long start_addr;
+static unsigned long first_ind_entry;
+
+static void kexec_image_info(const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("kexec kimage info:\n");
+ pr_debug("\ttype: %d\n", kimage->type);
+ pr_debug("\tstart: %lx\n", kimage->start);
+ pr_debug("\thead: %lx\n", kimage->head);
+ pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug("\t segment[%lu]: %016lx - %016lx", i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz);
+ pr_debug("\t\t0x%lx bytes, %lu pages\n",
+ (unsigned long)kimage->segment[i].memsz,
+ (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ int i;
+ char *bootloader = "kexec";
+ void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
+
+ kexec_image_info(kimage);
+
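+	/* Pass our own firmware arguments (EFI boot flag and system table) on to the new kernel */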
+ kimage->arch.efi_boot = fw_arg0;
+ kimage->arch.systable_ptr = fw_arg2;
+
+ /* Find the command line */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
+ if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
+ kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
+ break;
+ }
+ }
+
+ if (!kimage->arch.cmdline_ptr) {
+ pr_err("Command line not included in the provided image\n");
+ return -EINVAL;
+ }
+
+ /* kexec/kdump need a safe page to save reboot_code_buffer */
+ kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
+
+ reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
+ memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
+
+#ifdef CONFIG_SMP
+	/* All secondary CPUs may now jump to the kexec_smp_wait loop */
+ relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
+#endif
+
+ return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void kexec_reboot(void)
+{
+ do_kexec_t do_kexec = NULL;
+
+ /*
+ * We know we were online, and there will be no incoming IPIs at
+ * this point. Mark online again before rebooting so that the crash
+ * analysis tool will see us correctly.
+ */
+ set_cpu_online(smp_processor_id(), true);
+
+ /* Ensure remote CPUs observe that we're online before rebooting. */
+ smp_mb__after_atomic();
+
+ /*
+ * Make sure we get correct instructions written by the
+ * machine_kexec_prepare() CPU.
+ */
+ __asm__ __volatile__ ("\tibar 0\n"::);
+
+#ifdef CONFIG_SMP
+ /* All secondary cpus go to kexec_smp_wait */
+ if (smp_processor_id() > 0) {
+ relocated_kexec_smp_wait(NULL);
+ unreachable();
+ }
+#endif
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
+
+ unreachable();
+}
+
+
+#ifdef CONFIG_SMP
+static void kexec_shutdown_secondary(void *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+}
+
+static void crash_shutdown_secondary(void *passed_regs)
+{
+ int cpu = smp_processor_id();
+ struct pt_regs *regs = passed_regs;
+
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+}
+
+void crash_smp_send_stop(void)
+{
+ unsigned int ncpus;
+ unsigned long timeout;
+ static int cpus_stopped;
+
+ /*
+ * This function can be called twice in panic path, but obviously
+ * we should execute this only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ cpus_stopped = 1;
+
+ /* Excluding the panic cpu */
+ ncpus = num_online_cpus() - 1;
+
+ smp_call_function(crash_shutdown_secondary, NULL, 0);
+ smp_wmb();
+
+ /*
+	 * The crash CPU sends an IPI and waits for the other CPUs to
+	 * respond, allowing up to 10 seconds for them to do so.
+ */
+ timeout = MSEC_PER_SEC * 10;
+ pr_emerg("Sending IPI to other cpus...\n");
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
+ mdelay(1);
+ cpu_relax();
+ }
+}
+#endif /* defined(CONFIG_SMP) */
+
+void machine_shutdown(void)
+{
+ int cpu;
+
+ /* All CPUs go to reboot_code_buffer */
+ for_each_possible_cpu(cpu)
+ if (!cpu_online(cpu))
+ cpu_device_up(get_cpu_device(cpu));
+
+#ifdef CONFIG_SMP
+ smp_call_function(kexec_shutdown_secondary, NULL, 0);
+#endif
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ int crashing_cpu;
+
+ local_irq_disable();
+
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+
+#ifdef CONFIG_SMP
+ crash_smp_send_stop();
+#endif
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
+
+ pr_info("Starting crashdump kernel...\n");
+}
+
+void machine_kexec(struct kimage *image)
+{
+ unsigned long entry, *ptr;
+ struct kimage_arch *internal = &image->arch;
+
+ efi_boot = internal->efi_boot;
+ cmdline_ptr = internal->cmdline_ptr;
+ systable_ptr = internal->systable_ptr;
+
+ start_addr = (unsigned long)phys_to_virt(image->start);
+
+ first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
+ (unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;
+
+ /*
+ * The generic kexec code builds a page list with physical
+	 * addresses. They are directly accessible through XKPRANGE,
+	 * hence the phys_to_virt() call.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = (unsigned long) phys_to_virt(*ptr);
+ }
+
+ /* Mark offline before disabling local irq. */
+ set_cpu_online(smp_processor_id(), false);
+
+ /* We do not want to be bothered. */
+ local_irq_disable();
+
+ pr_notice("EFI boot flag 0x%lx\n", efi_boot);
+ pr_notice("Command line at 0x%lx\n", cmdline_ptr);
+ pr_notice("System table at 0x%lx\n", systable_ptr);
+ pr_notice("We will call new kernel at 0x%lx\n", start_addr);
+ pr_notice("Bye ...\n");
+
+ /* Make reboot code buffer available to the boot CPU. */
+ flush_cache_all();
+
+#ifdef CONFIG_SMP
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
+
+ kexec_reboot();
+}
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
new file mode 100644
index 000000000..aed901c57
--- /dev/null
+++ b/arch/loongarch/kernel/mem.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/efi.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/sections.h>
+
+void __init memblock_init(void)
+{
+ u32 mem_type;
+ u64 mem_start, mem_end, mem_size;
+ efi_memory_desc_t *md;
+
+ /* Parse memory information */
+ for_each_efi_memory_desc(md) {
+ mem_type = md->type;
+ mem_start = md->phys_addr;
+ mem_size = md->num_pages << EFI_PAGE_SHIFT;
+ mem_end = mem_start + mem_size;
+
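+		/* Add usable RAM to memblock and reserve firmware/ACPI ranges */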
+ switch (mem_type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_PERSISTENT_MEMORY:
+ case EFI_CONVENTIONAL_MEMORY:
+ memblock_add(mem_start, mem_size);
+ if (max_low_pfn < (mem_end >> PAGE_SHIFT))
+ max_low_pfn = mem_end >> PAGE_SHIFT;
+ break;
+ case EFI_PAL_CODE:
+ case EFI_UNUSABLE_MEMORY:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ memblock_add(mem_start, mem_size);
+ fallthrough;
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ memblock_reserve(mem_start, mem_size);
+ break;
+ }
+ }
+
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
+ /* Reserve the first 2MB */
+ memblock_reserve(PHYS_OFFSET, 0x200000);
+
+ /* Reserve the kernel text/data/bss */
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
+
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
+}
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
new file mode 100644
index 000000000..d296a70b7
--- /dev/null
+++ b/arch/loongarch/kernel/module-sections.c
@@ -0,0 +1,170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+Elf_Addr module_emit_got_entry(struct module *mod, Elf_Addr val)
+{
+ struct mod_section *got_sec = &mod->arch.got;
+ int i = got_sec->num_entries;
+ struct got_entry *got = get_got_entry(val, got_sec);
+
+ if (got)
+ return (Elf_Addr)got;
+
+ /* There is no GOT entry for val yet, create a new one. */
+ got = (struct got_entry *)got_sec->shdr->sh_addr;
+ got[i] = emit_got_entry(val);
+
+ got_sec->num_entries++;
+ if (got_sec->num_entries > got_sec->max_entries) {
+ /*
+ * This may happen when the module contains a GOT_HI20 without
+ * a paired GOT_LO12. Such a module is broken, reject it.
+ */
+ pr_err("%s: module contains bad GOT relocation\n", mod->name);
+ return 0;
+ }
+
+ return (Elf_Addr)&got[i];
+}
+
+Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Addr val)
+{
+ int nr;
+ struct mod_section *plt_sec = &mod->arch.plt;
+ struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
+ struct plt_entry *plt = get_plt_entry(val, plt_sec, plt_idx_sec);
+ struct plt_idx_entry *plt_idx;
+
+ if (plt)
+ return (Elf_Addr)plt;
+
+ nr = plt_sec->num_entries;
+
+ /* There is no duplicate entry, create a new one */
+ plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
+ plt[nr] = emit_plt_entry(val);
+ plt_idx = (struct plt_idx_entry *)plt_idx_sec->shdr->sh_addr;
+ plt_idx[nr] = emit_plt_idx_entry(val);
+
+ plt_sec->num_entries++;
+ plt_idx_sec->num_entries++;
+ BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
+
+ return (Elf_Addr)&plt[nr];
+}
+
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+{
+ return x->r_info == y->r_info && x->r_addend == y->r_addend;
+}
+
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
+{
+ int i;
+
+ for (i = 0; i < idx; i++) {
+ if (is_rela_equal(&rela[i], &rela[idx]))
+ return true;
+ }
+
+ return false;
+}
+
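+/*
+ * Count, worst case, how many unique PLT and GOT entries the module may
+ * need: each unique branch relocation may need a PLT slot, each unique
+ * GOT_PC_HI20 relocation a GOT slot.
+ */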
+static void count_max_entries(Elf_Rela *relas, int num,
+ unsigned int *plts, unsigned int *gots)
+{
+ unsigned int i, type;
+
+ for (i = 0; i < num; i++) {
+ type = ELF_R_TYPE(relas[i].r_info);
+ switch (type) {
+ case R_LARCH_SOP_PUSH_PLT_PCREL:
+ case R_LARCH_B26:
+ if (!duplicate_rela(relas, i))
+ (*plts)++;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+ if (!duplicate_rela(relas, i))
+ (*gots)++;
+ break;
+ default:
+ break; /* Do nothing. */
+ }
+ }
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int i, num_plts = 0, num_gots = 0;
+
+ /*
+	 * Find the empty .got, .plt and .plt.idx sections.
+ */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
+ mod->arch.got.shdr = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+ mod->arch.plt.shdr = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
+ mod->arch.plt_idx.shdr = sechdrs + i;
+ }
+
+ if (!mod->arch.got.shdr) {
+ pr_err("%s: module GOT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.plt.shdr) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.plt_idx.shdr) {
+ pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+	/* Calculate the maximum number of entries */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+ Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+ Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dst_sec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ count_max_entries(relas, num_rela, &num_plts, &num_gots);
+ }
+
+ mod->arch.got.shdr->sh_type = SHT_NOBITS;
+ mod->arch.got.shdr->sh_flags = SHF_ALLOC;
+ mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
+ mod->arch.got.num_entries = 0;
+ mod->arch.got.max_entries = num_gots;
+
+ mod->arch.plt.shdr->sh_type = SHT_NOBITS;
+ mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.plt.num_entries = 0;
+ mod->arch.plt.max_entries = num_plts;
+
+ mod->arch.plt_idx.shdr->sh_type = SHT_NOBITS;
+ mod->arch.plt_idx.shdr->sh_flags = SHF_ALLOC;
+ mod->arch.plt_idx.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt_idx.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
+ mod->arch.plt_idx.num_entries = 0;
+ mod->arch.plt_idx.max_entries = num_plts;
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
new file mode 100644
index 000000000..4f1e6e55d
--- /dev/null
+++ b/arch/loongarch/kernel/module.c
@@ -0,0 +1,458 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "kmod: " fmt
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
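+/*
+ * The stack-based (R_LARCH_SOP_*) relocations work on a small expression
+ * stack: PUSH relocs push operands, SOP relocs combine the top entries,
+ * and POP relocs write the result into an instruction's immediate field.
+ */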
+static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+ if (*rela_stack_top >= RELA_STACK_DEPTH)
+ return -ENOEXEC;
+
+ rela_stack[(*rela_stack_top)++] = stack_value;
+ pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
+
+ return 0;
+}
+
+static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+ if (*rela_stack_top == 0)
+ return -ENOEXEC;
+
+ *stack_value = rela_stack[--(*rela_stack_top)];
+ pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
+
+ return 0;
+}
+
+static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return 0;
+}
+
+static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ pr_err("%s: Unsupport relocation type %u, please add its support.\n", me->name, type);
+ return -EINVAL;
+}
+
+static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ *location = v;
+ return 0;
+}
+
+static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ *(Elf_Addr *)location = v;
+ return 0;
+}
+
+static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return rela_stack_push(v, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1;
+
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int apply_r_larch_sop_push_plt_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ if (offset >= SZ_128M)
+ v = module_emit_plt_entry(mod, v);
+
+ if (offset < -SZ_128M)
+ v = module_emit_plt_entry(mod, v);
+
+ return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
+}
+
+static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1, opr2, opr3;
+
+ if (type == R_LARCH_SOP_IF_ELSE) {
+ err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ }
+
+ err = rela_stack_pop(&opr2, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ switch (type) {
+ case R_LARCH_SOP_AND:
+ err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_ADD:
+ err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SUB:
+ err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SL:
+ err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SR:
+ err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_IF_ELSE:
+ err = rela_stack_push(opr1 ? opr2 : opr3, rela_stack, rela_stack_top);
+ break;
+ default:
+ pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1;
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ switch (type) {
+ case R_LARCH_SOP_POP_32_U_10_12:
+ if (!unsigned_imm_check(opr1, 12))
+ goto overflow;
+
+ /* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */
+ insn->reg2i12_format.immediate = opr1 & 0xfff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_12:
+ if (!signed_imm_check(opr1, 12))
+ goto overflow;
+
+ insn->reg2i12_format.immediate = opr1 & 0xfff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_16:
+ if (!signed_imm_check(opr1, 16))
+ goto overflow;
+
+ insn->reg2i16_format.immediate = opr1 & 0xffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 18))
+ goto overflow;
+
+ insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_5_20:
+ if (!signed_imm_check(opr1, 20))
+ goto overflow;
+
+ insn->reg1i20_format.immediate = (opr1) & 0xfffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 23))
+ goto overflow;
+
+ opr1 >>= 2;
+ insn->reg1i21_format.immediate_l = opr1 & 0xffff;
+ insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 28))
+ goto overflow;
+
+ opr1 >>= 2;
+ insn->reg0i26_format.immediate_l = opr1 & 0xffff;
+ insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff;
+ return 0;
+ case R_LARCH_SOP_POP_32_U:
+ if (!unsigned_imm_check(opr1, 32))
+ goto overflow;
+
+ /* (*(uint32_t *) PC) = opr */
+ *location = (u32)opr1;
+ return 0;
+ default:
+ pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+overflow:
+ pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
+ mod->name, opr1, __func__, type);
+ return -ENOEXEC;
+
+unaligned:
+ pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
+ mod->name, opr1, __func__, type);
+ return -ENOEXEC;
+}
+
+static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ switch (type) {
+ case R_LARCH_ADD32:
+ *(s32 *)location += v;
+ return 0;
+ case R_LARCH_ADD64:
+ *(s64 *)location += v;
+ return 0;
+ case R_LARCH_SUB32:
+ *(s32 *)location -= v;
+ return 0;
+ case R_LARCH_SUB64:
+ *(s64 *)location -= v;
+ return 0;
+ default:
+ pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+}
+
+static int apply_r_larch_b26(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+
+ if (offset >= SZ_128M)
+ v = module_emit_plt_entry(mod, v);
+
+ if (offset < -SZ_128M)
+ v = module_emit_plt_entry(mod, v);
+
+ offset = (void *)v - (void *)location;
+
+ if (offset & 3) {
+ pr_err("module %s: jump offset = 0x%llx unaligned! dangerous R_LARCH_B26 (%u) relocation\n",
+ mod->name, (long long)offset, type);
+ return -ENOEXEC;
+ }
+
+ if (!signed_imm_check(offset, 28)) {
+ pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n",
+ mod->name, (long long)offset, type);
+ return -ENOEXEC;
+ }
+
+ offset >>= 2;
+ insn->reg0i26_format.immediate_l = offset & 0xffff;
+ insn->reg0i26_format.immediate_h = (offset >> 16) & 0x3ff;
+
+ return 0;
+}
+
+static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+ /* Use s32 for a sign-extension deliberately. */
+ s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
+ (void *)((Elf_Addr)location & ~0xfff);
+ Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
+ ptrdiff_t offset_rem = (void *)v - (void *)anchor;
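+	/*
+	 * Adding 0x800 rounds v to the nearest 4KB page, so the paired
+	 * PCALA_LO12 value (v & 0xfff) acts as a signed 12-bit offset
+	 * from the page-aligned anchor computed above.
+	 */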
+
+ switch (type) {
+ case R_LARCH_PCALA_LO12:
+ insn->reg2i12_format.immediate = v & 0xfff;
+ break;
+ case R_LARCH_PCALA_HI20:
+ v = offset_hi20 >> 12;
+ insn->reg1i20_format.immediate = v & 0xfffff;
+ break;
+ case R_LARCH_PCALA64_LO20:
+ v = offset_rem >> 32;
+ insn->reg1i20_format.immediate = v & 0xfffff;
+ break;
+ case R_LARCH_PCALA64_HI12:
+ v = offset_rem >> 52;
+ insn->reg2i12_format.immediate = v & 0xfff;
+ break;
+ default:
+ pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int apply_r_larch_got_pc(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ Elf_Addr got = module_emit_got_entry(mod, v);
+
+ if (!got)
+ return -EINVAL;
+
+ switch (type) {
+ case R_LARCH_GOT_PC_LO12:
+ type = R_LARCH_PCALA_LO12;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+ type = R_LARCH_PCALA_HI20;
+ break;
+ default:
+ pr_err("%s: Unsupport relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
+}
+
+/*
+ * reloc_handlers_rela() - Apply a particular relocation to a module
+ * @mod: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela_stack: the stack used to store relocation info, LOCAL to THIS module
+ * @rela_stack_top: index at which the stack operations (push/pop) apply
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
+
+/* The handlers for known reloc types */
+static reloc_rela_handler reloc_rela_handlers[] = {
+ [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
+
+ [R_LARCH_NONE] = apply_r_larch_none,
+ [R_LARCH_32] = apply_r_larch_32,
+ [R_LARCH_64] = apply_r_larch_64,
+ [R_LARCH_MARK_LA] = apply_r_larch_none,
+ [R_LARCH_MARK_PCREL] = apply_r_larch_none,
+ [R_LARCH_SOP_PUSH_PCREL] = apply_r_larch_sop_push_pcrel,
+ [R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute,
+ [R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup,
+ [R_LARCH_SOP_PUSH_PLT_PCREL] = apply_r_larch_sop_push_plt_pcrel,
+ [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
+ [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
+ [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
+ [R_LARCH_B26] = apply_r_larch_b26,
+ [R_LARCH_PCALA_HI20...R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
+ [R_LARCH_GOT_PC_HI20...R_LARCH_GOT_PC_LO12] = apply_r_larch_got_pc,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *mod)
+{
+ int i, err;
+ unsigned int type;
+ s64 rela_stack[RELA_STACK_DEPTH];
+ size_t rela_stack_top = 0;
+ reloc_rela_handler handler;
+ void *location;
+ Elf_Addr v;
+ Elf_Sym *sym;
+ Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+
+ pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
+ sechdrs[relsec].sh_info);
+
+ rela_stack_top = 0;
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ type = ELF_R_TYPE(rel[i].r_info);
+
+ if (type < ARRAY_SIZE(reloc_rela_handlers))
+ handler = reloc_rela_handlers[type];
+ else
+ handler = NULL;
+
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
+ (int)ELF_R_TYPE(rel[i].r_info),
+ sym->st_value, rel[i].r_addend, (u64)location);
+
+ v = sym->st_value + rel[i].r_addend;
+ err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
+}
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
new file mode 100644
index 000000000..f7ffce170
--- /dev/null
+++ b/arch/loongarch/kernel/numa.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Xiang Gao <gaoxiang@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+int numa_off;
+struct pglist_data *node_data[MAX_NUMNODES];
+unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(node_distances);
+
+static struct numa_meminfo numa_meminfo;
+cpumask_t cpus_on_node[MAX_NUMNODES];
+cpumask_t phys_cpus_on_node[MAX_NUMNODES];
+EXPORT_SYMBOL(cpus_on_node);
+
+/*
+ * apicid, cpu, node mappings
+ */
+s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
+ [0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
+};
+EXPORT_SYMBOL(__cpuid_to_node);
+
+nodemask_t numa_nodes_parsed __initdata;
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return early_cpu_to_node(cpu);
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
+ return LOCAL_DISTANCE;
+ else
+ return REMOTE_DISTANCE;
+}
+
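+/* Populate the kernel page-table levels that back a page-first-chunk percpu address */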
+void __init pcpu_populate_pte(unsigned long addr)
+{
+ pgd_t *pgd = pgd_offset_k(addr);
+ p4d_t *p4d = p4d_offset(pgd, addr);
+ pud_t *pud;
+ pmd_t *pmd;
+
+ if (p4d_none(*p4d)) {
+ pud_t *new;
+
+ new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ pgd_populate(&init_mm, pgd, new);
+#ifndef __PAGETABLE_PUD_FOLDED
+ pud_init((unsigned long)new, (unsigned long)invalid_pmd_table);
+#endif
+ }
+
+ pud = pud_offset(p4d, addr);
+ if (pud_none(*pud)) {
+ pmd_t *new;
+
+ new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ pud_populate(&init_mm, pud, new);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_init((unsigned long)new, (unsigned long)invalid_pte_table);
+#endif
+ }
+
+ pmd = pmd_offset(pud, addr);
+ if (!pmd_present(*pmd)) {
+ pte_t *new;
+
+ new = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
+ pmd_populate_kernel(&init_mm, pmd, new);
+ }
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc = -EINVAL;
+
+ if (pcpu_chosen_fc == PCPU_FC_AUTO) {
+ if (nr_node_ids >= 8)
+ pcpu_chosen_fc = PCPU_FC_PAGE;
+ else
+ pcpu_chosen_fc = PCPU_FC_EMBED;
+ }
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
+ pcpu_cpu_distance, pcpu_cpu_to_node);
+ if (rc < 0)
+ pr_warn("%s allocator failed (%d), falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
+ }
+ if (rc < 0)
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
+ if (rc < 0)
+ panic("cannot initialize percpu area (err=%d)", rc);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+/*
+ * Get the node id from a logical cpu number.
+ * __cpuid_to_node maps a physical cpu id to a node, so we
+ * should use cpu_logical_map(cpu) to index it.
+ *
+ * This routine is only used during the early boot phase;
+ * once setup_per_cpu_areas() has run and numa_node is
+ * initialized, cpu_to_node() is used instead.
+ */
+int early_cpu_to_node(int cpu)
+{
+ int physid = cpu_logical_map(cpu);
+
+ if (physid < 0)
+ return NUMA_NO_NODE;
+
+ return __cpuid_to_node[physid];
+}
+
+void __init early_numa_add_cpu(int cpuid, s16 node)
+{
+ int cpu = __cpu_number_map[cpuid];
+
+ if (cpu < 0)
+ return;
+
+ cpumask_set_cpu(cpu, &cpus_on_node[node]);
+ cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
+}
+
+void numa_add_cpu(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ cpumask_set_cpu(cpu, &cpus_on_node[nid]);
+}
+
+void numa_remove_cpu(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
+}
+
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+ struct numa_meminfo *mi)
+{
+ /* ignore zero length blks */
+ if (start == end)
+ return 0;
+
+ /* whine about and ignore invalid blks */
+ if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+ pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
+ return 0;
+ }
+
+ if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: too many memblk ranges\n");
+ return -EINVAL;
+ }
+
+ mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
+ mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
+ mi->blk[mi->nr_blks].nid = nid;
+ mi->nr_blks++;
+ return 0;
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
+static void __init alloc_node_data(int nid)
+{
+ void *nd;
+ unsigned long nd_pa;
+ size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
+
+ nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
+ if (!nd_pa) {
+ pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
+ return;
+ }
+
+ nd = __va(nd_pa);
+
+ node_data[nid] = nd;
+ memset(nd, 0, sizeof(pg_data_t));
+}
+
+static void __init node_mem_init(unsigned int node)
+{
+ unsigned long start_pfn, end_pfn;
+ unsigned long node_addrspace_offset;
+
+ node_addrspace_offset = nid_to_addrbase(node);
+ pr_info("Node%d's addrspace_offset is 0x%lx\n",
+ node, node_addrspace_offset);
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+ pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
+ node, start_pfn, end_pfn);
+
+ alloc_node_data(node);
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common). Make sure the nodes cover all memory.
+ */
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+{
+ int i;
+ u64 numaram, biosram;
+
+ numaram = 0;
+ for (i = 0; i < mi->nr_blks; i++) {
+ u64 s = mi->blk[i].start >> PAGE_SHIFT;
+ u64 e = mi->blk[i].end >> PAGE_SHIFT;
+
+ numaram += e - s;
+ numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+ if ((s64)numaram < 0)
+ numaram = 0;
+ }
+ max_pfn = max_low_pfn;
+ biosram = max_pfn - absent_pages_in_range(0, max_pfn);
+
+ BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
+ return true;
+}
+
+static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
+{
+ static unsigned long num_physpages;
+
+ num_physpages += (size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ node, type, start, size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
+ memblock_set_node(start, size, &memblock.memory, node);
+}
+
+/*
+ * add_numamem_region
+ *
+ * Add a usable memory region described by the BIOS. The
+ * routine computes each intersection between the BIOS's
+ * region and a node's region, and adds it to that node's
+ * memblock pool.
+ *
+ */
+static void __init add_numamem_region(u64 start, u64 end, u32 type)
+{
+ u32 i;
+ u64 ofs = start;
+
+ if (start >= end) {
+ pr_debug("Invalid region: %016llx-%016llx\n", start, end);
+ return;
+ }
+
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+ if (ofs > mb->end)
+ continue;
+
+ if (end > mb->end) {
+ add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
+ ofs = mb->end;
+ } else {
+ add_node_intersection(mb->nid, ofs, end - ofs, type);
+ break;
+ }
+ }
+}
+
+static void __init init_node_memblock(void)
+{
+ u32 mem_type;
+ u64 mem_end, mem_start, mem_size;
+ efi_memory_desc_t *md;
+
+ /* Parse memory information and activate */
+ for_each_efi_memory_desc(md) {
+ mem_type = md->type;
+ mem_start = md->phys_addr;
+ mem_size = md->num_pages << EFI_PAGE_SHIFT;
+ mem_end = mem_start + mem_size;
+
+ switch (mem_type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_PERSISTENT_MEMORY:
+ case EFI_CONVENTIONAL_MEMORY:
+ add_numamem_region(mem_start, mem_end, mem_type);
+ break;
+ case EFI_PAL_CODE:
+ case EFI_UNUSABLE_MEMORY:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ add_numamem_region(mem_start, mem_end, mem_type);
+ fallthrough;
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ mem_type, mem_start, mem_size);
+ break;
+ }
+ }
+}
+
+static void __init numa_default_distance(void)
+{
+ int row, col;
+
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++) {
+ if (col == row)
+ node_distances[row][col] = LOCAL_DISTANCE;
+ else
+			/* We assume one node per package here!
+			 *
+			 * A SLIT should be provided for multiple nodes
+			 * per package to override this default.
+ */
+ node_distances[row][col] = REMOTE_DISTANCE;
+ }
+}
+
+int __init init_numa_memory(void)
+{
+ int i;
+ int ret;
+ int node;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpuid_to_node(i, NUMA_NO_NODE);
+
+ numa_default_distance();
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+
+ /* Parse SRAT and SLIT if provided by firmware. */
+ ret = acpi_numa_init();
+ if (ret < 0)
+ return ret;
+
+ node_possible_map = numa_nodes_parsed;
+ if (WARN_ON(nodes_empty(node_possible_map)))
+ return -EINVAL;
+
+ init_node_memblock();
+ if (numa_meminfo_cover_memory(&numa_meminfo) == false)
+ return -EINVAL;
+
+ for_each_node_mask(node, node_possible_map) {
+ node_mem_init(node);
+ node_set_online(node);
+ }
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+ setup_nr_node_ids();
+ loongson_sysconf.nr_nodes = nr_node_ids;
+ loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]);
+
+ return 0;
+}
+
+#endif
+
+void __init paging_init(void)
+{
+ unsigned int node;
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ for_each_online_node(node) {
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+
+ if (end_pfn > max_low_pfn)
+ max_low_pfn = end_pfn;
+ }
+#ifdef CONFIG_ZONE_DMA32
+ zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ memblock_free_all();
+ setup_zero_pages(); /* This comes from node 0 */
+}
+
+int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+EXPORT_SYMBOL(pcibus_to_node);
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
new file mode 100644
index 000000000..3a2edb157
--- /dev/null
+++ b/arch/loongarch/kernel/perf_event.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux performance counter support for LoongArch.
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Copyright (C) 2011 Cavium Networks, Inc.
+ * Author: Deng-Cheng Zhu
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long
+user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
+{
+ unsigned long err;
+ unsigned long __user *user_frame_tail;
+ struct stack_frame buftail;
+
+ user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));
+
+	/* Also check accessibility of one struct stack_frame beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err || (unsigned long)user_frame_tail >= buftail.fp)
+ return 0;
+
+ perf_callchain_store(entry, buftail.ra);
+
+ return buftail.fp;
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long fp;
+
+ if (perf_guest_state()) {
+		/* We don't support guest OS callchains yet */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->csr_era);
+
+ fp = regs->regs[22];
+
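+	/* Walk user frame pointers until the chain ends or fp is no longer 16-byte aligned */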
+ while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
+ fp = user_backtrace(entry, fp);
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct unwind_state state;
+ unsigned long addr;
+
+ for (unwind_start(&state, current, regs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || perf_callchain_store(entry, addr))
+ return;
+ }
+}
+
+#define LOONGARCH_MAX_HWEVENTS 32
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[LOONGARCH_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+ */
+ unsigned int saved_ctrl[LOONGARCH_MAX_HWEVENTS];
+};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of LoongArch performance events. */
+struct loongarch_perf_event {
+ unsigned int event_id;
+};
+
+static struct loongarch_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define HW_OP_UNSUPPORTED 0xffffffff
+#define CACHE_OP_UNSUPPORTED 0xffffffff
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED}, \
+ }, \
+}
+
+struct loongarch_pmu {
+ u64 max_period;
+ u64 valid_count;
+ u64 overflow;
+ const char *name;
+ unsigned int num_counters;
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ const struct loongarch_perf_event *(*map_raw_event)(u64 config);
+ const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct loongarch_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+static struct loongarch_pmu loongarch_pmu;
+
+#define M_PERFCTL_EVENT(event) (event & CSR_PERFCTRL_EVENT)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (CSR_PERFCTRL_PLV0 | \
+ CSR_PERFCTRL_PLV1 | \
+ CSR_PERFCTRL_PLV2 | \
+ CSR_PERFCTRL_PLV3 | \
+ CSR_PERFCTRL_IE)
+
+#define M_PERFCTL_CONFIG_MASK 0x1f0000
+
+static void pause_local_counters(void);
+static void resume_local_counters(void);
+
+static u64 loongarch_pmu_read_counter(unsigned int idx)
+{
+ u64 val = -1;
+
+ switch (idx) {
+ case 0:
+ val = read_csr_perfcntr0();
+ break;
+ case 1:
+ val = read_csr_perfcntr1();
+ break;
+ case 2:
+ val = read_csr_perfcntr2();
+ break;
+ case 3:
+ val = read_csr_perfcntr3();
+ break;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+
+ return val;
+}
+
+static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
+{
+ switch (idx) {
+ case 0:
+ write_csr_perfcntr0(val);
+ return;
+ case 1:
+ write_csr_perfcntr1(val);
+ return;
+ case 2:
+ write_csr_perfcntr2(val);
+ return;
+ case 3:
+ write_csr_perfcntr3(val);
+ return;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return;
+ }
+}
+
+static unsigned int loongarch_pmu_read_control(unsigned int idx)
+{
+ unsigned int val = -1;
+
+ switch (idx) {
+ case 0:
+ val = read_csr_perfctrl0();
+ break;
+ case 1:
+ val = read_csr_perfctrl1();
+ break;
+ case 2:
+ val = read_csr_perfctrl2();
+ break;
+ case 3:
+ val = read_csr_perfctrl3();
+ break;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+
+ return val;
+}
+
+static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ switch (idx) {
+ case 0:
+ write_csr_perfctrl0(val);
+ return;
+ case 1:
+ write_csr_perfctrl1(val);
+ return;
+ case 2:
+ write_csr_perfctrl2(val);
+ return;
+ case 3:
+ write_csr_perfctrl3(val);
+ return;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return;
+ }
+}
+
+static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+ int i;
+
+ for (i = 0; i < loongarch_pmu.num_counters; i++) {
+ if (!test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ unsigned int cpu;
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+	/* Make sure the interrupt is enabled. */
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE;
+
+ cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+}
+
+static void loongarch_pmu_disable_event(int idx)
+{
+ unsigned long flags;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+static int loongarch_pmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ int ret = 0;
+ u64 left = local64_read(&hwc->period_left);
+ u64 period = hwc->sample_period;
+
+ if (unlikely((left + period) & (1ULL << 63))) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ } else if (unlikely((left + period) <= period)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > loongarch_pmu.max_period) {
+ left = loongarch_pmu.max_period;
+ local64_set(&hwc->period_left, left);
+ }
+
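+	/* Program the counter to (overflow - left) so it overflows after "left" more events */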
+ local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
+
+ loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static void loongarch_pmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 delta;
+ u64 prev_raw_count, new_raw_count;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = loongarch_pmu.read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void loongarch_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ loongarch_pmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ loongarch_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void loongarch_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ loongarch_pmu_disable_event(hwc->idx);
+ barrier();
+ loongarch_pmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int loongarch_pmu_add(struct perf_event *event, int flags)
+{
+ int idx, err = 0;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ perf_pmu_disable(event->pmu);
+
+ /* Look for a free counter for this event. */
+ idx = loongarch_pmu_alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ loongarch_pmu_disable_event(idx);
+ cpuc->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ loongarch_pmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void loongarch_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+ loongarch_pmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void loongarch_pmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ loongarch_pmu_event_update(event, hwc, hwc->idx);
+}
+
+static void loongarch_pmu_enable(struct pmu *pmu)
+{
+ resume_local_counters();
+}
+
+static void loongarch_pmu_disable(struct pmu *pmu)
+{
+ pause_local_counters();
+}
+
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static atomic_t active_events = ATOMIC_INIT(0);
+
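+/* Map the PMC exception code onto a Linux IRQ via the CPU interrupt controller domain. */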
+static int get_pmc_irq(void)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+ if (d)
+ return irq_create_mapping(d, EXCCODE_PMC - EXCCODE_INT_START);
+
+ return -EINVAL;
+}
+
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
+ on_each_cpu(reset_counters, NULL, 1);
+ free_irq(get_pmc_irq(), &loongarch_pmu);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ loongarch_pmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!loongarch_pmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, data, regs))
+ loongarch_pmu_disable_event(idx);
+}
+
+static irqreturn_t pmu_handle_irq(int irq, void *dev)
+{
+ int n;
+ int handled = IRQ_NONE;
+ uint64_t counter;
+ struct pt_regs *regs;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * First we pause the local counters, so that when we are locked
+ * here, the counters are all paused. When it gets locked due to
+ * perf_disable(), the timer interrupt handler will be delayed.
+ *
+ * See also loongarch_pmu_start().
+ */
+ pause_local_counters();
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, 0);
+
+ for (n = 0; n < loongarch_pmu.num_counters; n++) {
+ if (test_bit(n, cpuc->used_mask)) {
+ counter = loongarch_pmu.read_counter(n);
+ if (counter & loongarch_pmu.overflow) {
+ handle_associated_event(cpuc, n, &data, regs);
+ handled = IRQ_HANDLED;
+ }
+ }
+ }
+
+ resume_local_counters();
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ irq_work_run();
+
+ return handled;
+}
+
+static int loongarch_pmu_event_init(struct perf_event *event)
+{
+ int r, irq;
+ unsigned long flags;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ /* Init it to avoid false validate_group */
+ event->hw.event_base = 0xffffffff;
+ return -ENOENT;
+ }
+
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
+ return -ENODEV;
+
+ irq = get_pmc_irq();
+ flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0) {
+ r = request_irq(irq, pmu_handle_irq, flags, "Perf_PMU", &loongarch_pmu);
+ if (r < 0) {
+ mutex_unlock(&pmu_reserve_mutex);
+ pr_warn("PMU IRQ request failed\n");
+ return -ENODEV;
+ }
+ }
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+ .pmu_enable = loongarch_pmu_enable,
+ .pmu_disable = loongarch_pmu_disable,
+ .event_init = loongarch_pmu_event_init,
+ .add = loongarch_pmu_add,
+ .del = loongarch_pmu_del,
+ .start = loongarch_pmu_start,
+ .stop = loongarch_pmu_stop,
+ .read = loongarch_pmu_read,
+};
+
+static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
+{
+ return M_PERFCTL_EVENT(pev->event_id);
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
+{
+ const struct loongarch_perf_event *pev;
+
+ pev = &(*loongarch_pmu.general_event_map)[idx];
+
+ if (pev->event_id == HW_OP_UNSUPPORTED)
+ return ERR_PTR(-ENOENT);
+
+ return pev;
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct loongarch_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*loongarch_pmu.cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->event_id == CACHE_OP_UNSUPPORTED)
+ return ERR_PTR(-ENOENT);
+
+ return pev;
+}
+
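+/*
+ * Verify that the whole group fits on the available counters by allocating
+ * the leader, its siblings and the new event against a fake cpuc.
+ */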
+static int validate_group(struct perf_event *event)
+{
+ struct cpu_hw_events fake_cpuc;
+ struct perf_event *sibling, *leader = event->group_leader;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, leader) {
+ if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ return -EINVAL;
+ }
+
+ if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void reset_counters(void *arg)
+{
+ int n;
+ int counters = loongarch_pmu.num_counters;
+
+ for (n = 0; n < counters; n++) {
+ loongarch_pmu_write_control(n, 0);
+ loongarch_pmu.write_counter(n, 0);
+ }
+}
+
+static const struct loongarch_perf_event loongson_event_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
+};
+
+static const struct loongarch_perf_event loongson_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x8 },
+ [C(RESULT_MISS)] = { 0x9 },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x8 },
+ [C(RESULT_MISS)] = { 0x9 },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0xaa },
+ [C(RESULT_MISS)] = { 0xa9 },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x6 },
+ [C(RESULT_MISS)] = { 0x7 },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0xc },
+ [C(RESULT_MISS)] = { 0xd },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0xc },
+ [C(RESULT_MISS)] = { 0xd },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x3b },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x4 },
+ [C(RESULT_MISS)] = { 0x3c },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x4 },
+ [C(RESULT_MISS)] = { 0x3c },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02 },
+ [C(RESULT_MISS)] = { 0x03 },
+ },
+},
+};
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ int err;
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event_attr *attr = &event->attr;
+ const struct loongarch_perf_event *pev;
+
+ /* Return the LoongArch event descriptor for a generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = loongarch_pmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = loongarch_pmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = loongarch_pmu.map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+ * We allow maximum flexibility in how each individual counter shared
+ * by the single CPU operates (the mode exclusion and the range).
+ */
+ hwc->config_base = CSR_PERFCTRL_IE;
+
+ hwc->event_base = loongarch_pmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user) {
+ hwc->config_base |= CSR_PERFCTRL_PLV3;
+ hwc->config_base |= CSR_PERFCTRL_PLV2;
+ }
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= CSR_PERFCTRL_PLV0;
+ }
+ if (!attr->exclude_hv) {
+ hwc->config_base |= CSR_PERFCTRL_PLV1;
+ }
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event can belong to another cpu. We do not assign a local
+ * counter for it for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = loongarch_pmu.max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event)
+ err = validate_group(event);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
+ return err;
+}
+
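+/* Stop all counters on the local CPU, saving their control registers first. */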
+static void pause_local_counters(void)
+{
+ unsigned long flags;
+ int ctr = loongarch_pmu.num_counters;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ local_irq_save(flags);
+ do {
+ ctr--;
+ cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
+ loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ } while (ctr > 0);
+ local_irq_restore(flags);
+}
+
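+/* Restart the local CPU's counters from the saved control values. */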
+static void resume_local_counters(void)
+{
+ int ctr = loongarch_pmu.num_counters;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ do {
+ ctr--;
+ loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+ } while (ctr > 0);
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
+{
+ raw_event.event_id = M_PERFCTL_EVENT(config);
+
+ return &raw_event;
+}
+
+static int __init init_hw_perf_events(void)
+{
+ int counters;
+
+ if (!cpu_has_pmp)
+ return -ENODEV;
+
+ pr_info("Performance counters: ");
+ counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
+
+ loongarch_pmu.num_counters = counters;
+ loongarch_pmu.max_period = (1ULL << 63) - 1;
+ loongarch_pmu.valid_count = (1ULL << 63) - 1;
+ loongarch_pmu.overflow = 1ULL << 63;
+ loongarch_pmu.name = "loongarch/loongson64";
+ loongarch_pmu.read_counter = loongarch_pmu_read_counter;
+ loongarch_pmu.write_counter = loongarch_pmu_write_counter;
+ loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
+ loongarch_pmu.general_event_map = &loongson_event_map;
+ loongarch_pmu.cache_event_map = &loongson_cache_map;
+
+ on_each_cpu(reset_counters, NULL, 1);
+
+ pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
+ loongarch_pmu.name, counters, 64);
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
new file mode 100644
index 000000000..263ac4ab5
--- /dev/null
+++ b/arch/loongarch/kernel/perf_regs.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2013 Cavium, Inc.
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_32BIT
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+#else /* Must be CONFIG_64BIT */
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+#endif /* CONFIG_32BIT */
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask)
+ return -EINVAL;
+ if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
+ return -EINVAL;
+ return 0;
+}
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
+ return 0;
+
+ if ((u32)idx == PERF_REG_LOONGARCH_PC)
+ return regs->csr_era;
+
+ return regs->regs[idx];
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
new file mode 100644
index 000000000..0d82907b5
--- /dev/null
+++ b/arch/loongarch/kernel/proc.c
@@ -0,0 +1,128 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/idle.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long n = (unsigned long) v - 1;
+ unsigned int version = cpu_data[n].processor_id & 0xff;
+ unsigned int fp_version = cpu_data[n].fpu_vers;
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(n))
+ return 0;
+#endif
+
+ /*
+ * For the first processor, also print the system type
+ */
+ if (n == 0)
+ seq_printf(m, "system type\t\t: %s\n\n", get_system_type());
+
+ seq_printf(m, "processor\t\t: %ld\n", n);
+ seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
+ seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+ seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
+ seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
+ seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
+ seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
+ seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
+ cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
+ seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
+ (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
+ ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
+ seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
+ seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
+ cpu_pabits + 1, cpu_vabits + 1);
+
+ seq_printf(m, "ISA\t\t\t:");
+ if (cpu_has_loongarch32)
+ seq_printf(m, " loongarch32");
+ if (cpu_has_loongarch64)
+ seq_printf(m, " loongarch64");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "Features\t\t:");
+ if (cpu_has_cpucfg) seq_printf(m, " cpucfg");
+ if (cpu_has_lam) seq_printf(m, " lam");
+ if (cpu_has_ual) seq_printf(m, " ual");
+ if (cpu_has_fpu) seq_printf(m, " fpu");
+ if (cpu_has_lsx) seq_printf(m, " lsx");
+ if (cpu_has_lasx) seq_printf(m, " lasx");
+ if (cpu_has_crc32) seq_printf(m, " crc32");
+ if (cpu_has_complex) seq_printf(m, " complex");
+ if (cpu_has_crypto) seq_printf(m, " crypto");
+ if (cpu_has_lvz) seq_printf(m, " lvz");
+ if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
+ if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
+ if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "Hardware Watchpoint\t: %s",
+ cpu_has_watch ? "yes, " : "no\n");
+ if (cpu_has_watch) {
+ seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+ cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
+ }
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
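+/* seq_file iterator: CPU n is encoded as the opaque cookie (void *)(n + 1). */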
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < nr_cpu_ids ? (void *)(i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
new file mode 100644
index 000000000..1259bc312
--- /dev/null
+++ b/arch/loongarch/kernel/process.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
+ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/export.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/personality.h>
+#include <linux/sys.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/random.h>
+#include <linux/prctl.h>
+#include <linux/nmi.h>
+
+#include <asm/asm.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/unwind.h>
+#include <asm/vdso.h>
+
+/*
+ * Idle related variables and functions
+ */
+
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long crmd;
+ unsigned long prmd;
+ unsigned long euen;
+
+ /* New thread loses kernel privileges. */
+ crmd = regs->csr_crmd & ~(PLV_MASK);
+ crmd |= PLV_USER;
+ regs->csr_crmd = crmd;
+
+ prmd = regs->csr_prmd & ~(PLV_MASK);
+ prmd |= PLV_USER;
+ regs->csr_prmd = prmd;
+
+ euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
+ regs->csr_euen = euen;
+ lose_fpu(0);
+ current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+
+ clear_thread_flag(TIF_LSX_CTX_LIVE);
+ clear_thread_flag(TIF_LASX_CTX_LIVE);
+ clear_used_math();
+ regs->csr_era = pc;
+ regs->regs[3] = sp;
+}
+
+void exit_thread(struct task_struct *tsk)
+{
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_fpu_owner())
+ save_fp(current);
+
+ preempt_enable();
+
+ if (used_math())
+ memcpy(dst, src, sizeof(struct task_struct));
+ else
+ memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+
+ return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+ unsigned long childksp;
+ unsigned long tls = args->tls;
+ unsigned long usp = args->stack;
+ unsigned long clone_flags = args->flags;
+ struct pt_regs *childregs, *regs = current_pt_regs();
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+
+ /* Set up the new thread's kernel stack and register state. */
+ childregs = (struct pt_regs *) childksp - 1;
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.sched_cfa = 0;
+ p->thread.csr_euen = 0;
+ p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
+ p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
+ p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
+ if (unlikely(args->fn)) {
+ /* kernel thread */
+ p->thread.reg03 = childksp;
+ p->thread.reg23 = (unsigned long)args->fn;
+ p->thread.reg24 = (unsigned long)args->fn_arg;
+ p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->csr_euen = p->thread.csr_euen;
+ childregs->csr_crmd = p->thread.csr_crmd;
+ childregs->csr_prmd = p->thread.csr_prmd;
+ childregs->csr_ecfg = p->thread.csr_ecfg;
+ goto out;
+ }
+
+ /* user thread */
+ *childregs = *regs;
+ childregs->regs[4] = 0; /* Child gets zero as return value */
+ if (usp)
+ childregs->regs[3] = usp;
+
+ p->thread.reg03 = (unsigned long) childregs;
+ p->thread.reg01 = (unsigned long) ret_from_fork;
+ p->thread.sched_ra = (unsigned long) ret_from_fork;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ childregs->csr_euen = 0;
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->regs[2] = tls;
+
+out:
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDSIMD);
+ clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
+ clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+
+ return 0;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+ struct unwind_state state;
+
+ if (!try_get_task_stack(task))
+ return 0;
+
+ for (unwind_start(&state, task, NULL);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ pc = unwind_get_return_address(&state);
+ if (!pc)
+ break;
+ if (in_sched_functions(pc))
+ continue;
+ break;
+ }
+
+ put_task_stack(task);
+
+ return pc;
+}
+
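+/* Return true (and fill in @info) if @stack lies within this CPU's IRQ stack. */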
+bool in_irq_stack(unsigned long stack, struct stack_info *info)
+{
+ unsigned long nextsp;
+ unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
+ unsigned long end = begin + IRQ_STACK_START;
+
+ if (stack < begin || stack >= end)
+ return false;
+
+ nextsp = *(unsigned long *)end;
+ if (nextsp & (SZREG - 1))
+ return false;
+
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = nextsp;
+ info->type = STACK_TYPE_IRQ;
+
+ return true;
+}
+
+bool in_task_stack(unsigned long stack, struct task_struct *task,
+ struct stack_info *info)
+{
+ unsigned long begin = (unsigned long)task_stack_page(task);
+ unsigned long end = begin + THREAD_SIZE;
+
+ if (stack < begin || stack >= end)
+ return false;
+
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = 0;
+ info->type = STACK_TYPE_TASK;
+
+ return true;
+}
+
+int get_stack_info(unsigned long stack, struct task_struct *task,
+ struct stack_info *info)
+{
+ task = task ? : current;
+
+ if (!stack || stack & (SZREG - 1))
+ goto unknown;
+
+ if (in_task_stack(stack, task, info))
+ return 0;
+
+ if (task != current)
+ goto unknown;
+
+ if (in_irq_stack(stack, info))
+ return 0;
+
+unknown:
+ info->type = STACK_TYPE_UNKNOWN;
+ return -EINVAL;
+}
+
+unsigned long stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ /* Space for the VDSO & data page */
+ top -= PAGE_ALIGN(current->thread.vdso->size);
+ top -= PAGE_SIZE;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= prandom_u32_max(PAGE_SIZE);
+
+ return sp & STACK_ALIGN;
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
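+/* IPI handler: dump this CPU's backtrace and release its slot in the busy mask. */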
+static void handle_backtrace(void *info)
+{
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
+
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = handle_backtrace;
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+}
+
+#ifdef CONFIG_64BIT
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
+ uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
+ }
+
+ uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
+ uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
+ uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
+ uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
+ uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
+ uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
+ uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
+ uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
+}
+#endif /* CONFIG_64BIT */
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
new file mode 100644
index 000000000..dc2b82ea8
--- /dev/null
+++ b/arch/loongarch/kernel/ptrace.c
@@ -0,0 +1,433 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ */
+#include <linux/kernel.h>
+#include <linux/audit.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/seccomp.h>
+#include <linux/uaccess.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/syscall.h>
+
+static void init_fp_ctx(struct task_struct *target)
+{
+ /* The target already has context */
+ if (tsk_used_math(target))
+ return;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+ set_stopped_child_used_math(target);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+ struct pt_regs *regs = task_pt_regs(target);
+
+ r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
+ r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
+ r = membuf_write(&to, &regs->csr_era, sizeof(u64));
+ r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
+
+ return r;
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err;
+ int a0_start = sizeof(u64) * GPR_NUM;
+ int era_start = a0_start + sizeof(u64);
+ int badvaddr_start = era_start + sizeof(u64);
+ struct pt_regs *regs = task_pt_regs(target);
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->regs,
+ 0, a0_start);
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->orig_a0,
+ a0_start, a0_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->csr_era,
+ era_start, era_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->csr_badvaddr,
+ badvaddr_start, badvaddr_start + sizeof(u64));
+
+ return err;
+}
+
+
+/*
+ * Get the general floating-point registers.
+ */
+static int gfpr_get(struct task_struct *target, struct membuf *to)
+{
+ return membuf_write(to, &target->thread.fpu.fpr,
+ sizeof(elf_fpreg_t) * NUM_FPU_REGS);
+}
+
+static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
+{
+ int i, r;
+ u64 fpr_val;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
+ }
+
+ return r;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC and FCSR registers separately.
+ */
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ r = gfpr_get(target, &to);
+ else
+ r = gfpr_get_simd(target, &to);
+
+ r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
+ r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
+
+ return r;
+}
+
+static int gfpr_set(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ return user_regset_copyin(pos, count, kbuf, ubuf,
+ &target->thread.fpu.fpr,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+static int gfpr_set_simd(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ int i, err;
+ u64 fpr_val;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+ err = user_regset_copyin(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC and FCSR registers separately.
+ */
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ const int fcsr_start = fcc_start + sizeof(u64);
+ int err;
+
+ BUG_ON(count % sizeof(elf_fpreg_t));
+ if (pos + count > sizeof(elf_fpregset_t))
+ return -EIO;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
+
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcc, fcc_start,
+ fcc_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcsr, fcsr_start,
+ fcsr_start + sizeof(u32));
+
+ return err;
+}
+
+static int cfg_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int i, r;
+ u32 cfg_val;
+
+ i = 0;
+ while (to.left > 0) {
+ cfg_val = read_cpucfg(i++);
+ r = membuf_write(&to, &cfg_val, sizeof(u32));
+ }
+
+ return r;
+}
+
+/*
+ * CFG registers are read-only.
+ */
+static int cfg_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(orig_a0, orig_a0),
+ REG_OFFSET_NAME(csr_era, csr_era),
+ REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
+ REG_OFFSET_NAME(csr_crmd, csr_crmd),
+ REG_OFFSET_NAME(csr_prmd, csr_prmd),
+ REG_OFFSET_NAME(csr_euen, csr_euen),
+ REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
+ REG_OFFSET_NAME(csr_estat, csr_estat),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+enum loongarch_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+ REGSET_CPUCFG,
+};
+
+static const struct user_regset loongarch64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .regset_get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+ [REGSET_CPUCFG] = {
+ .core_note_type = NT_LOONGARCH_CPUCFG,
+ .n = 64,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = cfg_get,
+ .set = cfg_set,
+ },
+};
+
+static const struct user_regset_view user_loongarch64_view = {
+ .name = "loongarch64",
+ .e_machine = ELF_ARCH,
+ .regsets = loongarch64_regsets,
+ .n = ARRAY_SIZE(loongarch64_regsets),
+};
+
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_loongarch64_view;
+}
+
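+/* PTRACE_PEEKUSR: read one GPR/ARG0/PC/BADVADDR register from the child. */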
+static inline int read_user(struct task_struct *target, unsigned long addr,
+ unsigned long __user *data)
+{
+ unsigned long tmp = 0;
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = task_pt_regs(target)->regs[addr];
+ break;
+ case ARG0:
+ tmp = task_pt_regs(target)->orig_a0;
+ break;
+ case PC:
+ tmp = task_pt_regs(target)->csr_era;
+ break;
+ case BADVADDR:
+ tmp = task_pt_regs(target)->csr_badvaddr;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return put_user(tmp, data);
+}
+
+static inline int write_user(struct task_struct *target, unsigned long addr,
+ unsigned long data)
+{
+ switch (addr) {
+ case 0 ... 31:
+ task_pt_regs(target)->regs[addr] = data;
+ break;
+ case ARG0:
+ task_pt_regs(target)->orig_a0 = data;
+ break;
+ case PC:
+ task_pt_regs(target)->csr_era = data;
+ break;
+ case BADVADDR:
+ task_pt_regs(target)->csr_badvaddr = data;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (void __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = read_user(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = write_user(child, addr, data);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
new file mode 100644
index 000000000..d13252553
--- /dev/null
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -0,0 +1,112 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * relocate_kernel.S for kexec
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kexec.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/loongarch.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+
+SYM_CODE_START(relocate_new_kernel)
+ /*
+ * a0: EFI boot flag for the new kernel
+ * a1: Command line pointer for the new kernel
+ * a2: System table pointer for the new kernel
+ * a3: Start address to jump to after relocation
+ * a4: Pointer to the current indirection page entry
+ */
+ move s0, a4
+
+ /*
+ * In case of a kdump/crash kernel, the indirection page is not
+ * populated as the kernel is directly copied to a reserved location.
+ */
+ beqz s0, done
+
+process_entry:
+ PTR_L s1, s0, 0
+ PTR_ADDI s0, s0, SZREG
+
+ /* destination page */
+ andi s2, s1, IND_DESTINATION
+ beqz s2, 1f
+ li.w t0, ~0x1
+ and s3, s1, t0 /* store destination addr in s3 */
+ b process_entry
+
+1:
+ /* indirection page, update s0 */
+ andi s2, s1, IND_INDIRECTION
+ beqz s2, 1f
+ li.w t0, ~0x2
+ and s0, s1, t0
+ b process_entry
+
+1:
+ /* done page */
+ andi s2, s1, IND_DONE
+ beqz s2, 1f
+ b done
+
+1:
+ /* source page */
+ andi s2, s1, IND_SOURCE
+ beqz s2, process_entry
+ li.w t0, ~0x8
+ and s1, s1, t0
+ li.w s5, (1 << _PAGE_SHIFT) / SZREG
+
+copy_word:
+ /* copy page word by word */
+ REG_L s4, s1, 0
+ REG_S s4, s3, 0
+ PTR_ADDI s3, s3, SZREG
+ PTR_ADDI s1, s1, SZREG
+ LONG_ADDI s5, s5, -1
+ beqz s5, process_entry
+ b copy_word
+ b process_entry
+
+done:
+ ibar 0
+ dbar 0
+
+ /*
+ * Jump to the new kernel,
+ * make sure the values of a0, a1, a2 and a3 are not changed.
+ */
+ jr a3
+SYM_CODE_END(relocate_new_kernel)
+
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until code is relocated and
+ * then start at the entry point from LOONGARCH_IOCSR_MBUF0.
+ */
+SYM_CODE_START(kexec_smp_wait)
+1: li.w t0, 0x100 /* wait for init loop */
+2: addi.w t0, t0, -1 /* limit mailbox access */
+ bnez t0, 2b
+ li.w t1, LOONGARCH_IOCSR_MBUF0
+ iocsrrd.w s0, t1 /* check PC as an indicator */
+ beqz s0, 1b
+ iocsrrd.d s0, t1 /* get PC via mailbox */
+
+ li.d t0, CACHE_BASE
+ or s0, s0, t0 /* s0 = TO_CACHE(s0) */
+ jr s0 /* jump to initial PC */
+SYM_CODE_END(kexec_smp_wait)
+#endif
+
+relocate_new_kernel_end:
+
+SYM_DATA_START(relocate_new_kernel_size)
+ PTR relocate_new_kernel_end - relocate_new_kernel
+SYM_DATA_END(relocate_new_kernel_size)
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
new file mode 100644
index 000000000..8c82021eb
--- /dev/null
+++ b/arch/loongarch/kernel/reset.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+
+#include <acpi/reboot.h>
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ local_irq_disable();
+ clear_csr_ecfg(ECFG0_IM);
+
+ pr_notice("\n\n** You can safely turn off the power now **\n\n");
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+void machine_power_off(void)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_power_off();
+#ifdef CONFIG_EFI
+ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+#endif
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+void machine_restart(char *command)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
+#ifdef CONFIG_EFI
+ if (efi_capsule_pending(NULL))
+ efi_reboot(REBOOT_WARM, NULL);
+ else
+ efi_reboot(REBOOT_COLD, NULL);
+#endif
+ if (!acpi_disabled)
+ acpi_reboot();
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
new file mode 100644
index 000000000..ae436def7
--- /dev/null
+++ b/arch/loongarch/kernel/setup.c
@@ -0,0 +1,437 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 Waldorf Electronics
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
+ * Copyright (C) 1996 Stoned Elipot
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/screen_info.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/ioport.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/swiotlb.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+
+#define SMBIOS_BIOSSIZE_OFFSET 0x09
+#define SMBIOS_BIOSEXTERN_OFFSET 0x13
+#define SMBIOS_FREQLOW_OFFSET 0x16
+#define SMBIOS_FREQHIGH_OFFSET 0x17
+#define SMBIOS_FREQLOW_MASK 0xFF
+#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+#define LOONGSON_EFI_ENABLE (1 << 3)
+
+struct screen_info screen_info __section(".data");
+
+unsigned long fw_arg0, fw_arg1, fw_arg2;
+DEFINE_PER_CPU(unsigned long, kernelsp);
+struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(cpu_data);
+
+struct loongson_board_info b_info;
+static const char dmi_empty_string[] = " ";
+
+/*
+ * Setup information
+ *
+ * These are initialized so they are in the .data section
+ */
+
+static int num_standard_resources;
+static struct resource *standard_resources;
+
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource = { .name = "Kernel bss", };
+
+const char *get_system_type(void)
+{
+ return "generic-loongson-machine";
+}
+
+static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
+{
+ const u8 *bp = ((u8 *) dm) + dm->length;
+
+ if (s) {
+ s--;
+ while (s > 0 && *bp) {
+ bp += strlen(bp) + 1;
+ s--;
+ }
+
+ if (*bp != 0) {
+ size_t len = strlen(bp)+1;
+ size_t cmp_len = len > 8 ? 8 : len;
+
+ if (!memcmp(bp, dmi_empty_string, cmp_len))
+ return dmi_empty_string;
+
+ return bp;
+ }
+ }
+
+ return "";
+}
+
+static void __init parse_cpu_table(const struct dmi_header *dm)
+{
+ long freq_temp = 0;
+ char *dmi_data = (char *)dm;
+
+ freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
+ ((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
+ cpu_clock_freq = freq_temp * 1000000;
+
+ loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
+ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+
+ pr_info("CpuClock = %llu\n", cpu_clock_freq);
+}
+
+static void __init parse_bios_table(const struct dmi_header *dm)
+{
+ char *dmi_data = (char *)dm;
+
+ b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
+}
+
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0x0: /* Extern BIOS */
+ parse_bios_table(dm);
+ break;
+ case 0x4: /* Calling interface */
+ parse_cpu_table(dm);
+ break;
+ }
+}
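+
+/* Cache DMI board/BIOS strings and walk the DMI tables for CPU and BIOS details. */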
+static void __init smbios_parse(void)
+{
+ b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
+ b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
+ b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
+ b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
+ b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
+ dmi_walk(find_tokens, NULL);
+}
+
+static int usermem __initdata;
+
+static int __init early_parse_mem(char *p)
+{
+ phys_addr_t start, size;
+
+ if (!p) {
+ pr_err("mem parameter is empty, do nothing\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If a user specifies memory size, we
+ * blow away any automatically generated
+ * size.
+ */
+ if (usermem == 0) {
+ usermem = 1;
+ memblock_remove(memblock_start_of_DRAM(),
+ memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+ else {
+ pr_err("Invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_NUMA))
+ memblock_add(start, size);
+ else
+ memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+static void __init arch_reserve_vmcore(void)
+{
+#ifdef CONFIG_PROC_VMCORE
+ u64 i;
+ phys_addr_t start, end;
+
+ if (!is_kdump_kernel())
+ return;
+
+ if (!elfcorehdr_size) {
+ for_each_mem_range(i, &start, &end) {
+ if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment, that should all be kdump
+ * reserved memory.
+ */
+ elfcorehdr_size = end - elfcorehdr_addr;
+ break;
+ }
+ }
+ }
+
+ if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
+ pr_warn("elfcorehdr is overlapped\n");
+ return;
+ }
+
+ memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+
+ pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
+ elfcorehdr_size >> 10, elfcorehdr_addr);
+#endif
+}
+
+static void __init arch_parse_crashkernel(void)
+{
+#ifdef CONFIG_KEXEC
+ int ret;
+ unsigned long long start;
+ unsigned long long total_mem;
+ unsigned long long crash_base, crash_size;
+
+ total_mem = memblock_phys_mem_size();
+ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
+ if (ret < 0 || crash_size <= 0)
+ return;
+
+ start = memblock_phys_alloc_range(crash_size, 1, crash_base, crash_base + crash_size);
+ if (start != crash_base) {
+ pr_warn("Invalid memory region reserved for crash kernel\n");
+ return;
+ }
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+#endif
+}
+
+void __init platform_init(void)
+{
+ arch_reserve_vmcore();
+ arch_parse_crashkernel();
+
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+ acpi_table_upgrade();
+#endif
+#ifdef CONFIG_ACPI
+ acpi_gbl_use_default_register_widths = false;
+ acpi_boot_table_init();
+#endif
+
+#ifdef CONFIG_NUMA
+ init_numa_memory();
+#endif
+ dmi_setup();
+ smbios_parse();
+ pr_info("The BIOS Version: %s\n", b_info.bios_version);
+
+ efi_runtime_init();
+}
+
+static void __init check_kernel_sections_mem(void)
+{
+ phys_addr_t start = __pa_symbol(&_text);
+ phys_addr_t size = __pa_symbol(&_end) - start;
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_info("Kernel sections are not in the memory maps\n");
+ memblock_add(start, size);
+ }
+}
+
+/*
+ * arch_mem_init - initialize memory management subsystem
+ */
+static void __init arch_mem_init(char **cmdline_p)
+{
+ if (usermem)
+ pr_info("User-defined physical RAM map overwrite\n");
+
+ check_kernel_sections_mem();
+
+ /*
+ * In order to reduce the possibility of a kernel panic when we fail to
+ * get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate as
+ * little low memory as possible before swiotlb_init(), so make
+ * sparse_init() use top-down allocation.
+ */
+ memblock_set_bottom_up(false);
+ sparse_init();
+ memblock_set_bottom_up(true);
+
+ swiotlb_init(true, SWIOTLB_VERBOSE);
+
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ memblock_dump_all();
+
+ early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
+}
+
+static void __init resource_init(void)
+{
+ long i = 0;
+ size_t res_size;
+ struct resource *res;
+ struct memblock_region *region;
+
+ code_resource.start = __pa_symbol(&_text);
+ code_resource.end = __pa_symbol(&_etext) - 1;
+ data_resource.start = __pa_symbol(&_etext);
+ data_resource.end = __pa_symbol(&_edata) - 1;
+ bss_resource.start = __pa_symbol(&__bss_start);
+ bss_resource.end = __pa_symbol(&__bss_stop) - 1;
+
+ num_standard_resources = memblock.memory.cnt;
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+
+ for_each_mem_region(region) {
+ res = &standard_resources[i++];
+ if (!memblock_is_nomap(region)) {
+ res->name = "System RAM";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ } else {
+ res->name = "Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
+ }
+
+ request_resource(&iomem_resource, res);
+
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+ }
+
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start < crashk_res.end) {
+ insert_resource(&iomem_resource, &crashk_res);
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+ }
+#endif
+}
+
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; ++i) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "Reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
+
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+ int i, possible;
+
+ possible = num_processors + disabled_cpus;
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
+
+ pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible, max((possible - num_processors), 0));
+
+ for (i = 0; i < possible; i++)
+ set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+
+ set_nr_cpu_ids(possible);
+}
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+ cpu_probe();
+ *cmdline_p = boot_command_line;
+
+ init_environ();
+ efi_init();
+ memblock_init();
+ pagetable_init();
+ parse_early_param();
+ reserve_initrd_mem();
+
+ platform_init();
+ arch_mem_init(cmdline_p);
+
+ resource_init();
+#ifdef CONFIG_SMP
+ plat_smp_setup();
+ prefill_possible_map();
+#endif
+
+ paging_init();
+}
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
new file mode 100644
index 000000000..8f5b79863
--- /dev/null
+++ b/arch/loongarch/kernel/signal.c
@@ -0,0 +1,566 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/audit.h>
+#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/asm.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#ifdef DEBUG_SIG
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#else
+# define DEBUGP(fmt, args...)
+#endif
+
+/* Make sure we will not lose FPU ownership */
+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+
+struct rt_sigframe {
+ struct siginfo rs_info;
+ struct ucontext rs_uctx;
+};
+
+struct _ctx_layout {
+ struct sctx_info *addr;
+ unsigned int size;
+};
+
+struct extctx_layout {
+ unsigned long size;
+ unsigned int flags;
+ struct _ctx_layout fpu;
+ struct _ctx_layout end;
+};
+
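+/* The extended-context payload starts right after its struct sctx_info header. */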
+static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
+{
+ return (void __user *)((char *)info + sizeof(struct sctx_info));
+}
+
+/*
+ * Copy the thread's saved context to/from a signal context presumed to be on
+ * the user stack, and therefore accessed with the appropriate macros from
+ * uaccess.h.
+ */
+static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &regs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcc, fcc);
+ err |= __put_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &regs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcc, fcc);
+ err |= __get_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fpu_context(struct fpu_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _save_fp_context(regs, fcc, fcsr);
+}
+
+static int restore_hw_fpu_context(struct fpu_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _restore_fp_context(regs, fcc, fcsr);
+}
+
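+/*
+ * Check the saved FCSR for Cause bits that are also enabled; clear them in
+ * the user image and return SIGFPE so the caller can deliver it.
+ */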
+static int fcsr_pending(unsigned int __user *fcsr)
+{
+ int err, sig = 0;
+ unsigned int csr, enabled;
+
+ err = __get_user(csr, fcsr);
+ enabled = ((csr & FPU_CSR_ALL_E) << 24);
+ /*
+	 * If the signal handler set some FPU exceptions, clear them and
+	 * send SIGFPE.
+ */
+ if (csr & enabled) {
+ csr &= ~enabled;
+ err |= __put_user(csr, fcsr);
+ sig = SIGFPE;
+ }
+ return err ?: sig;
+}
+
+/*
+ * Helper routines
+ */
+static int protected_save_fpu_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->fpu.addr;
+ struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
+ uint64_t __user *fcc = &fpu_ctx->fcc;
+ uint32_t __user *fcsr = &fpu_ctx->fcsr;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner())
+ err = save_hw_fpu_context(fpu_ctx);
+ else
+ err = copy_fpu_to_sigcontext(fpu_ctx);
+ unlock_fpu_owner();
+
+ err |= __put_user(FPU_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->fpu.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the FPU context and try again */
+ err = __put_user(0, &regs[0]) |
+ __put_user(0, &regs[31]) |
+ __put_user(0, fcc) |
+ __put_user(0, fcsr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+ return err;
+}
+
+static int protected_restore_fpu_context(struct extctx_layout *extctx)
+{
+ int err = 0, sig = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->fpu.addr;
+ struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
+ uint64_t __user *fcc = &fpu_ctx->fcc;
+ uint32_t __user *fcsr = &fpu_ctx->fcsr;
+
+ err = sig = fcsr_pending(fcsr);
+ if (err < 0)
+ return err;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner())
+ err = restore_hw_fpu_context(fpu_ctx);
+ else
+ err = copy_fpu_from_sigcontext(fpu_ctx);
+ unlock_fpu_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the FPU context and try again */
+ err = __get_user(tmp, &regs[0]) |
+ __get_user(tmp, &regs[31]) |
+ __get_user(tmp, fcc) |
+ __get_user(tmp, fcsr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+ return err ?: sig;
+}
+
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ struct extctx_layout *extctx)
+{
+ int i, err = 0;
+ struct sctx_info __user *info;
+
+ err |= __put_user(regs->csr_era, &sc->sc_pc);
+ err |= __put_user(extctx->flags, &sc->sc_flags);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ if (extctx->fpu.addr)
+ err |= protected_save_fpu_context(extctx);
+
+ /* Set the "end" magic */
+ info = (struct sctx_info *)extctx->end.addr;
+ err |= __put_user(0, &info->magic);
+ err |= __put_user(0, &info->size);
+
+ return err;
+}
+
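+/*
+ * Walk the chain of sctx_info records in the user sigcontext, recording the
+ * location of each known extended context (currently only FPU), until the
+ * terminating record with a zero magic is reached.
+ */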
+static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
+{
+ int err = 0;
+ unsigned int magic, size;
+ struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
+
+	while (1) {
+ err |= __get_user(magic, &info->magic);
+ err |= __get_user(size, &info->size);
+ if (err)
+ return err;
+
+ switch (magic) {
+ case 0: /* END */
+ goto done;
+
+ case FPU_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct fpu_context)))
+ goto invalid;
+ extctx->fpu.addr = info;
+ break;
+
+ default:
+ goto invalid;
+ }
+
+ info = (struct sctx_info *)((char *)info + size);
+ }
+
+done:
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int i, err = 0;
+ struct extctx_layout extctx;
+
+ memset(&extctx, 0, sizeof(struct extctx_layout));
+
+ err = __get_user(extctx.flags, &sc->sc_flags);
+ if (err)
+ goto bad;
+
+ err = parse_extcontext(sc, &extctx);
+ if (err)
+ goto bad;
+
+ conditional_used_math(extctx.flags & SC_USED_FP);
+
+ /*
+ * The signal handler may have used FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (!(extctx.flags & SC_USED_FP))
+ lose_fpu(0);
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->csr_era, &sc->sc_pc);
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ if (extctx.fpu.addr)
+ err |= protected_restore_fpu_context(&extctx);
+
+bad:
+ return err;
+}
+
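+/* Translate used_math() and the faulting access type into SC_* flags. */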
+static unsigned int handle_flags(void)
+{
+ unsigned int flags = 0;
+
+ flags = used_math() ? SC_USED_FP : 0;
+
+ switch (current->thread.error_code) {
+ case 1:
+ flags |= SC_ADDRERR_RD;
+ break;
+ case 2:
+ flags |= SC_ADDRERR_WR;
+ break;
+ }
+
+ return flags;
+}
+
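+/*
+ * Carve an aligned slot (plus its sctx_info header) for one extended context
+ * out of the signal frame area, growing downwards from @base.
+ */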
+static unsigned long extframe_alloc(struct extctx_layout *extctx,
+ struct _ctx_layout *layout,
+ size_t size, unsigned int align, unsigned long base)
+{
+ unsigned long new_base = base - size;
+
+ new_base = round_down(new_base, (align < 16 ? 16 : align));
+ new_base -= sizeof(struct sctx_info);
+
+ layout->addr = (void *)new_base;
+ layout->size = (unsigned int)(base - new_base);
+ extctx->size += layout->size;
+
+ return new_base;
+}
+
+static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
+{
+ unsigned long new_sp = sp;
+
+ memset(extctx, 0, sizeof(struct extctx_layout));
+
+ extctx->flags = handle_flags();
+
+ /* Grow down, alloc "end" context info first. */
+ new_sp -= sizeof(struct sctx_info);
+ extctx->end.addr = (void *)new_sp;
+ extctx->end.size = (unsigned int)sizeof(struct sctx_info);
+ extctx->size += extctx->end.size;
+
+ if (extctx->flags & SC_USED_FP) {
+ if (cpu_has_fpu)
+ new_sp = extframe_alloc(extctx, &extctx->fpu,
+ sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
+ }
+
+ return new_sp;
+}
+
+void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ struct extctx_layout *extctx)
+{
+ unsigned long sp;
+
+ /* Default to using normal stack */
+ sp = regs->regs[3];
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) &&
+ !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
+ return (void __user __force *)(-1UL);
+
+ sp = sigsp(sp, ksig);
+ sp = round_down(sp, 16);
+ sp = setup_extcontext(extctx, sp);
+ sp -= sizeof(struct rt_sigframe);
+
+ if (!IS_ALIGNED(sp, 16))
+ BUG();
+
+ return (void __user *)sp;
+}
+
+/*
+ * Restore the signal mask and the machine context saved in the rt_sigframe
+ * when a signal handler returns.
+ */
+
+asmlinkage long sys_rt_sigreturn(void)
+{
+ int sig;
+ sigset_t set;
+ struct pt_regs *regs;
+ struct rt_sigframe __user *frame;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[3];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ regs->regs[0] = 0; /* No syscall restarting */
+ if (restore_altstack(&frame->rs_uctx.uc_stack))
+ goto badframe;
+
+ return regs->regs[4];
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ int err = 0;
+ struct extctx_layout extctx;
+ struct rt_sigframe __user *frame;
+
+ frame = get_sigframe(ksig, regs, &extctx);
+ if (!access_ok(frame, sizeof(*frame) + extctx.size))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uctx.uc_flags);
+ err |= __put_user(NULL, &frame->rs_uctx.uc_link);
+ err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
+ err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
+ err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = pointer to siginfo
+ * a2 = pointer to ucontext
+ *
+ * c0_era point to the signal handler, $r3 (sp) points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[4] = ksig->sig;
+ regs->regs[5] = (unsigned long) &frame->rs_info;
+ regs->regs[6] = (unsigned long) &frame->rs_uctx;
+ regs->regs[3] = (unsigned long) frame;
+ regs->regs[1] = (unsigned long) sig_return;
+ regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->csr_era, regs->regs[1]);
+
+ return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+ sigset_t *oldset = sigmask_to_save();
+ void *vdso = current->mm->context.vdso;
+
+ /* Are we from a system call? */
+ if (regs->regs[0]) {
+ switch (regs->regs[4]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->regs[4] = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->regs[4] = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
+ regs->regs[4] = regs->orig_a0;
+ regs->csr_era -= 4;
+ }
+
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ rseq_signal_deliver(ksig, regs);
+
+ ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+void arch_do_signal_or_restart(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /* Are we from a system call? */
+ if (regs->regs[0]) {
+ switch (regs->regs[4]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->regs[4] = regs->orig_a0;
+ regs->csr_era -= 4;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[4] = regs->orig_a0;
+ regs->regs[11] = __NR_restart_syscall;
+ regs->csr_era -= 4;
+ break;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+	 * If there's no signal to deliver, we just put the saved sigmask
+	 * back.
+ */
+ restore_saved_sigmask();
+}
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
new file mode 100644
index 000000000..434bfc1cd
--- /dev/null
+++ b/arch/loongarch/kernel/smp.c
@@ -0,0 +1,666 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2000, 2001 Kanoj Sarcar
+ * Copyright (C) 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
+ */
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/tracepoint.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cpu.h>
+#include <asm/idle.h>
+#include <asm/loongson.h>
+#include <asm/mmu_context.h>
+#include <asm/numa.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+
+int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
+
+/* Number of threads (siblings) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* Representing the threads (siblings) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* Representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
+struct secondary_data cpuboot_data;
+static DEFINE_PER_CPU(int, cpu_state);
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNCTION,
+};
+
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNCTION] = "Function call interrupts",
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < NR_IPI; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
+ seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
+ }
+}
+
+/* Send mailbox buffer via Mail_Send */
+static void csr_mail_send(uint64_t data, int cpu, int mailbox)
+{
+ uint64_t val;
+
+ /* Send high 32 bits */
+ val = IOCSR_MBUF_SEND_BLOCKING;
+ val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+ val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+ val |= (data & IOCSR_MBUF_SEND_H32_MASK);
+ iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+
+ /* Send low 32 bits */
+ val = IOCSR_MBUF_SEND_BLOCKING;
+ val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+ val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+ val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
+ iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+}
+
+static u32 ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
+ /* Clear the ipi register to clear the interrupt */
+ iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
+ smp_mb();
+
+ return action;
+}
+
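+/* Send one IOCSR IPI per bit set in @action to the target cpu. */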
+static void ipi_write_action(int cpu, u32 action)
+{
+ unsigned int irq = 0;
+
+ while ((irq = ffs(action))) {
+ uint32_t val = IOCSR_IPI_SEND_BLOCKING;
+
+ val |= (irq - 1);
+ val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
+ iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
+ action &= ~BIT(irq - 1);
+ }
+}
+
+void loongson_send_ipi_single(int cpu, unsigned int action)
+{
+ ipi_write_action(cpu_logical_map(cpu), (u32)action);
+}
+
+void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ ipi_write_action(cpu_logical_map(i), (u32)action);
+}
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+void smp_send_reschedule(int cpu)
+{
+ loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
+
+irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
+{
+ unsigned int action;
+ unsigned int cpu = smp_processor_id();
+
+ action = ipi_read_clear(cpu_logical_map(cpu));
+
+ if (action & SMP_RESCHEDULE) {
+ scheduler_ipi();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
+ }
+
+ if (action & SMP_CALL_FUNCTION) {
+ generic_smp_call_function_interrupt();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+void __init loongson_smp_setup(void)
+{
+ cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+ cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+ pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
+}
+
+void __init loongson_prepare_cpus(unsigned int max_cpus)
+{
+ int i = 0;
+
+ for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
+ set_cpu_present(i, true);
+ csr_mail_send(0, __cpu_logical_map[i], 0);
+ }
+
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * Set up the PC, SP, and TP of a secondary processor and start it running!
+ */
+void loongson_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long entry;
+
+ pr_info("Booting CPU#%d...\n", cpu);
+
+ entry = __pa_symbol((unsigned long)&smpboot_entry);
+ cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
+ cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);
+
+ csr_mail_send(entry, cpu_logical_map(cpu), 0);
+
+ loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
+}
+
+/*
+ * SMP init and finish on secondary CPUs
+ */
+void loongson_init_secondary(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
+ ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
+
+ change_csr_ecfg(ECFG0_IM, imask);
+
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+
+#ifdef CONFIG_NUMA
+ numa_add_cpu(cpu);
+#endif
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+ cpu_data[cpu].core =
+ cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+ cpu_data[cpu].package =
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+}
+
+void loongson_smp_finish(void)
+{
+ local_irq_enable();
+ iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
+ pr_info("CPU#%d finished\n", smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int loongson_cpu_disable(void)
+{
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ if (io_master(cpu))
+ return -EBUSY;
+
+#ifdef CONFIG_NUMA
+ numa_remove_cpu(cpu);
+#endif
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ local_irq_save(flags);
+ irq_migrate_all_off_this_cpu();
+ clear_csr_ecfg(ECFG0_IM);
+ local_irq_restore(flags);
+ local_flush_tlb_all();
+
+ return 0;
+}
+
+void loongson_cpu_die(unsigned int cpu)
+{
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ mb();
+}
+
+void play_dead(void)
+{
+ register uint64_t addr;
+ register void (*init_fn)(void);
+
+ idle_task_exit();
+ local_irq_enable();
+ set_csr_ecfg(ECFGF_IPI);
+ __this_cpu_write(cpu_state, CPU_DEAD);
+
+ __smp_mb();
+ do {
+ __asm__ __volatile__("idle 0\n\t");
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+ init_fn();
+ unreachable();
+}
+
+#endif
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+static int loongson_ipi_suspend(void)
+{
+ return 0;
+}
+
+static void loongson_ipi_resume(void)
+{
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+}
+
+static struct syscore_ops loongson_ipi_syscore_ops = {
+ .resume = loongson_ipi_resume,
+ .suspend = loongson_ipi_suspend,
+};
+
+/*
+ * Enable boot cpu ipi before enabling nonboot cpus
+ * during syscore_resume.
+ */
+static int __init ipi_pm_init(void)
+{
+ register_syscore_ops(&loongson_ipi_syscore_ops);
+ return 0;
+}
+
+core_initcall(ipi_pm_init);
+#endif
+
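+/* Record which already set-up cpus are SMT siblings of @cpu in cpu_sibling_map. */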
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+ if (smp_num_siblings <= 1)
+ cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
+ else {
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+ }
+}
+
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+ for_each_cpu(i, &cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
+ }
+ }
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpus_are_siblings(i, k))
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
+/* Preload SMP state for boot cpu */
+void smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu, node, rr_node;
+
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+ set_my_cpu_offset(per_cpu_offset(0));
+
+ rr_node = first_node(node_online_map);
+ for_each_possible_cpu(cpu) {
+ node = early_cpu_to_node(cpu);
+
+ /*
+ * The mapping between present cpus and nodes has been
+ * built during MADT and SRAT parsing.
+ *
+		 * If possible cpus == present cpus here, early_cpu_to_node
+		 * will return a valid node.
+		 *
+		 * If possible cpus > present cpus here (e.g. some possible
+		 * cpus will be added by cpu-hotplug later), then for possible
+		 * but not present cpus, early_cpu_to_node will return
+		 * NUMA_NO_NODE, and we just map them to online nodes in a
+		 * round-robin way. Once hotplugged, the correct mapping will
+		 * be built for them.
+ */
+ if (node != NUMA_NO_NODE)
+ set_cpu_numa_node(cpu, node);
+ else {
+ set_cpu_numa_node(cpu, rr_node);
+ rr_node = next_node_in(rr_node, node_online_map);
+ }
+ }
+}
+
+/* called from main before smp_init() */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ init_new_context(current, &init_mm);
+ current_thread_info()->cpu = 0;
+ loongson_prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ loongson_boot_secondary(cpu, tidle);
+
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
+
+ return 0;
+}
+
+/*
+ * First C code run on the secondary CPUs after being started up by
+ * the master.
+ */
+asmlinkage void start_secondary(void)
+{
+ unsigned int cpu;
+
+ sync_counter();
+ cpu = raw_smp_processor_id();
+ set_my_cpu_offset(per_cpu_offset(cpu));
+ rcu_cpu_starting(cpu);
+
+ cpu_probe();
+ constant_clockevent_init();
+ loongson_init_secondary();
+
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
+ notify_cpu_starting(cpu);
+
+ /* Notify boot CPU that we're starting */
+ complete(&cpu_starting);
+
+ /* The CPU is running, now mark it online */
+ set_cpu_online(cpu, true);
+
+ calculate_cpu_foreign_map();
+
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up()
+ */
+ complete(&cpu_running);
+
+ /*
+	 * IRQs will be enabled in loongson_smp_finish(); enabling them too
+	 * early is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ loongson_smp_finish();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_this_cpu(void *dummy)
+{
+ set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
+ local_irq_disable();
+ while (true);
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+ local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
+ preempt_disable();
+
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
+ local_flush_tlb_mm(mm);
+ }
+
+ preempt_enable();
+}
+
+struct flush_tlb_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ preempt_disable();
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
+ local_flush_tlb_range(vma, start, end);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ preempt_disable();
+ if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
+ }
+ local_flush_tlb_page(vma, page);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+static void flush_tlb_one_ipi(void *info)
+{
+ unsigned long vaddr = (unsigned long) info;
+
+ local_flush_tlb_one(vaddr);
+}
+
+void flush_tlb_one(unsigned long vaddr)
+{
+ on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
+}
+EXPORT_SYMBOL(flush_tlb_one);
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
new file mode 100644
index 000000000..3a690f96f
--- /dev/null
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long addr;
+ struct pt_regs dummyregs;
+ struct unwind_state state;
+
+ regs = &dummyregs;
+
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
+ }
+
+ regs->regs[1] = 0;
+ for (unwind_start(&state, task, regs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || !consume_entry(cookie, addr))
+ break;
+ }
+}
+
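+/*
+ * Copy one user stack frame (fp/ra pair) with page faults disabled;
+ * returns 0 if the frame is inaccessible or does not move the frame
+ * pointer towards higher addresses.
+ */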
+static int
+copy_stack_frame(unsigned long fp, struct stack_frame *frame)
+{
+ int ret = 1;
+ unsigned long err;
+ unsigned long __user *user_frame_tail;
+
+ user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
+ if (!access_ok(user_frame_tail, sizeof(*frame)))
+ return 0;
+
+ pagefault_disable();
+ err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
+ if (err || (unsigned long)user_frame_tail >= frame->fp)
+ ret = 0;
+ pagefault_enable();
+
+ return ret;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = regs->regs[22];
+
+ while (fp && !((unsigned long)fp & 0xf)) {
+ struct stack_frame frame;
+
+ frame.fp = 0;
+ frame.ra = 0;
+ if (!copy_stack_frame(fp, &frame))
+ break;
+ if (!frame.ra)
+ break;
+ if (!consume_entry(cookie, frame.ra))
+ break;
+ fp = frame.fp;
+ }
+}
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
new file mode 100644
index 000000000..202a163cb
--- /dev/null
+++ b/arch/loongarch/kernel/switch.S
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+/*
+ * task_struct *__switch_to(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 5
+SYM_FUNC_START(__switch_to)
+ csrrd t1, LOONGARCH_CSR_PRMD
+ stptr.d t1, a0, THREAD_CSRPRMD
+
+ cpu_save_nonscratch a0
+ stptr.d ra, a0, THREAD_REG01
+ stptr.d a3, a0, THREAD_SCHED_RA
+ stptr.d a4, a0, THREAD_SCHED_CFA
+ move tp, a2
+ cpu_restore_nonscratch a1
+
+ li.w t0, _THREAD_SIZE
+ PTR_ADD t0, t0, tp
+ set_saved_sp t0, t1, t2
+
+ ldptr.d t1, a1, THREAD_CSRPRMD
+ csrwr t1, LOONGARCH_CSR_PRMD
+
+ jr ra
+SYM_FUNC_END(__switch_to)
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
new file mode 100644
index 000000000..3fc4211db
--- /dev/null
+++ b/arch/loongarch/kernel/syscall.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/capability.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+
+#include <asm/asm.h>
+#include <asm/signal.h>
+#include <asm/switch_to.h>
+#include <asm-generic/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
+ prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+{
+ if (offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+}
+
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
+
+typedef long (*sys_call_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long, unsigned long);
+
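+/*
+ * Syscall entry from handle_sys: remember the syscall number for restart
+ * handling, step csr_era past the syscall instruction, preserve orig_a0,
+ * and dispatch through sys_call_table via the generic entry/exit hooks.
+ */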
+void noinstr do_syscall(struct pt_regs *regs)
+{
+ unsigned long nr;
+ sys_call_fn syscall_fn;
+
+ nr = regs->regs[11];
+ /* Set for syscall restarting */
+ if (nr < NR_syscalls)
+ regs->regs[0] = nr + 1;
+
+ regs->csr_era += 4;
+ regs->orig_a0 = regs->regs[4];
+ regs->regs[4] = -ENOSYS;
+
+ nr = syscall_enter_from_user_mode(regs, nr);
+
+ if (nr < NR_syscalls) {
+ syscall_fn = sys_call_table[nr];
+ regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
+ regs->regs[7], regs->regs[8], regs->regs[9]);
+ }
+
+ syscall_exit_to_user_mode(regs);
+}
diff --git a/arch/loongarch/kernel/sysrq.c b/arch/loongarch/kernel/sysrq.c
new file mode 100644
index 000000000..366baef72
--- /dev/null
+++ b/arch/loongarch/kernel/sysrq.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch specific sysrq operations.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu-features.h>
+#include <asm/tlb.h>
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&show_lock, flags);
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ pr_info("\n");
+
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(int key)
+{
+ sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+ schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static struct sysrq_key_op sysrq_tlbdump_op = {
+ .handler = sysrq_handle_tlbdump,
+ .help_msg = "show-tlbs(x)",
+ .action_msg = "Show TLB entries",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init loongarch_sysrq_init(void)
+{
+ return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(loongarch_sysrq_init);
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
new file mode 100644
index 000000000..150df6e17
--- /dev/null
+++ b/arch/loongarch/kernel/time.c
@@ -0,0 +1,222 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common time service routines for LoongArch machines.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+#include <asm/time.h>
+
+u64 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+u64 const_clock_freq;
+EXPORT_SYMBOL(const_clock_freq);
+
+static DEFINE_RAW_SPINLOCK(state_lock);
+static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);
+
+static void constant_event_handler(struct clock_event_device *dev)
+{
+}
+
+irqreturn_t constant_timer_interrupt(int irq, void *data)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ /* Clear Timer Interrupt */
+ write_csr_tintclear(CSR_TINTCLR_TI);
+ cd = &per_cpu(constant_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static int constant_set_state_oneshot(struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config |= CSR_TCFG_EN;
+ timer_config &= ~CSR_TCFG_PERIOD;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_periodic(struct clock_event_device *evt)
+{
+ unsigned long period;
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ period = const_clock_freq / HZ;
+ timer_config = period & CSR_TCFG_VAL;
+ timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_shutdown(struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config &= ~CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ delta &= CSR_TCFG_VAL;
+ timer_config = delta | CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ return 0;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+ unsigned long lpj = (unsigned long)const_clock_freq;
+
+ do_div(lpj, HZ);
+
+ return lpj;
+}
+
+static long init_timeval;
+
+void sync_counter(void)
+{
+	/* Ensure the counter begins at 0 */
+ csr_write64(-init_timeval, LOONGARCH_CSR_CNTC);
+}
+
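+/* Map the constant timer exception through the CPU interrupt controller domain. */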
+static int get_timer_irq(void)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+ if (d)
+ return irq_create_mapping(d, EXCCODE_TIMER - EXCCODE_INT_START);
+
+ return -EINVAL;
+}
+
+int constant_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long min_delta = 0x600;
+ unsigned long max_delta = (1UL << 48) - 1;
+ struct clock_event_device *cd;
+ static int irq = 0, timer_irq_installed = 0;
+
+ if (!timer_irq_installed) {
+ irq = get_timer_irq();
+ if (irq < 0)
+ pr_err("Failed to map irq %d (timer)\n", irq);
+ }
+
+ cd = &per_cpu(constant_clockevent_device, cpu);
+
+ cd->name = "Constant";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;
+
+ cd->irq = irq;
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = constant_set_state_oneshot;
+ cd->set_state_oneshot_stopped = constant_set_state_shutdown;
+ cd->set_state_periodic = constant_set_state_periodic;
+ cd->set_state_shutdown = constant_set_state_shutdown;
+ cd->set_next_event = constant_timer_next_event;
+ cd->event_handler = constant_event_handler;
+
+ clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);
+
+ if (timer_irq_installed)
+ return 0;
+
+ timer_irq_installed = 1;
+
+ sync_counter();
+
+ if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+
+ lpj_fine = get_loops_per_jiffy();
+ pr_info("Constant clock event device register\n");
+
+ return 0;
+}
+
+static u64 read_const_counter(struct clocksource *clk)
+{
+ return drdtime();
+}
+
+static u64 native_sched_clock(void)
+{
+ return read_const_counter(NULL);
+}
+
+static struct clocksource clocksource_const = {
+ .name = "Constant",
+ .rating = 400,
+ .read = read_const_counter,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vdso_clock_mode = VDSO_CLOCKMODE_CPU,
+};
+
+int __init constant_clocksource_init(void)
+{
+ int res;
+ unsigned long freq = const_clock_freq;
+
+ res = clocksource_register_hz(&clocksource_const, freq);
+
+ sched_clock_register(native_sched_clock, 64, freq);
+
+ pr_info("Constant clock source device register\n");
+
+ return res;
+}
+
+void __init time_init(void)
+{
+ if (!cpu_has_cpucfg)
+ const_clock_freq = cpu_clock_freq;
+ else
+ const_clock_freq = calc_const_freq();
+
+ init_timeval = drdtime() - csr_read64(LOONGARCH_CSR_CNTC);
+
+ constant_clockevent_init();
+ constant_clocksource_init();
+}
diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c
new file mode 100644
index 000000000..caa7cd859
--- /dev/null
+++ b/arch/loongarch/kernel/topology.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+#include <asm/bootinfo.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+#ifdef CONFIG_HOTPLUG_CPU
+int arch_register_cpu(int cpu)
+{
+ int ret;
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = 1;
+ ret = register_cpu(c, cpu);
+ if (ret < 0)
+ pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(arch_register_cpu);
+
+void arch_unregister_cpu(int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = 0;
+ unregister_cpu(c);
+}
+EXPORT_SYMBOL(arch_unregister_cpu);
+#endif
+
+static int __init topology_init(void)
+{
+ int i, ret;
+
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = !io_master(i);
+ ret = register_cpu(c, i);
+ if (ret < 0)
+ pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
new file mode 100644
index 000000000..1a4dce84e
--- /dev/null
+++ b/arch/loongarch/kernel/traps.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/memblock.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/siginfo.h>
+#include <asm/stacktrace.h>
+#include <asm/tlb.h>
+#include <asm/types.h>
+#include <asm/unwind.h>
+
+#include "access-helper.h"
+
+extern asmlinkage void handle_ade(void);
+extern asmlinkage void handle_ale(void);
+extern asmlinkage void handle_sys(void);
+extern asmlinkage void handle_bp(void);
+extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_fpu(void);
+extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_lbt(void);
+extern asmlinkage void handle_lsx(void);
+extern asmlinkage void handle_lasx(void);
+extern asmlinkage void handle_reserved(void);
+extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_vint(void);
+
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl, bool user)
+{
+ unsigned long addr;
+ struct unwind_state state;
+ struct pt_regs *pregs = (struct pt_regs *)regs;
+
+ if (!task)
+ task = current;
+
+ if (user_mode(regs))
+ state.type = UNWINDER_GUESS;
+
+ printk("%sCall Trace:", loglvl);
+ for (unwind_start(&state, task, pregs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ print_ip_sym(loglvl, addr);
+ }
+ printk("%s\n", loglvl);
+}
+
+static void show_stacktrace(struct task_struct *task,
+ const struct pt_regs *regs, const char *loglvl, bool user)
+{
+ int i;
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long stackdata;
+ unsigned long *sp = (unsigned long *)regs->regs[3];
+
+ printk("%sStack :", loglvl);
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk("%s ", loglvl);
+ }
+ if (i > 39) {
+ pr_cont(" ...");
+ break;
+ }
+
+ if (__get_addr(&stackdata, sp++, user)) {
+ pr_cont(" (Bad stack address)");
+ break;
+ }
+
+ pr_cont(" %0*lx", field, stackdata);
+ i++;
+ }
+ pr_cont("\n");
+ show_backtrace(task, regs, loglvl, user);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ struct pt_regs regs;
+
+ regs.csr_crmd = 0;
+ if (sp) {
+ regs.csr_era = 0;
+ regs.regs[1] = 0;
+ regs.regs[3] = (unsigned long)sp;
+ } else {
+ if (!task || task == current)
+ prepare_frametrace(&regs);
+ else {
+ regs.csr_era = task->thread.reg01;
+ regs.regs[1] = 0;
+ regs.regs[3] = task->thread.reg03;
+ regs.regs[22] = task->thread.reg22;
+ }
+ }
+
+ show_stacktrace(task, &regs, loglvl, false);
+}
+
+static void show_code(unsigned int *pc, bool user)
+{
+ long i;
+ unsigned int insn;
+
+ printk("Code:");
+
+	for (i = -3; i < 6; i++) {
+ if (__get_inst(&insn, pc + i, user)) {
+ pr_cont(" (Bad address in era)\n");
+ break;
+ }
+ pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+ }
+ pr_cont("\n");
+}
+
+static void __show_regs(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int excsubcode;
+ unsigned int exccode;
+ int i;
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printk("$%2d :", i);
+ pr_cont(" %0*lx", field, regs->regs[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ pr_cont("\n");
+ }
+
+ /*
+ * Saved csr registers
+ */
+ printk("era : %0*lx %pS\n", field, regs->csr_era,
+ (void *) regs->csr_era);
+ printk("ra : %0*lx %pS\n", field, regs->regs[1],
+ (void *) regs->regs[1]);
+
+ printk("CSR crmd: %08lx ", regs->csr_crmd);
+ printk("CSR prmd: %08lx ", regs->csr_prmd);
+ printk("CSR euen: %08lx ", regs->csr_euen);
+ printk("CSR ecfg: %08lx ", regs->csr_ecfg);
+ printk("CSR estat: %08lx ", regs->csr_estat);
+
+ pr_cont("\n");
+
+ exccode = ((regs->csr_estat) & CSR_ESTAT_EXC) >> CSR_ESTAT_EXC_SHIFT;
+ excsubcode = ((regs->csr_estat) & CSR_ESTAT_ESUBCODE) >> CSR_ESTAT_ESUBCODE_SHIFT;
+ printk("ExcCode : %x (SubCode %x)\n", exccode, excsubcode);
+
+ if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
+ printk("BadVA : %0*lx\n", field, regs->csr_badvaddr);
+
+ printk("PrId : %08x (%s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
+ cpu_family_string());
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs((struct pt_regs *)regs);
+ dump_stack();
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ print_modules();
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
+ current->comm, current->pid, current_thread_info(), current);
+
+ show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
+ show_code((void *)regs->csr_era, user_mode(regs));
+ printk("\n");
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+void __noreturn die(const char *str, struct pt_regs *regs)
+{
+ static int die_counter;
+ int sig = SIGSEGV;
+
+ oops_enter();
+
+ if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
+ SIGSEGV) == NOTIFY_STOP)
+ sig = 0;
+
+ console_verbose();
+ raw_spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers(regs);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
+
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ make_task_dead(sig);
+}
+
+static inline void setup_vint_size(unsigned int size)
+{
+ unsigned int vs;
+
+ vs = ilog2(size/4);
+
+ if (vs == 0 || vs > 7)
+ panic("vint_size %d Not support yet", vs);
+
+ csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
+}
+
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcsr_sig(unsigned long fcsr, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ int si_code = FPE_FLTUNK;
+
+ if (fcsr & FPU_CSR_INV_X)
+ si_code = FPE_FLTINV;
+ else if (fcsr & FPU_CSR_DIV_X)
+ si_code = FPE_FLTDIV;
+ else if (fcsr & FPU_CSR_OVF_X)
+ si_code = FPE_FLTOVF;
+ else if (fcsr & FPU_CSR_UDF_X)
+ si_code = FPE_FLTUND;
+ else if (fcsr & FPU_CSR_INE_X)
+ si_code = FPE_FLTRES;
+
+ force_sig_fault(SIGFPE, si_code, fault_addr);
+}
+
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+{
+ int si_code;
+
+ switch (sig) {
+ case 0:
+ return 0;
+
+ case SIGFPE:
+ force_fcsr_sig(fcsr, fault_addr, current);
+ return 1;
+
+ case SIGBUS:
+ force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
+ return 1;
+
+ case SIGSEGV:
+ mmap_read_lock(current->mm);
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
+ si_code = SEGV_ACCERR;
+ else
+ si_code = SEGV_MAPERR;
+ mmap_read_unlock(current->mm);
+ force_sig_fault(SIGSEGV, si_code, fault_addr);
+ return 1;
+
+ default:
+ force_sig(sig);
+ return 1;
+ }
+}
+
+/*
+ * Delayed fp exceptions when doing a lazy ctx switch
+ */
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
+{
+ int sig;
+ void __user *fault_addr;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
+ SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
+ local_irq_enable();
+
+ die_if_kernel("FP exception in kernel code", regs);
+
+ sig = SIGFPE;
+ fault_addr = (void __user *) regs->csr_era;
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcsr);
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ade(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ die_if_kernel("Kernel ade access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ale(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+
+ irqentry_exit(regs, state);
+}
+
+#ifdef CONFIG_GENERIC_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+ return 1;
+}
+#endif /* CONFIG_GENERIC_BUG */
+
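+/*
+ * Dispatch a BRK_BUG break through report_bug(): warnings skip the BUG
+ * instruction and continue, real BUGs die in the kernel or raise SIGTRAP.
+ */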
+static void bug_handler(struct pt_regs *regs)
+{
+ switch (report_bug(regs->csr_era, regs)) {
+ case BUG_TRAP_TYPE_BUG:
+ case BUG_TRAP_TYPE_NONE:
+ die_if_kernel("Oops - BUG", regs);
+ force_sig(SIGTRAP);
+ break;
+
+ case BUG_TRAP_TYPE_WARN:
+ /* Skip the BUG instruction and continue */
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+ break;
+ }
+}
+
+asmlinkage void noinstr do_bp(struct pt_regs *regs)
+{
+ bool user = user_mode(regs);
+ unsigned int opcode, bcode;
+ unsigned long era = exception_era(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ current->thread.trap_nr = read_csr_excode();
+ if (__get_inst(&opcode, (u32 *)era, user))
+ goto out_sigsegv;
+
+ bcode = (opcode & 0x7fff);
+
+ /*
+	 * Notify the kprobe handlers if the instruction is likely to
+	 * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "Kprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "Kprobe_SingleStep", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_BP:
+ if (notify_die(DIE_UPROBE, "Uprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOLBP:
+ if (notify_die(DIE_UPROBE_XOL, "Uprobe_XOL", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ default:
+ if (notify_die(DIE_TRAP, "Break", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ }
+
+ switch (bcode) {
+ case BRK_BUG:
+ bug_handler(regs);
+ break;
+ case BRK_DIVZERO:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
+ break;
+ case BRK_OVERFLOW:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
+ break;
+ default:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
+ break;
+ }
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void noinstr do_watch(struct pt_regs *regs)
+{
+ pr_warn("Hardware watch point handler not implemented!\n");
+}
+
+asmlinkage void noinstr do_ri(struct pt_regs *regs)
+{
+ int status = SIGILL;
+ unsigned int opcode = 0;
+ unsigned int __user *era = (unsigned int __user *)exception_era(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ current->thread.trap_nr = read_csr_excode();
+
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
+ SIGILL) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Reserved instruction in kernel code", regs);
+
+ if (unlikely(get_user(opcode, era) < 0)) {
+ status = SIGSEGV;
+ current->thread.error_code = 1;
+ }
+
+ force_sig(status);
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
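+/* Make the current task the FPU owner, initialising its FP context on first use. */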
+static void init_restore_fp(void)
+{
+ if (!used_math()) {
+ /* First time FP context user. */
+ init_fpu();
+ } else {
+ /* This task has formerly used the FP context */
+ if (!is_fpu_owner())
+ own_fpu_inatomic(1);
+ }
+
+ BUG_ON(!is_fp_enabled());
+}
+
+asmlinkage void noinstr do_fpu(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ die_if_kernel("do_fpu invoked from kernel context!", regs);
+
+ preempt_disable();
+ init_restore_fp();
+ preempt_enable();
+
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lsx(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ force_sig(SIGILL);
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lasx(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ force_sig(SIGILL);
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ force_sig(SIGILL);
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_reserved(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ /*
+ * Game over - no way to handle this if it ever occurs. Most probably
+ * caused by a fatal error after another hardware/software error.
+ */
+ pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
+ read_csr_excode(), current->pid, current->comm);
+ die_if_kernel("do_reserved exception", regs);
+ force_sig(SIGUNUSED);
+
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void cache_parity_error(void)
+{
+ /* For the moment, report the problem and hang. */
+ pr_err("Cache error exception:\n");
+ pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
+ pr_err("csr_merrera == %016llx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+ panic("Can't handle the cache error!");
+}
+
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter_rcu();
+ old_regs = set_irq_regs(regs);
+ handle_arch_irq(regs);
+ set_irq_regs(old_regs);
+ irq_exit_rcu();
+}
+
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
+{
+ register int cpu;
+ register unsigned long stack;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ cpu = smp_processor_id();
+
+ if (on_irq_stack(cpu, sp))
+ handle_loongarch_irq(regs);
+ else {
+ stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
+
+ /* Save task's sp on IRQ stack for unwinding */
+ *(unsigned long *)stack = sp;
+
+ __asm__ __volatile__(
+ "move $s0, $sp \n" /* Preserve sp */
+ "move $sp, %[stk] \n" /* Switch stack */
+ "move $a0, %[regs] \n"
+ "bl handle_loongarch_irq \n"
+ "move $sp, $s0 \n" /* Restore sp */
+ : /* No outputs */
+ : [stk] "r" (stack), [regs] "r" (regs)
+ : "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
+ "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+ }
+
+ irqentry_exit(regs, state);
+}
+
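+/*
+ * The 64K-aligned exception vector table: 128 slots of VECSIZE bytes each.
+ * per_cpu_trap_init() and set_handler() copy the actual handlers into it.
+ */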
+unsigned long eentry;
+unsigned long tlbrentry;
+
+long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
+
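+/*
+ * Program CSR.EENTRY and CSR.MERRENTRY with the vector table base, and
+ * point CSR.TLBRENTRY at slot 80 of the table.
+ */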
+static void configure_exception_vector(void)
+{
+ eentry = (unsigned long)exception_handlers;
+ tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
+
+ csr_write64(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
+ csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+}
+
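+/*
+ * Per-CPU trap initialisation: program the vector CSRs, prime the ASID
+ * cache and active_mm, install handle_reserved as the default handler on
+ * the boot CPU, then initialise the TLB and caches.
+ */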
+void per_cpu_trap_init(int cpu)
+{
+ unsigned int i;
+
+ setup_vint_size(VECSIZE);
+
+ configure_exception_vector();
+
+ if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+ mmgrab(&init_mm);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Initialise exception handlers */
+ if (cpu == 0)
+ for (i = 0; i < 64; i++)
+ set_handler(i * VECSIZE, handle_reserved, VECSIZE);
+
+ tlb_init(cpu);
+ cpu_cache_init();
+}
+
+/* Install CPU exception handler */
+void set_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ memcpy((void *)(eentry + offset), addr, size);
+ local_flush_icache_range(eentry + offset, eentry + offset + size);
+}
+
+static const char panic_null_cerr[] =
+ "Trying to set NULL cache error exception handler\n";
+
+/*
+ * Install an uncached CPU exception handler.
+ * This is suitable only for the cache error exception, the only
+ * exception handler that runs uncached.
+ */
+void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));
+
+ if (!addr)
+ panic(panic_null_cerr);
+
+ memcpy((void *)(uncached_eentry + offset), addr, size);
+}
+
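+/*
+ * Boot-time installation of the real exception handlers; all interrupt
+ * vectors funnel into handle_vint.
+ */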
+void __init trap_init(void)
+{
+ long i;
+
+ /* Set interrupt vector handler */
+ for (i = EXCCODE_INT_START; i < EXCCODE_INT_END; i++)
+ set_handler(i * VECSIZE, handle_vint, VECSIZE);
+
+ set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
+ set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
+ set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
+ set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
+ set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
+ set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
+ set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
+ set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
+ set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
+ set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
+ set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
+ set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
+
+ cache_error_setup();
+
+ local_flush_icache_range(eentry, eentry + 0x400);
+}
diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c
new file mode 100644
index 000000000..0c20e5184
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_guess.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+ else if (state->first)
+ return state->pc;
+
+ return *(unsigned long *)(state->sp);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ memset(state, 0, sizeof(*state));
+
+ if (regs) {
+ state->sp = regs->regs[3];
+ state->pc = regs->csr_era;
+ } else if (task && task != current) {
+ state->sp = thread_saved_fp(task);
+ state->pc = thread_saved_ra(task);
+ } else {
+ state->sp = (unsigned long)__builtin_frame_address(0);
+ state->pc = (unsigned long)__builtin_return_address(0);
+ }
+
+ state->task = task;
+ state->first = true;
+
+ get_stack_info(state->sp, state->task, &state->stack_info);
+
+ if (!unwind_done(state) && !__kernel_text_address(state->pc))
+ unwind_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long addr;
+
+ if (unwind_done(state))
+ return false;
+
+ if (state->first)
+ state->first = false;
+
+ do {
+ for (state->sp += sizeof(unsigned long);
+ state->sp < info->end;
+ state->sp += sizeof(unsigned long)) {
+ addr = *(unsigned long *)(state->sp);
+
+ if (__kernel_text_address(addr))
+ return true;
+ }
+
+ state->sp = info->next_sp;
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
new file mode 100644
index 000000000..1c5b65756
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -0,0 +1,187 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kallsyms.h>
+
+#include <asm/inst.h>
+#include <asm/ptrace.h>
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ if (unwind_done(state))
+ return 0;
+ else if (state->type)
+ return state->pc;
+ else if (state->first)
+ return state->pc;
+
+ return *(unsigned long *)(state->sp);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
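+/*
+ * Conservative fallback: scan up the stack for anything that looks like a
+ * kernel text address.
+ */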
+static bool unwind_by_guess(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long addr;
+
+ for (state->sp += sizeof(unsigned long);
+ state->sp < info->end;
+ state->sp += sizeof(unsigned long)) {
+ addr = *(unsigned long *)(state->sp);
+ if (__kernel_text_address(addr))
+ return true;
+ }
+
+ return false;
+}
+
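+/*
+ * Decode the function prologue (located via kallsyms) to find the stack
+ * frame size and the offset at which $ra was saved, then step to the caller.
+ */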
+static bool unwind_by_prologue(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ union loongarch_instruction *ip, *ip_end;
+ long frame_ra = -1;
+ unsigned long frame_size = 0;
+ unsigned long size, offset, pc = state->pc;
+
+ if (state->sp >= info->end || state->sp < info->begin)
+ return false;
+
+ if (!kallsyms_lookup_size_offset(pc, &size, &offset))
+ return false;
+
+ ip = (union loongarch_instruction *)(pc - offset);
+ ip_end = (union loongarch_instruction *)pc;
+
+ while (ip < ip_end) {
+ if (is_stack_alloc_ins(ip)) {
+ frame_size = (1 << 12) - ip->reg2i12_format.immediate;
+ ip++;
+ break;
+ }
+ ip++;
+ }
+
+ if (!frame_size) {
+ if (state->first)
+ goto first;
+
+ return false;
+ }
+
+ while (ip < ip_end) {
+ if (is_ra_save_ins(ip)) {
+ frame_ra = ip->reg2i12_format.immediate;
+ break;
+ }
+ if (is_branch_ins(ip))
+ break;
+ ip++;
+ }
+
+ if (frame_ra < 0) {
+ if (state->first) {
+ state->sp = state->sp + frame_size;
+ goto first;
+ }
+ return false;
+ }
+
+ if (state->first)
+ state->first = false;
+
+ state->pc = *(unsigned long *)(state->sp + frame_ra);
+ state->sp = state->sp + frame_size;
+ return !!__kernel_text_address(state->pc);
+
+first:
+ state->first = false;
+ if (state->pc == state->ra)
+ return false;
+
+ state->pc = state->ra;
+
+ return !!__kernel_text_address(state->ra);
+}
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ memset(state, 0, sizeof(*state));
+ state->type = UNWINDER_PROLOGUE;
+
+ if (regs) {
+ state->sp = regs->regs[3];
+ state->pc = regs->csr_era;
+ state->ra = regs->regs[1];
+ if (!__kernel_text_address(state->pc))
+ state->type = UNWINDER_GUESS;
+ } else if (task && task != current) {
+ state->sp = thread_saved_fp(task);
+ state->pc = thread_saved_ra(task);
+ state->ra = 0;
+ } else {
+ state->sp = (unsigned long)__builtin_frame_address(0);
+ state->pc = (unsigned long)__builtin_return_address(0);
+ state->ra = 0;
+ }
+
+ state->task = task;
+ state->first = true;
+
+ get_stack_info(state->sp, state->task, &state->stack_info);
+
+ if (!unwind_done(state) && !__kernel_text_address(state->pc))
+ unwind_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ struct pt_regs *regs;
+ unsigned long pc;
+
+ if (unwind_done(state))
+ return false;
+
+ do {
+ switch (state->type) {
+ case UNWINDER_GUESS:
+ state->first = false;
+ if (unwind_by_guess(state))
+ return true;
+ break;
+
+ case UNWINDER_PROLOGUE:
+ if (unwind_by_prologue(state))
+ return true;
+
+ if (info->type == STACK_TYPE_IRQ &&
+ info->end == state->sp) {
+ regs = (struct pt_regs *)info->next_sp;
+ pc = regs->csr_era;
+
+ if (user_mode(regs) || !__kernel_text_address(pc))
+ return false;
+
+ state->pc = pc;
+ state->sp = regs->regs[3];
+ state->ra = regs->regs[1];
+ state->first = true;
+ get_stack_info(state->sp, state->task, info);
+
+ return true;
+ }
+ }
+
+ state->sp = info->next_sp;
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+ return false;
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
new file mode 100644
index 000000000..8c9826062
--- /dev/null
+++ b/arch/loongarch/kernel/vdso.c
@@ -0,0 +1,143 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+#include <generated/vdso-offsets.h>
+
+extern char vdso_start[], vdso_end[];
+
+/* Kernel-provided data used by the VDSO. */
+static union {
+ u8 page[VDSO_DATA_SIZE];
+ struct loongarch_vdso_data vdata;
+} loongarch_vdso_data __page_aligned_data;
+
+static struct page *vdso_pages[] = { NULL };
+struct vdso_data *vdso_data = loongarch_vdso_data.vdata.data;
+struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
+
+static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+ current->mm->context.vdso = (void *)(new_vma->vm_start);
+
+ return 0;
+}
+
+struct loongarch_vdso_info vdso_info = {
+ .vdso = vdso_start,
+ .size = PAGE_SIZE,
+ .code_mapping = {
+ .name = "[vdso]",
+ .pages = vdso_pages,
+ .mremap = vdso_mremap,
+ },
+ .data_mapping = {
+ .name = "[vvar]",
+ },
+ .offset_sigreturn = vdso_offset_sigreturn,
+};
+
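+/*
+ * Record each CPU's NUMA node in the vdso per-CPU data and fill in the
+ * page pointers for the vdso code mapping.
+ */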
+static int __init init_vdso(void)
+{
+ unsigned long i, cpu, pfn;
+
+ BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
+ BUG_ON(!PAGE_ALIGNED(vdso_info.size));
+
+ for_each_possible_cpu(cpu)
+ vdso_pdata[cpu].node = cpu_to_node(cpu);
+
+ pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
+ for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
+ vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
+
+ return 0;
+}
+subsys_initcall(init_vdso);
+
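+/*
+ * Randomize the vdso mapping base above STACK_TOP when the task has
+ * PF_RANDOMIZE set.
+ */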
+static unsigned long vdso_base(void)
+{
+ unsigned long base = STACK_TOP;
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
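+/*
+ * Map the vvar data pages and, immediately above them, the vdso code pages
+ * into the new process's address space.
+ */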
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ int ret;
+ unsigned long vvar_size, size, data_addr, vdso_addr;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct loongarch_vdso_info *info = current->thread.vdso;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ /*
+	 * Determine the total area size: the VDSO data (vvar) pages plus
+	 * the VDSO code pages.
+ */
+ vvar_size = VDSO_DATA_SIZE;
+ size = vvar_size + info->size;
+
+ data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(data_addr)) {
+ ret = data_addr;
+ goto out;
+ }
+ vdso_addr = data_addr + VDSO_DATA_SIZE;
+
+ vma = _install_special_mapping(mm, data_addr, vvar_size,
+ VM_READ | VM_MAYREAD,
+ &info->data_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+	/* Map the VDSO data (vvar) pages. */
+ ret = remap_pfn_range(vma, data_addr,
+ virt_to_phys(&loongarch_vdso_data) >> PAGE_SHIFT,
+ vvar_size, PAGE_READONLY);
+ if (ret)
+ goto out;
+
+ /* Map VDSO code page. */
+ vma = _install_special_mapping(mm, vdso_addr, info->size,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &info->code_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ mm->context.vdso = (void *)vdso_addr;
+ ret = 0;
+
+out:
+ mmap_write_unlock(mm);
+ return ret;
+}
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..b3309a5e6
--- /dev/null
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/sizes.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+#include <asm-generic/vmlinux.lds.h>
+#include "image-vars.h"
+
+/*
+ * The maximum available page size is 64K, so we set the SectionAlignment
+ * field of the EFI application to 64K.
+ */
+PECOFF_FILE_ALIGN = 0x200;
+PECOFF_SEGMENT_ALIGN = 0x10000;
+
+OUTPUT_ARCH(loongarch)
+ENTRY(kernel_entry)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+ note PT_NOTE FLAGS(4); /* R__ */
+}
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ . = VMLINUX_LOAD_ADDRESS;
+
+ _text = .;
+ HEAD_TEXT_SECTION
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ _stext = .;
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ _etext = .;
+
+ EXCEPTION_TABLE(16)
+
+ .got : ALIGN(16) { *(.got) }
+ .plt : ALIGN(16) { *(.plt) }
+ .got.plt : ALIGN(16) { *(.got.plt) }
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __init_begin = .;
+ __inittext_begin = .;
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ .exit.text : {
+ EXIT_TEXT
+ }
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __inittext_end = .;
+
+ __initdata_begin = .;
+
+ INIT_DATA_SECTION(16)
+ .exit.data : {
+ EXIT_DATA
+ }
+
+#ifdef CONFIG_SMP
+ PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
+#endif
+
+ .rela.dyn : ALIGN(8) { *(.rela.dyn) *(.rela*) }
+
+ .init.bss : {
+ *(.init.bss)
+ }
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __initdata_end = .;
+
+ __init_end = .;
+
+ _sdata = .;
+ RO_DATA(4096)
+ RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+
+ .sdata : {
+ *(.sdata)
+ }
+ .edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGN); }
+ _edata = .;
+
+ BSS_SECTION(0, SZ_64K, 8)
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+
+ DISCARDS
+ /DISCARD/ : {
+ *(.gnu.attributes)
+ *(.options)
+ *(.eh_frame)
+ }
+}