author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/loongarch/kernel
parent    Initial commit. (diff)
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/loongarch/kernel')
-rw-r--r--  arch/loongarch/kernel/.gitignore |    2
-rw-r--r--  arch/loongarch/kernel/Makefile |   76
-rw-r--r--  arch/loongarch/kernel/access-helper.h |   13
-rw-r--r--  arch/loongarch/kernel/acpi.c |  343
-rw-r--r--  arch/loongarch/kernel/alternative.c |  246
-rw-r--r--  arch/loongarch/kernel/asm-offsets.c |  291
-rw-r--r--  arch/loongarch/kernel/cacheinfo.c |   86
-rw-r--r--  arch/loongarch/kernel/cpu-probe.c |  329
-rw-r--r--  arch/loongarch/kernel/crash_dump.c |   23
-rw-r--r--  arch/loongarch/kernel/dma.c |   30
-rw-r--r--  arch/loongarch/kernel/efi-header.S |   99
-rw-r--r--  arch/loongarch/kernel/efi.c |  137
-rw-r--r--  arch/loongarch/kernel/elf.c |   25
-rw-r--r--  arch/loongarch/kernel/entry.S |   95
-rw-r--r--  arch/loongarch/kernel/env.c |   78
-rw-r--r--  arch/loongarch/kernel/fpu.S |  526
-rw-r--r--  arch/loongarch/kernel/ftrace.c |   73
-rw-r--r--  arch/loongarch/kernel/ftrace_dyn.c |  341
-rw-r--r--  arch/loongarch/kernel/genex.S |   99
-rw-r--r--  arch/loongarch/kernel/head.S |  149
-rw-r--r--  arch/loongarch/kernel/hw_breakpoint.c |  549
-rw-r--r--  arch/loongarch/kernel/idle.c |   17
-rw-r--r--  arch/loongarch/kernel/image-vars.h |   19
-rw-r--r--  arch/loongarch/kernel/inst.c |  338
-rw-r--r--  arch/loongarch/kernel/io.c |   94
-rw-r--r--  arch/loongarch/kernel/irq.c |  137
-rw-r--r--  arch/loongarch/kernel/jump_label.c |   22
-rw-r--r--  arch/loongarch/kernel/kfpu.c |   90
-rw-r--r--  arch/loongarch/kernel/kgdb.c |  727
-rw-r--r--  arch/loongarch/kernel/kprobes.c |  338
-rw-r--r--  arch/loongarch/kernel/lbt.S |  155
-rw-r--r--  arch/loongarch/kernel/machine_kexec.c |  304
-rw-r--r--  arch/loongarch/kernel/mcount.S |   98
-rw-r--r--  arch/loongarch/kernel/mcount_dyn.S |  160
-rw-r--r--  arch/loongarch/kernel/mem.c |   63
-rw-r--r--  arch/loongarch/kernel/module-sections.c |  185
-rw-r--r--  arch/loongarch/kernel/module.c |  529
-rw-r--r--  arch/loongarch/kernel/numa.c |  447
-rw-r--r--  arch/loongarch/kernel/perf_event.c |  887
-rw-r--r--  arch/loongarch/kernel/perf_regs.c |   53
-rw-r--r--  arch/loongarch/kernel/proc.c |  130
-rw-r--r--  arch/loongarch/kernel/process.c |  375
-rw-r--r--  arch/loongarch/kernel/ptrace.c | 1080
-rw-r--r--  arch/loongarch/kernel/relocate.c |  246
-rw-r--r--  arch/loongarch/kernel/relocate_kernel.S |  111
-rw-r--r--  arch/loongarch/kernel/reset.c |   79
-rw-r--r--  arch/loongarch/kernel/rethook.c |   28
-rw-r--r--  arch/loongarch/kernel/rethook.h |    8
-rw-r--r--  arch/loongarch/kernel/rethook_trampoline.S |   96
-rw-r--r--  arch/loongarch/kernel/setup.c |  633
-rw-r--r--  arch/loongarch/kernel/signal.c | 1075
-rw-r--r--  arch/loongarch/kernel/smp.c |  701
-rw-r--r--  arch/loongarch/kernel/stacktrace.c |   80
-rw-r--r--  arch/loongarch/kernel/switch.S |   42
-rw-r--r--  arch/loongarch/kernel/syscall.c |   64
-rw-r--r--  arch/loongarch/kernel/sysrq.c |   65
-rw-r--r--  arch/loongarch/kernel/time.c |  227
-rw-r--r--  arch/loongarch/kernel/topology.c |   56
-rw-r--r--  arch/loongarch/kernel/traps.c | 1170
-rw-r--r--  arch/loongarch/kernel/unaligned.c |  497
-rw-r--r--  arch/loongarch/kernel/unwind.c |   32
-rw-r--r--  arch/loongarch/kernel/unwind_guess.c |   26
-rw-r--r--  arch/loongarch/kernel/unwind_prologue.c |  265
-rw-r--r--  arch/loongarch/kernel/uprobes.c |  153
-rw-r--r--  arch/loongarch/kernel/vdso.c |  209
-rw-r--r--  arch/loongarch/kernel/vmlinux.lds.S |  165
66 files changed, 15886 insertions, 0 deletions
diff --git a/arch/loongarch/kernel/.gitignore b/arch/loongarch/kernel/.gitignore
new file mode 100644
index 0000000000..bbb90f92d0
--- /dev/null
+++ b/arch/loongarch/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/loongarch/kernel/Makefile b/arch/loongarch/kernel/Makefile
new file mode 100644
index 0000000000..4fcc168f07
--- /dev/null
+++ b/arch/loongarch/kernel/Makefile
@@ -0,0 +1,76 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/LoongArch kernel.
+#
+
+extra-y := vmlinux.lds
+
+obj-y += head.o cpu-probe.o cacheinfo.o env.o setup.o entry.o genex.o \
+ traps.o irq.o idle.o process.o dma.o mem.o io.o reset.o switch.o \
+ elf.o syscall.o signal.o time.o topology.o inst.o ptrace.o vdso.o \
+ alternative.o unwind.o
+
+obj-$(CONFIG_ACPI) += acpi.o
+obj-$(CONFIG_EFI) += efi.o
+
+obj-$(CONFIG_CPU_HAS_FPU) += fpu.o kfpu.o
+
+obj-$(CONFIG_CPU_HAS_LBT) += lbt.o
+
+obj-$(CONFIG_ARCH_STRICT_ALIGN) += unaligned.o
+
+CFLAGS_module.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_syscall.o += $(call cc-option,-Wno-override-init,)
+CFLAGS_perf_event.o += $(call cc-option,-Wno-override-init,)
+
+ifdef CONFIG_FUNCTION_TRACER
+ ifndef CONFIG_DYNAMIC_FTRACE
+ obj-y += mcount.o ftrace.o
+ CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+ else
+ obj-y += mcount_dyn.o ftrace_dyn.o
+ CFLAGS_REMOVE_ftrace_dyn.o = $(CC_FLAGS_FTRACE)
+ endif
+ CFLAGS_REMOVE_inst.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_time.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_rethook.o = $(CC_FLAGS_FTRACE)
+ CFLAGS_REMOVE_rethook_trampoline.o = $(CC_FLAGS_FTRACE)
+endif
+
+KASAN_SANITIZE_efi.o := n
+KASAN_SANITIZE_cpu-probe.o := n
+KASAN_SANITIZE_traps.o := n
+KASAN_SANITIZE_smp.o := n
+KASAN_SANITIZE_vdso.o := n
+
+obj-$(CONFIG_MODULES) += module.o module-sections.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+
+obj-$(CONFIG_PROC_FS) += proc.o
+
+obj-$(CONFIG_SMP) += smp.o
+
+obj-$(CONFIG_NUMA) += numa.o
+
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+
+obj-$(CONFIG_UNWINDER_GUESS) += unwind_guess.o
+obj-$(CONFIG_UNWINDER_PROLOGUE) += unwind_prologue.o
+
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
+obj-$(CONFIG_HAVE_HW_BREAKPOINT) += hw_breakpoint.o
+
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_RETHOOK) += rethook.o rethook_trampoline.o
+obj-$(CONFIG_UPROBES) += uprobes.o
+
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/loongarch/kernel/access-helper.h b/arch/loongarch/kernel/access-helper.h
new file mode 100644
index 0000000000..4a35ca81bd
--- /dev/null
+++ b/arch/loongarch/kernel/access-helper.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/uaccess.h>
+
+static inline int __get_inst(u32 *i, u32 *p, bool user)
+{
+ return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
+}
+
+static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
+{
+ return user ? get_user(*a, (unsigned long __user *)p) : get_kernel_nofault(*a, p);
+}
diff --git a/arch/loongarch/kernel/acpi.c b/arch/loongarch/kernel/acpi.c
new file mode 100644
index 0000000000..8e00a754e5
--- /dev/null
+++ b/arch/loongarch/kernel/acpi.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+#include <linux/serial_core.h>
+#include <asm/io.h>
+#include <asm/numa.h>
+#include <asm/loongson.h>
+
+int acpi_disabled;
+EXPORT_SYMBOL(acpi_disabled);
+int acpi_noirq;
+int acpi_pci_disabled;
+EXPORT_SYMBOL(acpi_pci_disabled);
+int acpi_strict = 1; /* We have no workarounds on LoongArch */
+int num_processors;
+int disabled_cpus;
+
+u64 acpi_saved_sp;
+
+#define MAX_CORE_PIC 256
+
+#define PREFIX "ACPI: "
+
+struct acpi_madt_core_pic acpi_core_pic[NR_CPUS];
+
+void __init __iomem * __acpi_map_table(unsigned long phys, unsigned long size)
+{
+
+ if (!phys || !size)
+ return NULL;
+
+ return early_memremap(phys, size);
+}
+void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
+{
+ if (!map || !size)
+ return;
+
+ early_memunmap(map, size);
+}
+
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
+{
+ if (!memblock_is_memory(phys))
+ return ioremap(phys, size);
+ else
+ return ioremap_cache(phys, size);
+}
+
+#ifdef CONFIG_SMP
+static int set_processor_mask(u32 id, u32 flags)
+{
+
+ int cpu, cpuid = id;
+
+ if (num_processors >= nr_cpu_ids) {
+ pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached."
+ " processor 0x%x ignored.\n", nr_cpu_ids, cpuid);
+
+ return -ENODEV;
+
+ }
+ if (cpuid == loongson_sysconf.boot_cpu_id)
+ cpu = 0;
+ else
+ cpu = cpumask_next_zero(-1, cpu_present_mask);
+
+ if (flags & ACPI_MADT_ENABLED) {
+ num_processors++;
+ set_cpu_possible(cpu, true);
+ set_cpu_present(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
+ } else
+ disabled_cpus++;
+
+ return cpu;
+}
+#endif
+
+static int __init
+acpi_parse_processor(union acpi_subtable_headers *header, const unsigned long end)
+{
+ struct acpi_madt_core_pic *processor = NULL;
+
+ processor = (struct acpi_madt_core_pic *)header;
+ if (BAD_MADT_ENTRY(processor, end))
+ return -EINVAL;
+
+ acpi_table_print_madt_entry(&header->common);
+#ifdef CONFIG_SMP
+ acpi_core_pic[processor->core_id] = *processor;
+ set_processor_mask(processor->core_id, processor->flags);
+#endif
+
+ return 0;
+}
+
+static int __init
+acpi_parse_eio_master(union acpi_subtable_headers *header, const unsigned long end)
+{
+ static int core = 0;
+ struct acpi_madt_eio_pic *eiointc = NULL;
+
+ eiointc = (struct acpi_madt_eio_pic *)header;
+ if (BAD_MADT_ENTRY(eiointc, end))
+ return -EINVAL;
+
+ core = eiointc->node * CORES_PER_EIO_NODE;
+ set_bit(core, &(loongson_sysconf.cores_io_master));
+
+ return 0;
+}
+
+static void __init acpi_process_madt(void)
+{
+#ifdef CONFIG_SMP
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ __cpu_number_map[i] = -1;
+ __cpu_logical_map[i] = -1;
+ }
+#endif
+ acpi_table_parse_madt(ACPI_MADT_TYPE_CORE_PIC,
+ acpi_parse_processor, MAX_CORE_PIC);
+
+ acpi_table_parse_madt(ACPI_MADT_TYPE_EIO_PIC,
+ acpi_parse_eio_master, MAX_IO_PICS);
+
+ loongson_sysconf.nr_cpus = num_processors;
+}
+
+int pptt_enabled;
+
+int __init parse_acpi_topology(void)
+{
+ int cpu, topology_id;
+
+ for_each_possible_cpu(cpu) {
+ topology_id = find_acpi_cpu_topology(cpu, 0);
+ if (topology_id < 0) {
+ pr_warn("Invalid BIOS PPTT\n");
+ return -ENOENT;
+ }
+
+ if (acpi_pptt_cpu_is_thread(cpu) <= 0)
+ cpu_data[cpu].core = topology_id;
+ else {
+ topology_id = find_acpi_cpu_topology(cpu, 1);
+ if (topology_id < 0)
+ return -ENOENT;
+
+ cpu_data[cpu].core = topology_id;
+ }
+ }
+
+ pptt_enabled = 1;
+
+ return 0;
+}
+
+#ifndef CONFIG_SUSPEND
+int (*acpi_suspend_lowlevel)(void);
+#else
+int (*acpi_suspend_lowlevel)(void) = loongarch_acpi_suspend;
+#endif
+
+void __init acpi_boot_table_init(void)
+{
+ /*
+ * If acpi_disabled, bail out
+ */
+ if (acpi_disabled)
+ goto fdt_earlycon;
+
+ /*
+ * Initialize the ACPI boot-time table parser.
+ */
+ if (acpi_table_init()) {
+ disable_acpi();
+ goto fdt_earlycon;
+ }
+
+ loongson_sysconf.boot_cpu_id = read_csr_cpuid();
+
+ /*
+ * Process the Multiple APIC Description Table (MADT), if present
+ */
+ acpi_process_madt();
+
+ /* Do not enable ACPI SPCR console by default */
+ acpi_parse_spcr(earlycon_acpi_spcr_enable, false);
+
+ return;
+
+fdt_earlycon:
+ if (earlycon_acpi_spcr_enable)
+ early_init_dt_scan_chosen_stdout();
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+static __init int setup_node(int pxm)
+{
+ return acpi_map_pxm_to_node(pxm);
+}
+
+/*
+ * Callback for SLIT parsing. pxm_to_node() returns NUMA_NO_NODE for
+ * I/O localities since SRAT does not list them. I/O localities are
+ * not supported at this point.
+ */
+unsigned int numa_distance_cnt;
+
+static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
+{
+ return slit->locality_count;
+}
+
+void __init numa_set_distance(int from, int to, int distance)
+{
+ if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
+ pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
+ from, to, distance);
+ return;
+ }
+
+ node_distances[from][to] = distance;
+}
+
+/* Callback for Proximity Domain -> CPUID mapping */
+void __init
+acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
+{
+ int pxm, node;
+
+ if (srat_disabled())
+ return;
+ if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
+ bad_srat();
+ return;
+ }
+ if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
+ return;
+ pxm = pa->proximity_domain_lo;
+ if (acpi_srat_revision >= 2) {
+ pxm |= (pa->proximity_domain_hi[0] << 8);
+ pxm |= (pa->proximity_domain_hi[1] << 16);
+ pxm |= (pa->proximity_domain_hi[2] << 24);
+ }
+ node = setup_node(pxm);
+ if (node < 0) {
+ pr_err("SRAT: Too many proximity domains %x\n", pxm);
+ bad_srat();
+ return;
+ }
+
+ if (pa->apic_id >= CONFIG_NR_CPUS) {
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
+ pxm, pa->apic_id, node);
+ return;
+ }
+
+ early_numa_add_cpu(pa->apic_id, node);
+
+ set_cpuid_to_node(pa->apic_id, node);
+ node_set(node, numa_nodes_parsed);
+ pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
+}
+
+#endif
+
+void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
+{
+ memblock_reserve(addr, size);
+}
+
+#ifdef CONFIG_ACPI_HOTPLUG_CPU
+
+#include <acpi/processor.h>
+
+static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
+{
+#ifdef CONFIG_ACPI_NUMA
+ int nid;
+
+ nid = acpi_get_node(handle);
+ if (nid != NUMA_NO_NODE) {
+ set_cpuid_to_node(physid, nid);
+ node_set(nid, numa_nodes_parsed);
+ set_cpu_numa_node(cpu, nid);
+ cpumask_set_cpu(cpu, cpumask_of_node(nid));
+ }
+#endif
+ return 0;
+}
+
+int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
+{
+ int cpu;
+
+ cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
+ if (cpu < 0) {
+ pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
+ return cpu;
+ }
+
+ acpi_map_cpu2node(handle, cpu, physid);
+
+ *pcpu = cpu;
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_map_cpu);
+
+int acpi_unmap_cpu(int cpu)
+{
+#ifdef CONFIG_ACPI_NUMA
+ set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
+#endif
+ set_cpu_present(cpu, false);
+ num_processors--;
+
+ pr_info("cpu%d hot remove!\n", cpu);
+
+ return 0;
+}
+EXPORT_SYMBOL(acpi_unmap_cpu);
+
+#endif /* CONFIG_ACPI_HOTPLUG_CPU */
diff --git a/arch/loongarch/kernel/alternative.c b/arch/loongarch/kernel/alternative.c
new file mode 100644
index 0000000000..4ad13847e9
--- /dev/null
+++ b/arch/loongarch/kernel/alternative.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <asm/alternative.h>
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+#include <asm/sections.h>
+
+int __read_mostly alternatives_patched;
+
+EXPORT_SYMBOL_GPL(alternatives_patched);
+
+#define MAX_PATCH_SIZE (((u8)(-1)) / LOONGARCH_INSN_SIZE)
+
+static int __initdata_or_module debug_alternative;
+
+static int __init debug_alt(char *str)
+{
+ debug_alternative = 1;
+ return 1;
+}
+__setup("debug-alternative", debug_alt);
+
+#define DPRINTK(fmt, args...) \
+do { \
+ if (debug_alternative) \
+ printk(KERN_DEBUG "%s: " fmt "\n", __func__, ##args); \
+} while (0)
+
+#define DUMP_WORDS(buf, count, fmt, args...) \
+do { \
+ if (unlikely(debug_alternative)) { \
+ int _j; \
+ union loongarch_instruction *_buf = buf; \
+ \
+ if (!(count)) \
+ break; \
+ \
+ printk(KERN_DEBUG fmt, ##args); \
+ for (_j = 0; _j < count - 1; _j++) \
+ printk(KERN_CONT "<%08x> ", _buf[_j].word); \
+ printk(KERN_CONT "<%08x>\n", _buf[_j].word); \
+ } \
+} while (0)
+
+/* Use this to add nops to a buffer, then text_poke the whole buffer. */
+static void __init_or_module add_nops(union loongarch_instruction *insn, int count)
+{
+ while (count--) {
+ insn->word = INSN_NOP;
+ insn++;
+ }
+}
+
+/* Is the jump addr in local .altinstructions */
+static inline bool in_alt_jump(unsigned long jump, void *start, void *end)
+{
+ return jump >= (unsigned long)start && jump < (unsigned long)end;
+}
+
+static void __init_or_module recompute_jump(union loongarch_instruction *buf,
+ union loongarch_instruction *dest, union loongarch_instruction *src,
+ void *start, void *end)
+{
+ unsigned int si, si_l, si_h;
+ unsigned long cur_pc, jump_addr, pc;
+ long offset;
+
+ cur_pc = (unsigned long)src;
+ pc = (unsigned long)dest;
+
+ si_l = src->reg0i26_format.immediate_l;
+ si_h = src->reg0i26_format.immediate_h;
+ switch (src->reg0i26_format.opcode) {
+ case b_op:
+ case bl_op:
+ jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ if (in_alt_jump(jump_addr, start, end))
+ return;
+ offset = jump_addr - pc;
+ BUG_ON(offset < -SZ_128M || offset >= SZ_128M);
+ offset >>= 2;
+ buf->reg0i26_format.immediate_h = offset >> 16;
+ buf->reg0i26_format.immediate_l = offset;
+ return;
+ }
+
+ si_l = src->reg1i21_format.immediate_l;
+ si_h = src->reg1i21_format.immediate_h;
+ switch (src->reg1i21_format.opcode) {
+ case bceqz_op: /* bceqz_op = bcnez_op */
+ BUG_ON(buf->reg1i21_format.rj & BIT(4));
+ fallthrough;
+ case beqz_op:
+ case bnez_op:
+ jump_addr = cur_pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ if (in_alt_jump(jump_addr, start, end))
+ return;
+ offset = jump_addr - pc;
+ BUG_ON(offset < -SZ_4M || offset >= SZ_4M);
+ offset >>= 2;
+ buf->reg1i21_format.immediate_h = offset >> 16;
+ buf->reg1i21_format.immediate_l = offset;
+ return;
+ }
+
+ si = src->reg2i16_format.immediate;
+ switch (src->reg2i16_format.opcode) {
+ case beq_op:
+ case bne_op:
+ case blt_op:
+ case bge_op:
+ case bltu_op:
+ case bgeu_op:
+ jump_addr = cur_pc + sign_extend64(si << 2, 17);
+ if (in_alt_jump(jump_addr, start, end))
+ return;
+ offset = jump_addr - pc;
+ BUG_ON(offset < -SZ_128K || offset >= SZ_128K);
+ offset >>= 2;
+ buf->reg2i16_format.immediate = offset;
+ return;
+ }
+}
+
+static int __init_or_module copy_alt_insns(union loongarch_instruction *buf,
+ union loongarch_instruction *dest, union loongarch_instruction *src, int nr)
+{
+ int i;
+
+ for (i = 0; i < nr; i++) {
+ buf[i].word = src[i].word;
+
+ if (is_pc_ins(&src[i])) {
+ pr_err("Not support pcrel instruction at present!");
+ return -EINVAL;
+ }
+
+ if (is_branch_ins(&src[i]) &&
+ src[i].reg2i16_format.opcode != jirl_op) {
+ recompute_jump(&buf[i], &dest[i], &src[i], src, src + nr);
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * text_poke_early - Update instructions on a live kernel at boot time
+ *
+ * When you use this code to patch more than one byte of an instruction
+ * you need to make sure that other CPUs cannot execute this code in parallel.
+ * Also no thread must be currently preempted in the middle of these
+ * instructions. And on the local CPU you need to be protected against NMI or MCE
+ * handlers seeing an inconsistent instruction while you patch.
+ */
+static void *__init_or_module text_poke_early(union loongarch_instruction *insn,
+ union loongarch_instruction *buf, unsigned int nr)
+{
+ int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ for (i = 0; i < nr; i++)
+ insn[i].word = buf[i].word;
+
+ local_irq_restore(flags);
+
+ wbflush();
+ flush_icache_range((unsigned long)insn, (unsigned long)(insn + nr));
+
+ return insn;
+}
+
+/*
+ * Replace instructions with better alternatives for this CPU type. This runs
+ * before SMP is initialized to avoid SMP problems with self modifying code.
+ * This implies that asymmetric systems where APs have less capabilities than
+ * the boot processor are not handled. Tough. Make sure you disable such
+ * features by hand.
+ */
+void __init_or_module apply_alternatives(struct alt_instr *start, struct alt_instr *end)
+{
+ struct alt_instr *a;
+ unsigned int nr_instr, nr_repl, nr_insnbuf;
+ union loongarch_instruction *instr, *replacement;
+ union loongarch_instruction insnbuf[MAX_PATCH_SIZE];
+
+ DPRINTK("alt table %px, -> %px", start, end);
+ /*
+ * The scan order should be from start to end. A later scanned
+ * alternative code can overwrite previously scanned alternative code.
+ * Some kernel functions (e.g. memcpy, memset, etc) use this order to
+ * patch code.
+ *
+ * So be careful if you want to change the scan order to any other
+ * order.
+ */
+ for (a = start; a < end; a++) {
+ nr_insnbuf = 0;
+
+ instr = (void *)&a->instr_offset + a->instr_offset;
+ replacement = (void *)&a->replace_offset + a->replace_offset;
+
+ BUG_ON(a->instrlen > sizeof(insnbuf));
+ BUG_ON(a->instrlen & 0x3);
+ BUG_ON(a->replacementlen & 0x3);
+
+ nr_instr = a->instrlen / LOONGARCH_INSN_SIZE;
+ nr_repl = a->replacementlen / LOONGARCH_INSN_SIZE;
+
+ if (!cpu_has(a->feature)) {
+ DPRINTK("feat not exist: %d, old: (%px len: %d), repl: (%px, len: %d)",
+ a->feature, instr, a->instrlen,
+ replacement, a->replacementlen);
+
+ continue;
+ }
+
+ DPRINTK("feat: %d, old: (%px len: %d), repl: (%px, len: %d)",
+ a->feature, instr, a->instrlen,
+ replacement, a->replacementlen);
+
+ DUMP_WORDS(instr, nr_instr, "%px: old_insn: ", instr);
+ DUMP_WORDS(replacement, nr_repl, "%px: rpl_insn: ", replacement);
+
+ copy_alt_insns(insnbuf, instr, replacement, nr_repl);
+ nr_insnbuf = nr_repl;
+
+ if (nr_instr > nr_repl) {
+ add_nops(insnbuf + nr_repl, nr_instr - nr_repl);
+ nr_insnbuf += nr_instr - nr_repl;
+ }
+ DUMP_WORDS(insnbuf, nr_insnbuf, "%px: final_insn: ", instr);
+
+ text_poke_early(instr, insnbuf, nr_insnbuf);
+ }
+}
+
+void __init alternative_instructions(void)
+{
+ apply_alternatives(__alt_instructions, __alt_instructions_end);
+
+ alternatives_patched = 1;
+}
diff --git a/arch/loongarch/kernel/asm-offsets.c b/arch/loongarch/kernel/asm-offsets.c
new file mode 100644
index 0000000000..8da0726777
--- /dev/null
+++ b/arch/loongarch/kernel/asm-offsets.c
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/cpu-info.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/ftrace.h>
+
+void output_ptreg_defines(void)
+{
+ COMMENT("LoongArch pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_CRMD, pt_regs, csr_crmd);
+ OFFSET(PT_PRMD, pt_regs, csr_prmd);
+ OFFSET(PT_EUEN, pt_regs, csr_euen);
+ OFFSET(PT_ECFG, pt_regs, csr_ecfg);
+ OFFSET(PT_ESTAT, pt_regs, csr_estat);
+ OFFSET(PT_ERA, pt_regs, csr_era);
+ OFFSET(PT_BVADDR, pt_regs, csr_badvaddr);
+ OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+}
+
+void output_task_defines(void)
+{
+ COMMENT("LoongArch task_struct offsets.");
+ OFFSET(TASK_STATE, task_struct, __state);
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_STACKPROTECTOR)
+ OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
+}
+
+void output_thread_info_defines(void)
+{
+ COMMENT("LoongArch thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
+ BLANK();
+}
+
+void output_thread_defines(void)
+{
+ COMMENT("LoongArch specific thread_struct offsets.");
+ OFFSET(THREAD_REG01, task_struct, thread.reg01);
+ OFFSET(THREAD_REG03, task_struct, thread.reg03);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG24, task_struct, thread.reg24);
+ OFFSET(THREAD_REG25, task_struct, thread.reg25);
+ OFFSET(THREAD_REG26, task_struct, thread.reg26);
+ OFFSET(THREAD_REG27, task_struct, thread.reg27);
+ OFFSET(THREAD_REG28, task_struct, thread.reg28);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_SCHED_RA, task_struct, thread.sched_ra);
+ OFFSET(THREAD_SCHED_CFA, task_struct, thread.sched_cfa);
+ OFFSET(THREAD_CSRCRMD, task_struct,
+ thread.csr_crmd);
+ OFFSET(THREAD_CSRPRMD, task_struct,
+ thread.csr_prmd);
+ OFFSET(THREAD_CSREUEN, task_struct,
+ thread.csr_euen);
+ OFFSET(THREAD_CSRECFG, task_struct,
+ thread.csr_ecfg);
+
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
+ OFFSET(THREAD_BVADDR, task_struct, \
+ thread.csr_badvaddr);
+ OFFSET(THREAD_ECODE, task_struct, \
+ thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
+ BLANK();
+}
+
+void output_thread_fpu_defines(void)
+{
+ OFFSET(THREAD_FPR0, loongarch_fpu, fpr[0]);
+ OFFSET(THREAD_FPR1, loongarch_fpu, fpr[1]);
+ OFFSET(THREAD_FPR2, loongarch_fpu, fpr[2]);
+ OFFSET(THREAD_FPR3, loongarch_fpu, fpr[3]);
+ OFFSET(THREAD_FPR4, loongarch_fpu, fpr[4]);
+ OFFSET(THREAD_FPR5, loongarch_fpu, fpr[5]);
+ OFFSET(THREAD_FPR6, loongarch_fpu, fpr[6]);
+ OFFSET(THREAD_FPR7, loongarch_fpu, fpr[7]);
+ OFFSET(THREAD_FPR8, loongarch_fpu, fpr[8]);
+ OFFSET(THREAD_FPR9, loongarch_fpu, fpr[9]);
+ OFFSET(THREAD_FPR10, loongarch_fpu, fpr[10]);
+ OFFSET(THREAD_FPR11, loongarch_fpu, fpr[11]);
+ OFFSET(THREAD_FPR12, loongarch_fpu, fpr[12]);
+ OFFSET(THREAD_FPR13, loongarch_fpu, fpr[13]);
+ OFFSET(THREAD_FPR14, loongarch_fpu, fpr[14]);
+ OFFSET(THREAD_FPR15, loongarch_fpu, fpr[15]);
+ OFFSET(THREAD_FPR16, loongarch_fpu, fpr[16]);
+ OFFSET(THREAD_FPR17, loongarch_fpu, fpr[17]);
+ OFFSET(THREAD_FPR18, loongarch_fpu, fpr[18]);
+ OFFSET(THREAD_FPR19, loongarch_fpu, fpr[19]);
+ OFFSET(THREAD_FPR20, loongarch_fpu, fpr[20]);
+ OFFSET(THREAD_FPR21, loongarch_fpu, fpr[21]);
+ OFFSET(THREAD_FPR22, loongarch_fpu, fpr[22]);
+ OFFSET(THREAD_FPR23, loongarch_fpu, fpr[23]);
+ OFFSET(THREAD_FPR24, loongarch_fpu, fpr[24]);
+ OFFSET(THREAD_FPR25, loongarch_fpu, fpr[25]);
+ OFFSET(THREAD_FPR26, loongarch_fpu, fpr[26]);
+ OFFSET(THREAD_FPR27, loongarch_fpu, fpr[27]);
+ OFFSET(THREAD_FPR28, loongarch_fpu, fpr[28]);
+ OFFSET(THREAD_FPR29, loongarch_fpu, fpr[29]);
+ OFFSET(THREAD_FPR30, loongarch_fpu, fpr[30]);
+ OFFSET(THREAD_FPR31, loongarch_fpu, fpr[31]);
+
+ OFFSET(THREAD_FCSR, loongarch_fpu, fcsr);
+ OFFSET(THREAD_FCC, loongarch_fpu, fcc);
+ OFFSET(THREAD_FTOP, loongarch_fpu, ftop);
+ BLANK();
+}
+
+void output_thread_lbt_defines(void)
+{
+ OFFSET(THREAD_SCR0, loongarch_lbt, scr0);
+ OFFSET(THREAD_SCR1, loongarch_lbt, scr1);
+ OFFSET(THREAD_SCR2, loongarch_lbt, scr2);
+ OFFSET(THREAD_SCR3, loongarch_lbt, scr3);
+ OFFSET(THREAD_EFLAGS, loongarch_lbt, eflags);
+ BLANK();
+}
+
+void output_mm_defines(void)
+{
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
+}
+
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ BLANK();
+}
+
+void output_signal_defines(void)
+{
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_SMP
+void output_smpboot_defines(void)
+{
+ COMMENT("Linux smp cpu boot offsets.");
+ OFFSET(CPU_BOOT_STACK, secondary_data, stack);
+ OFFSET(CPU_BOOT_TINFO, secondary_data, thread_info);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_HIBERNATION
+void output_pbe_defines(void)
+{
+ COMMENT("Linux struct pbe offsets.");
+ OFFSET(PBE_ADDRESS, pbe, address);
+ OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
+ OFFSET(PBE_NEXT, pbe, next);
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void output_fgraph_ret_regs_defines(void)
+{
+ COMMENT("LoongArch fgraph_ret_regs offsets.");
+ OFFSET(FGRET_REGS_A0, fgraph_ret_regs, regs[0]);
+ OFFSET(FGRET_REGS_A1, fgraph_ret_regs, regs[1]);
+ OFFSET(FGRET_REGS_FP, fgraph_ret_regs, fp);
+ DEFINE(FGRET_REGS_SIZE, sizeof(struct fgraph_ret_regs));
+ BLANK();
+}
+#endif
diff --git a/arch/loongarch/kernel/cacheinfo.c b/arch/loongarch/kernel/cacheinfo.c
new file mode 100644
index 0000000000..c7988f7572
--- /dev/null
+++ b/arch/loongarch/kernel/cacheinfo.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch cacheinfo support
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cacheinfo.h>
+#include <linux/topology.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu-info.h>
+
+int init_cache_level(unsigned int cpu)
+{
+ int cache_present = current_cpu_data.cache_leaves_present;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ this_cpu_ci->num_levels =
+ current_cpu_data.cache_leaves[cache_present - 1].level;
+ this_cpu_ci->num_leaves = cache_present;
+
+ return 0;
+}
+
+static inline bool cache_leaves_are_shared(struct cacheinfo *this_leaf,
+ struct cacheinfo *sib_leaf)
+{
+ return (!(*(unsigned char *)(this_leaf->priv) & CACHE_PRIVATE)
+ && !(*(unsigned char *)(sib_leaf->priv) & CACHE_PRIVATE));
+}
+
+static void cache_cpumap_setup(unsigned int cpu)
+{
+ unsigned int index;
+ struct cacheinfo *this_leaf, *sib_leaf;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+
+ for (index = 0; index < this_cpu_ci->num_leaves; index++) {
+ unsigned int i;
+
+ this_leaf = this_cpu_ci->info_list + index;
+ /* skip if shared_cpu_map is already populated */
+ if (!cpumask_empty(&this_leaf->shared_cpu_map))
+ continue;
+
+ cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
+ for_each_online_cpu(i) {
+ struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);
+
+ if (i == cpu || !sib_cpu_ci->info_list ||
+ (cpu_to_node(i) != cpu_to_node(cpu)))
+ continue;
+
+ sib_leaf = sib_cpu_ci->info_list + index;
+ if (cache_leaves_are_shared(this_leaf, sib_leaf)) {
+ cpumask_set_cpu(cpu, &sib_leaf->shared_cpu_map);
+ cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
+ }
+ }
+ }
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ int i, cache_present = current_cpu_data.cache_leaves_present;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ struct cache_desc *cd, *cdesc = current_cpu_data.cache_leaves;
+
+ for (i = 0; i < cache_present; i++) {
+ cd = cdesc + i;
+
+ this_leaf->type = cd->type;
+ this_leaf->level = cd->level;
+ this_leaf->coherency_line_size = cd->linesz;
+ this_leaf->number_of_sets = cd->sets;
+ this_leaf->ways_of_associativity = cd->ways;
+ this_leaf->size = cd->linesz * cd->sets * cd->ways;
+ this_leaf->priv = &cd->flags;
+ this_leaf++;
+ }
+
+ cache_cpumap_setup(cpu);
+ this_cpu_ci->cpu_map_populated = true;
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/cpu-probe.c b/arch/loongarch/kernel/cpu-probe.c
new file mode 100644
index 0000000000..55320813ee
--- /dev/null
+++ b/arch/loongarch/kernel/cpu-probe.c
@@ -0,0 +1,329 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+#include <linux/printk.h>
+#include <linux/uaccess.h>
+
+#include <asm/cpu-features.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable-bits.h>
+#include <asm/setup.h>
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_loongarch *c)
+{
+ unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+ fcsr = c->fpu_csr0;
+ mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
+ sr = read_csr_euen();
+ enable_fpu();
+
+ fcsr0 = fcsr & mask;
+ write_fcsr(LOONGARCH_FCSR0, fcsr0);
+ fcsr0 = read_fcsr(LOONGARCH_FCSR0);
+
+ fcsr1 = fcsr | ~mask;
+ write_fcsr(LOONGARCH_FCSR0, fcsr1);
+ fcsr1 = read_fcsr(LOONGARCH_FCSR0);
+
+ write_fcsr(LOONGARCH_FCSR0, fcsr);
+
+ write_csr_euen(sr);
+
+ c->fpu_mask = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+/* MAP BASE */
+unsigned long vm_map_base;
+EXPORT_SYMBOL(vm_map_base);
+
+static void cpu_probe_addrbits(struct cpuinfo_loongarch *c)
+{
+#ifdef __NEED_ADDRBITS_PROBE
+ c->pabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_PABITS) >> 4;
+ c->vabits = (read_cpucfg(LOONGARCH_CPUCFG1) & CPUCFG1_VABITS) >> 12;
+ vm_map_base = 0UL - (1UL << c->vabits);
+#endif
+}
+
+static void set_isa(struct cpuinfo_loongarch *c, unsigned int isa)
+{
+ switch (isa) {
+ case LOONGARCH_CPU_ISA_LA64:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA64;
+ fallthrough;
+ case LOONGARCH_CPU_ISA_LA32S:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA32S;
+ fallthrough;
+ case LOONGARCH_CPU_ISA_LA32R:
+ c->isa_level |= LOONGARCH_CPU_ISA_LA32R;
+ break;
+ }
+}
+
+static void cpu_probe_common(struct cpuinfo_loongarch *c)
+{
+ unsigned int config;
+ unsigned long asid_mask;
+
+ c->options = LOONGARCH_CPU_CPUCFG | LOONGARCH_CPU_CSR |
+ LOONGARCH_CPU_TLB | LOONGARCH_CPU_VINT | LOONGARCH_CPU_WATCH;
+
+ elf_hwcap = HWCAP_LOONGARCH_CPUCFG;
+
+ config = read_cpucfg(LOONGARCH_CPUCFG1);
+ if (config & CPUCFG1_UAL) {
+ c->options |= LOONGARCH_CPU_UAL;
+ elf_hwcap |= HWCAP_LOONGARCH_UAL;
+ }
+ if (config & CPUCFG1_CRC32) {
+ c->options |= LOONGARCH_CPU_CRC32;
+ elf_hwcap |= HWCAP_LOONGARCH_CRC32;
+ }
+
+
+ config = read_cpucfg(LOONGARCH_CPUCFG2);
+ if (config & CPUCFG2_LAM) {
+ c->options |= LOONGARCH_CPU_LAM;
+ elf_hwcap |= HWCAP_LOONGARCH_LAM;
+ }
+ if (config & CPUCFG2_FP) {
+ c->options |= LOONGARCH_CPU_FPU;
+ elf_hwcap |= HWCAP_LOONGARCH_FPU;
+ }
+#ifdef CONFIG_CPU_HAS_LSX
+ if (config & CPUCFG2_LSX) {
+ c->options |= LOONGARCH_CPU_LSX;
+ elf_hwcap |= HWCAP_LOONGARCH_LSX;
+ }
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
+ if (config & CPUCFG2_LASX) {
+ c->options |= LOONGARCH_CPU_LASX;
+ elf_hwcap |= HWCAP_LOONGARCH_LASX;
+ }
+#endif
+ if (config & CPUCFG2_COMPLEX) {
+ c->options |= LOONGARCH_CPU_COMPLEX;
+ elf_hwcap |= HWCAP_LOONGARCH_COMPLEX;
+ }
+ if (config & CPUCFG2_CRYPTO) {
+ c->options |= LOONGARCH_CPU_CRYPTO;
+ elf_hwcap |= HWCAP_LOONGARCH_CRYPTO;
+ }
+ if (config & CPUCFG2_PTW) {
+ c->options |= LOONGARCH_CPU_PTW;
+ elf_hwcap |= HWCAP_LOONGARCH_PTW;
+ }
+ if (config & CPUCFG2_LVZP) {
+ c->options |= LOONGARCH_CPU_LVZ;
+ elf_hwcap |= HWCAP_LOONGARCH_LVZ;
+ }
+#ifdef CONFIG_CPU_HAS_LBT
+ if (config & CPUCFG2_X86BT) {
+ c->options |= LOONGARCH_CPU_LBT_X86;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_X86;
+ }
+ if (config & CPUCFG2_ARMBT) {
+ c->options |= LOONGARCH_CPU_LBT_ARM;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_ARM;
+ }
+ if (config & CPUCFG2_MIPSBT) {
+ c->options |= LOONGARCH_CPU_LBT_MIPS;
+ elf_hwcap |= HWCAP_LOONGARCH_LBT_MIPS;
+ }
+#endif
+
+ config = read_cpucfg(LOONGARCH_CPUCFG6);
+ if (config & CPUCFG6_PMP)
+ c->options |= LOONGARCH_CPU_PMP;
+
+ config = iocsr_read32(LOONGARCH_IOCSR_FEATURES);
+ if (config & IOCSRF_CSRIPI)
+ c->options |= LOONGARCH_CPU_CSRIPI;
+ if (config & IOCSRF_EXTIOI)
+ c->options |= LOONGARCH_CPU_EXTIOI;
+ if (config & IOCSRF_FREQSCALE)
+ c->options |= LOONGARCH_CPU_SCALEFREQ;
+ if (config & IOCSRF_FLATMODE)
+ c->options |= LOONGARCH_CPU_FLATMODE;
+ if (config & IOCSRF_EIODECODE)
+ c->options |= LOONGARCH_CPU_EIODECODE;
+ if (config & IOCSRF_VM)
+ c->options |= LOONGARCH_CPU_HYPERVISOR;
+
+ config = csr_read32(LOONGARCH_CSR_ASID);
+ config = (config & CSR_ASID_BIT) >> CSR_ASID_BIT_SHIFT;
+ asid_mask = GENMASK(config - 1, 0);
+ set_cpu_asid_mask(c, asid_mask);
+
+ config = read_csr_prcfg1();
+ c->ksave_mask = GENMASK((config & CSR_CONF1_KSNUM) - 1, 0);
+ c->ksave_mask &= ~(EXC_KSAVE_MASK | PERCPU_KSAVE_MASK | KVM_KSAVE_MASK);
+
+ config = read_csr_prcfg3();
+ switch (config & CSR_CONF3_TLBTYPE) {
+ case 0:
+ c->tlbsizemtlb = 0;
+ c->tlbsizestlbsets = 0;
+ c->tlbsizestlbways = 0;
+ c->tlbsize = 0;
+ break;
+ case 1:
+ c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+ c->tlbsizestlbsets = 0;
+ c->tlbsizestlbways = 0;
+ c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+ break;
+ case 2:
+ c->tlbsizemtlb = ((config & CSR_CONF3_MTLBSIZE) >> CSR_CONF3_MTLBSIZE_SHIFT) + 1;
+ c->tlbsizestlbsets = 1 << ((config & CSR_CONF3_STLBIDX) >> CSR_CONF3_STLBIDX_SHIFT);
+ c->tlbsizestlbways = ((config & CSR_CONF3_STLBWAYS) >> CSR_CONF3_STLBWAYS_SHIFT) + 1;
+ c->tlbsize = c->tlbsizemtlb + c->tlbsizestlbsets * c->tlbsizestlbways;
+ break;
+ default:
+ pr_warn("Warning: unknown TLB type\n");
+ }
+}
+
+#define MAX_NAME_LEN 32
+#define VENDOR_OFFSET 0
+#define CPUNAME_OFFSET 9
+
+static char cpu_full_name[MAX_NAME_LEN] = " - ";
+
+static inline void cpu_probe_loongson(struct cpuinfo_loongarch *c, unsigned int cpu)
+{
+ uint64_t *vendor = (void *)(&cpu_full_name[VENDOR_OFFSET]);
+ uint64_t *cpuname = (void *)(&cpu_full_name[CPUNAME_OFFSET]);
+
+ if (!__cpu_full_name[cpu])
+ __cpu_full_name[cpu] = cpu_full_name;
+
+ *vendor = iocsr_read64(LOONGARCH_IOCSR_VENDOR);
+ *cpuname = iocsr_read64(LOONGARCH_IOCSR_CPUNAME);
+
+ switch (c->processor_id & PRID_SERIES_MASK) {
+ case PRID_SERIES_LA132:
+ c->cputype = CPU_LOONGSON32;
+ set_isa(c, LOONGARCH_CPU_ISA_LA32S);
+ __cpu_family[cpu] = "Loongson-32bit";
+ pr_info("32-bit Loongson Processor probed (LA132 Core)\n");
+ break;
+ case PRID_SERIES_LA264:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA264 Core)\n");
+ break;
+ case PRID_SERIES_LA364:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA364 Core)\n");
+ break;
+ case PRID_SERIES_LA464:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA464 Core)\n");
+ break;
+ case PRID_SERIES_LA664:
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (LA664 Core)\n");
+ break;
+ default: /* Default to 64 bit */
+ c->cputype = CPU_LOONGSON64;
+ set_isa(c, LOONGARCH_CPU_ISA_LA64);
+ __cpu_family[cpu] = "Loongson-64bit";
+ pr_info("64-bit Loongson Processor probed (Unknown Core)\n");
+ }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_family[NR_CPUS];
+const char *__cpu_full_name[NR_CPUS];
+const char *__elf_platform;
+
+static void cpu_report(void)
+{
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_family_string());
+ if (c->options & LOONGARCH_CPU_FPU)
+ pr_info("FPU%d revision is: %08x\n", smp_processor_id(), c->fpu_vers);
+}
+
+void cpu_probe(void)
+{
+ unsigned int cpu = smp_processor_id();
+ struct cpuinfo_loongarch *c = &current_cpu_data;
+
+ /*
+ * Set a default ELF platform, cpu probe may later
+ * overwrite it with a more precise value
+ */
+ set_elf_platform(cpu, "loongarch");
+
+ c->cputype = CPU_UNKNOWN;
+ c->processor_id = read_cpucfg(LOONGARCH_CPUCFG0);
+ c->fpu_vers = (read_cpucfg(LOONGARCH_CPUCFG2) & CPUCFG2_FPVERS) >> 3;
+
+ c->fpu_csr0 = FPU_CSR_RN;
+ c->fpu_mask = FPU_CSR_RSVD;
+
+ cpu_probe_common(c);
+
+ per_cpu_trap_init(cpu);
+
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
+ }
+
+ BUG_ON(!__cpu_family[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ cpu_probe_addrbits(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vabits) - 1);
+#endif
+
+ cpu_report();
+}
diff --git a/arch/loongarch/kernel/crash_dump.c b/arch/loongarch/kernel/crash_dump.c
new file mode 100644
index 0000000000..e559307c10
--- /dev/null
+++ b/arch/loongarch/kernel/crash_dump.c
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/crash_dump.h>
+#include <linux/io.h>
+#include <linux/uio.h>
+
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = memremap(__pfn_to_phys(pfn), PAGE_SIZE, MEMREMAP_WB);
+ if (!vaddr)
+ return -ENOMEM;
+
+ csize = copy_to_iter(vaddr + offset, csize, iter);
+
+ memunmap(vaddr);
+
+ return csize;
+}
diff --git a/arch/loongarch/kernel/dma.c b/arch/loongarch/kernel/dma.c
new file mode 100644
index 0000000000..7a9c6a9dd2
--- /dev/null
+++ b/arch/loongarch/kernel/dma.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/dma-direct.h>
+
+void acpi_arch_dma_setup(struct device *dev)
+{
+ int ret;
+ u64 mask, end = 0;
+ const struct bus_dma_region *map = NULL;
+
+ ret = acpi_dma_get_range(dev, &map);
+ if (!ret && map) {
+ const struct bus_dma_region *r = map;
+
+ for (end = 0; r->size; r++) {
+ if (r->dma_start + r->size - 1 > end)
+ end = r->dma_start + r->size - 1;
+ }
+
+ mask = DMA_BIT_MASK(ilog2(end) + 1);
+ dev->bus_dma_limit = end;
+ dev->dma_range_map = map;
+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, mask);
+ *dev->dma_mask = min(*dev->dma_mask, mask);
+ }
+
+}
diff --git a/arch/loongarch/kernel/efi-header.S b/arch/loongarch/kernel/efi-header.S
new file mode 100644
index 0000000000..5f23b85d78
--- /dev/null
+++ b/arch/loongarch/kernel/efi-header.S
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __EFI_PE_HEADER
+ .long PE_MAGIC
+.Lcoff_header:
+ .short IMAGE_FILE_MACHINE_LOONGARCH64 /* Machine */
+ .short .Lsection_count /* NumberOfSections */
+ .long 0 /* TimeDateStamp */
+ .long 0 /* PointerToSymbolTable */
+ .long 0 /* NumberOfSymbols */
+ .short .Lsection_table - .Loptional_header /* SizeOfOptionalHeader */
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED /* Characteristics */
+
+.Loptional_header:
+ .short PE_OPT_MAGIC_PE32PLUS /* PE32+ format */
+ .byte 0x02 /* MajorLinkerVersion */
+ .byte 0x14 /* MinorLinkerVersion */
+ .long __inittext_end - .Lefi_header_end /* SizeOfCode */
+ .long _kernel_vsize /* SizeOfInitializedData */
+ .long 0 /* SizeOfUninitializedData */
+ .long __efistub_efi_pe_entry - _head /* AddressOfEntryPoint */
+ .long .Lefi_header_end - _head /* BaseOfCode */
+
+.Lextra_header_fields:
+ .quad 0 /* ImageBase */
+ .long PECOFF_SEGMENT_ALIGN /* SectionAlignment */
+ .long PECOFF_FILE_ALIGN /* FileAlignment */
+ .short 0 /* MajorOperatingSystemVersion */
+ .short 0 /* MinorOperatingSystemVersion */
+ .short LINUX_EFISTUB_MAJOR_VERSION /* MajorImageVersion */
+ .short LINUX_EFISTUB_MINOR_VERSION /* MinorImageVersion */
+ .short 0 /* MajorSubsystemVersion */
+ .short 0 /* MinorSubsystemVersion */
+ .long 0 /* Win32VersionValue */
+
+ .long _end - _head /* SizeOfImage */
+
+ /* Everything before the kernel image is considered part of the header */
+ .long .Lefi_header_end - _head /* SizeOfHeaders */
+ .long 0 /* CheckSum */
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION /* Subsystem */
+ .short 0 /* DllCharacteristics */
+ .quad 0 /* SizeOfStackReserve */
+ .quad 0 /* SizeOfStackCommit */
+ .quad 0 /* SizeOfHeapReserve */
+ .quad 0 /* SizeOfHeapCommit */
+ .long 0 /* LoaderFlags */
+ .long (.Lsection_table - .) / 8 /* NumberOfRvaAndSizes */
+
+ .quad 0 /* ExportTable */
+ .quad 0 /* ImportTable */
+ .quad 0 /* ResourceTable */
+ .quad 0 /* ExceptionTable */
+ .quad 0 /* CertificationTable */
+ .quad 0 /* BaseRelocationTable */
+
+ /* Section table */
+.Lsection_table:
+ .ascii ".text\0\0\0"
+ .long __inittext_end - .Lefi_header_end /* VirtualSize */
+ .long .Lefi_header_end - _head /* VirtualAddress */
+ .long __inittext_end - .Lefi_header_end /* SizeOfRawData */
+ .long .Lefi_header_end - _head /* PointerToRawData */
+
+ .long 0 /* PointerToRelocations */
+ .long 0 /* PointerToLineNumbers */
+ .short 0 /* NumberOfRelocations */
+ .short 0 /* NumberOfLineNumbers */
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE /* Characteristics */
+
+ .ascii ".data\0\0\0"
+ .long _kernel_vsize /* VirtualSize */
+ .long __initdata_begin - _head /* VirtualAddress */
+ .long _kernel_rsize /* SizeOfRawData */
+ .long __initdata_begin - _head /* PointerToRawData */
+
+ .long 0 /* PointerToRelocations */
+ .long 0 /* PointerToLineNumbers */
+ .short 0 /* NumberOfRelocations */
+ .short 0 /* NumberOfLineNumbers */
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE /* Characteristics */
+
+ .set .Lsection_count, (. - .Lsection_table) / 40
+
+ .balign 0x10000 /* PECOFF_SEGMENT_ALIGN */
+.Lefi_header_end:
+ .endm
diff --git a/arch/loongarch/kernel/efi.c b/arch/loongarch/kernel/efi.c
new file mode 100644
index 0000000000..9fc10cea21
--- /dev/null
+++ b/arch/loongarch/kernel/efi.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * EFI initialization
+ *
+ * Author: Jianmin Lv <lvjianmin@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/efi-bgrt.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/io.h>
+#include <linux/kobject.h>
+#include <linux/memblock.h>
+#include <linux/reboot.h>
+#include <linux/screen_info.h>
+#include <linux/uaccess.h>
+
+#include <asm/early_ioremap.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+
+static unsigned long efi_nr_tables;
+static unsigned long efi_config_table;
+
+static unsigned long __initdata boot_memmap = EFI_INVALID_TABLE_ADDR;
+static unsigned long __initdata fdt_pointer = EFI_INVALID_TABLE_ADDR;
+
+static efi_system_table_t *efi_systab;
+static efi_config_table_type_t arch_tables[] __initdata = {
+ {LINUX_EFI_BOOT_MEMMAP_GUID, &boot_memmap, "MEMMAP" },
+ {DEVICE_TREE_GUID, &fdt_pointer, "FDTPTR" },
+ {},
+};
+
+void __init *efi_fdt_pointer(void)
+{
+ if (!efi_systab)
+ return NULL;
+
+ if (fdt_pointer == EFI_INVALID_TABLE_ADDR)
+ return NULL;
+
+ return early_memremap_ro(fdt_pointer, SZ_64K);
+}
+
+void __init efi_runtime_init(void)
+{
+ if (!efi_enabled(EFI_BOOT) || !efi_systab->runtime)
+ return;
+
+ if (efi_runtime_disabled()) {
+ pr_info("EFI runtime services will be disabled.\n");
+ return;
+ }
+
+ efi.runtime = (efi_runtime_services_t *)efi_systab->runtime;
+ efi.runtime_version = (unsigned int)efi.runtime->hdr.revision;
+
+ efi_native_runtime_setup();
+ set_bit(EFI_RUNTIME_SERVICES, &efi.flags);
+}
+
+unsigned long __initdata screen_info_table = EFI_INVALID_TABLE_ADDR;
+
+static void __init init_screen_info(void)
+{
+ struct screen_info *si;
+
+ if (screen_info_table == EFI_INVALID_TABLE_ADDR)
+ return;
+
+ si = early_memremap(screen_info_table, sizeof(*si));
+ if (!si) {
+ pr_err("Could not map screen_info config table\n");
+ return;
+ }
+ screen_info = *si;
+ memset(si, 0, sizeof(*si));
+ early_memunmap(si, sizeof(*si));
+
+ memblock_reserve(screen_info.lfb_base, screen_info.lfb_size);
+}
+
+void __init efi_init(void)
+{
+ int size;
+ void *config_tables;
+ struct efi_boot_memmap *tbl;
+
+ if (!efi_system_table)
+ return;
+
+ efi_systab = (efi_system_table_t *)early_memremap_ro(efi_system_table, sizeof(*efi_systab));
+ if (!efi_systab) {
+ pr_err("Can't find EFI system table.\n");
+ return;
+ }
+
+ efi_systab_report_header(&efi_systab->hdr, efi_systab->fw_vendor);
+
+ set_bit(EFI_64BIT, &efi.flags);
+ efi_nr_tables = efi_systab->nr_tables;
+ efi_config_table = (unsigned long)efi_systab->tables;
+
+ size = sizeof(efi_config_table_t);
+ config_tables = early_memremap(efi_config_table, efi_nr_tables * size);
+ efi_config_parse_tables(config_tables, efi_systab->nr_tables, arch_tables);
+ early_memunmap(config_tables, efi_nr_tables * size);
+
+ set_bit(EFI_CONFIG_TABLES, &efi.flags);
+
+ init_screen_info();
+
+ if (boot_memmap == EFI_INVALID_TABLE_ADDR)
+ return;
+
+ tbl = early_memremap_ro(boot_memmap, sizeof(*tbl));
+ if (tbl) {
+ struct efi_memory_map_data data;
+
+ data.phys_map = boot_memmap + sizeof(*tbl);
+ data.size = tbl->map_size;
+ data.desc_size = tbl->desc_size;
+ data.desc_version = tbl->desc_ver;
+
+ if (efi_memmap_init_early(&data) < 0)
+ panic("Unable to map EFI memory map.\n");
+
+ early_memunmap(tbl, sizeof(*tbl));
+ }
+}
diff --git a/arch/loongarch/kernel/elf.c b/arch/loongarch/kernel/elf.c
new file mode 100644
index 0000000000..0fa81ced28
--- /dev/null
+++ b/arch/loongarch/kernel/elf.c
@@ -0,0 +1,25 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-info.h>
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ return 0;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ struct arch_elf_state *state)
+{
+ return 0;
+}
diff --git a/arch/loongarch/kernel/entry.S b/arch/loongarch/kernel/entry.S
new file mode 100644
index 0000000000..1ec8e4c4cc
--- /dev/null
+++ b/arch/loongarch/kernel/entry.S
@@ -0,0 +1,95 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+ .text
+ .cfi_sections .debug_frame
+ .align 5
+SYM_CODE_START(handle_syscall)
+ csrrd t0, PERCPU_BASE_KS
+ la.pcrel t1, kernelsp
+ add.d t1, t1, t0
+ move t2, sp
+ ld.d sp, t1, 0
+
+ addi.d sp, sp, -PT_SIZE
+ cfi_st t2, PT_R3
+ cfi_rel_offset sp, PT_R3
+ st.d zero, sp, PT_R0
+ csrrd t2, LOONGARCH_CSR_PRMD
+ st.d t2, sp, PT_PRMD
+ csrrd t2, LOONGARCH_CSR_CRMD
+ st.d t2, sp, PT_CRMD
+ csrrd t2, LOONGARCH_CSR_EUEN
+ st.d t2, sp, PT_EUEN
+ csrrd t2, LOONGARCH_CSR_ECFG
+ st.d t2, sp, PT_ECFG
+ csrrd t2, LOONGARCH_CSR_ESTAT
+ st.d t2, sp, PT_ESTAT
+ cfi_st ra, PT_R1
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ csrrd ra, LOONGARCH_CSR_ERA
+ st.d ra, sp, PT_ERA
+ cfi_rel_offset ra, PT_ERA
+
+ cfi_st tp, PT_R2
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
+
+ SAVE_STATIC
+
+#ifdef CONFIG_KGDB
+ li.w t1, CSR_CRMD_WE
+ csrxchg t1, t1, LOONGARCH_CSR_CRMD
+#endif
+
+ move u0, t0
+ li.d tp, ~_THREAD_MASK
+ and tp, tp, sp
+
+ move a0, sp
+ bl do_syscall
+
+ RESTORE_ALL_AND_RET
+SYM_CODE_END(handle_syscall)
+_ASM_NOKPROBE(handle_syscall)
+
+SYM_CODE_START(ret_from_fork)
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, sp
+ bl syscall_exit_to_user_mode
+ RESTORE_STATIC
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_fork)
+
+SYM_CODE_START(ret_from_kernel_thread)
+ bl schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jirl ra, s0, 0
+ move a0, sp
+ bl syscall_exit_to_user_mode
+ RESTORE_STATIC
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+SYM_CODE_END(ret_from_kernel_thread)
diff --git a/arch/loongarch/kernel/env.c b/arch/loongarch/kernel/env.c
new file mode 100644
index 0000000000..6b3bfb0092
--- /dev/null
+++ b/arch/loongarch/kernel/env.c
@@ -0,0 +1,78 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/memblock.h>
+#include <asm/early_ioremap.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/setup.h>
+
+u64 efi_system_table;
+struct loongson_system_configuration loongson_sysconf;
+EXPORT_SYMBOL(loongson_sysconf);
+
+void __init init_environ(void)
+{
+ int efi_boot = fw_arg0;
+ char *cmdline = early_memremap_ro(fw_arg1, COMMAND_LINE_SIZE);
+
+ if (efi_boot)
+ set_bit(EFI_BOOT, &efi.flags);
+ else
+ clear_bit(EFI_BOOT, &efi.flags);
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+ strscpy(init_command_line, cmdline, COMMAND_LINE_SIZE);
+ early_memunmap(cmdline, COMMAND_LINE_SIZE);
+
+ efi_system_table = fw_arg2;
+}
+
+static int __init init_cpu_fullname(void)
+{
+ int cpu;
+
+ if (loongson_sysconf.cpuname && !strncmp(loongson_sysconf.cpuname, "Loongson", 8)) {
+ for (cpu = 0; cpu < NR_CPUS; cpu++)
+ __cpu_full_name[cpu] = loongson_sysconf.cpuname;
+ }
+ return 0;
+}
+arch_initcall(init_cpu_fullname);
+
+static ssize_t boardinfo_show(struct kobject *kobj,
+ struct kobj_attribute *attr, char *buf)
+{
+ return sprintf(buf,
+ "BIOS Information\n"
+ "Vendor\t\t\t: %s\n"
+ "Version\t\t\t: %s\n"
+ "ROM Size\t\t: %d KB\n"
+ "Release Date\t\t: %s\n\n"
+ "Board Information\n"
+ "Manufacturer\t\t: %s\n"
+ "Board Name\t\t: %s\n"
+ "Family\t\t\t: LOONGSON64\n\n",
+ b_info.bios_vendor, b_info.bios_version,
+ b_info.bios_size, b_info.bios_release_date,
+ b_info.board_vendor, b_info.board_name);
+}
+
+static struct kobj_attribute boardinfo_attr = __ATTR(boardinfo, 0444,
+ boardinfo_show, NULL);
+
+static int __init boardinfo_init(void)
+{
+ struct kobject *loongson_kobj;
+
+ loongson_kobj = kobject_create_and_add("loongson", firmware_kobj);
+
+ return sysfs_create_file(loongson_kobj, &boardinfo_attr.attr);
+}
+late_initcall(boardinfo_init);
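
As a side note, boardinfo_show() above ends up backing a sysfs attribute that, given the
"loongson" kobject created under firmware_kobj, should appear as
/sys/firmware/loongson/boardinfo. A minimal user-space reader, shown only as a sketch
(the path is inferred from the kobject and attribute names, and this is not part of the
patch), could look like this:

  /* Dump the boardinfo attribute produced by boardinfo_show() (path assumed). */
  #include <stdio.h>
  #include <stdlib.h>

  int main(void)
  {
          char buf[512];
          size_t n;
          FILE *f = fopen("/sys/firmware/loongson/boardinfo", "r");

          if (!f) {
                  perror("boardinfo");
                  return EXIT_FAILURE;
          }
          while ((n = fread(buf, 1, sizeof(buf), f)) > 0)
                  fwrite(buf, 1, n, stdout);
          fclose(f);
          return 0;
  }
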
diff --git a/arch/loongarch/kernel/fpu.S b/arch/loongarch/kernel/fpu.S
new file mode 100644
index 0000000000..d53ab10f46
--- /dev/null
+++ b/arch/loongarch/kernel/fpu.S
@@ -0,0 +1,526 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Lu Zeng <zenglu@loongson.cn>
+ * Pei Huang <huangpei@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-extable.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/fpregdef.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+
+#define FPU_REG_WIDTH 8
+#define LSX_REG_WIDTH 16
+#define LASX_REG_WIDTH 32
+
+ .macro EX insn, reg, src, offs
+.ex\@: \insn \reg, \src, \offs
+ _asm_extable .ex\@, .L_fpu_fault
+ .endm
+
+ .macro sc_save_fp base
+ EX fst.d $f0, \base, (0 * FPU_REG_WIDTH)
+ EX fst.d $f1, \base, (1 * FPU_REG_WIDTH)
+ EX fst.d $f2, \base, (2 * FPU_REG_WIDTH)
+ EX fst.d $f3, \base, (3 * FPU_REG_WIDTH)
+ EX fst.d $f4, \base, (4 * FPU_REG_WIDTH)
+ EX fst.d $f5, \base, (5 * FPU_REG_WIDTH)
+ EX fst.d $f6, \base, (6 * FPU_REG_WIDTH)
+ EX fst.d $f7, \base, (7 * FPU_REG_WIDTH)
+ EX fst.d $f8, \base, (8 * FPU_REG_WIDTH)
+ EX fst.d $f9, \base, (9 * FPU_REG_WIDTH)
+ EX fst.d $f10, \base, (10 * FPU_REG_WIDTH)
+ EX fst.d $f11, \base, (11 * FPU_REG_WIDTH)
+ EX fst.d $f12, \base, (12 * FPU_REG_WIDTH)
+ EX fst.d $f13, \base, (13 * FPU_REG_WIDTH)
+ EX fst.d $f14, \base, (14 * FPU_REG_WIDTH)
+ EX fst.d $f15, \base, (15 * FPU_REG_WIDTH)
+ EX fst.d $f16, \base, (16 * FPU_REG_WIDTH)
+ EX fst.d $f17, \base, (17 * FPU_REG_WIDTH)
+ EX fst.d $f18, \base, (18 * FPU_REG_WIDTH)
+ EX fst.d $f19, \base, (19 * FPU_REG_WIDTH)
+ EX fst.d $f20, \base, (20 * FPU_REG_WIDTH)
+ EX fst.d $f21, \base, (21 * FPU_REG_WIDTH)
+ EX fst.d $f22, \base, (22 * FPU_REG_WIDTH)
+ EX fst.d $f23, \base, (23 * FPU_REG_WIDTH)
+ EX fst.d $f24, \base, (24 * FPU_REG_WIDTH)
+ EX fst.d $f25, \base, (25 * FPU_REG_WIDTH)
+ EX fst.d $f26, \base, (26 * FPU_REG_WIDTH)
+ EX fst.d $f27, \base, (27 * FPU_REG_WIDTH)
+ EX fst.d $f28, \base, (28 * FPU_REG_WIDTH)
+ EX fst.d $f29, \base, (29 * FPU_REG_WIDTH)
+ EX fst.d $f30, \base, (30 * FPU_REG_WIDTH)
+ EX fst.d $f31, \base, (31 * FPU_REG_WIDTH)
+ .endm
+
+ .macro sc_restore_fp base
+ EX fld.d $f0, \base, (0 * FPU_REG_WIDTH)
+ EX fld.d $f1, \base, (1 * FPU_REG_WIDTH)
+ EX fld.d $f2, \base, (2 * FPU_REG_WIDTH)
+ EX fld.d $f3, \base, (3 * FPU_REG_WIDTH)
+ EX fld.d $f4, \base, (4 * FPU_REG_WIDTH)
+ EX fld.d $f5, \base, (5 * FPU_REG_WIDTH)
+ EX fld.d $f6, \base, (6 * FPU_REG_WIDTH)
+ EX fld.d $f7, \base, (7 * FPU_REG_WIDTH)
+ EX fld.d $f8, \base, (8 * FPU_REG_WIDTH)
+ EX fld.d $f9, \base, (9 * FPU_REG_WIDTH)
+ EX fld.d $f10, \base, (10 * FPU_REG_WIDTH)
+ EX fld.d $f11, \base, (11 * FPU_REG_WIDTH)
+ EX fld.d $f12, \base, (12 * FPU_REG_WIDTH)
+ EX fld.d $f13, \base, (13 * FPU_REG_WIDTH)
+ EX fld.d $f14, \base, (14 * FPU_REG_WIDTH)
+ EX fld.d $f15, \base, (15 * FPU_REG_WIDTH)
+ EX fld.d $f16, \base, (16 * FPU_REG_WIDTH)
+ EX fld.d $f17, \base, (17 * FPU_REG_WIDTH)
+ EX fld.d $f18, \base, (18 * FPU_REG_WIDTH)
+ EX fld.d $f19, \base, (19 * FPU_REG_WIDTH)
+ EX fld.d $f20, \base, (20 * FPU_REG_WIDTH)
+ EX fld.d $f21, \base, (21 * FPU_REG_WIDTH)
+ EX fld.d $f22, \base, (22 * FPU_REG_WIDTH)
+ EX fld.d $f23, \base, (23 * FPU_REG_WIDTH)
+ EX fld.d $f24, \base, (24 * FPU_REG_WIDTH)
+ EX fld.d $f25, \base, (25 * FPU_REG_WIDTH)
+ EX fld.d $f26, \base, (26 * FPU_REG_WIDTH)
+ EX fld.d $f27, \base, (27 * FPU_REG_WIDTH)
+ EX fld.d $f28, \base, (28 * FPU_REG_WIDTH)
+ EX fld.d $f29, \base, (29 * FPU_REG_WIDTH)
+ EX fld.d $f30, \base, (30 * FPU_REG_WIDTH)
+ EX fld.d $f31, \base, (31 * FPU_REG_WIDTH)
+ .endm
+
+ .macro sc_save_fcc base, tmp0, tmp1
+ movcf2gr \tmp0, $fcc0
+ move \tmp1, \tmp0
+ movcf2gr \tmp0, $fcc1
+ bstrins.d \tmp1, \tmp0, 15, 8
+ movcf2gr \tmp0, $fcc2
+ bstrins.d \tmp1, \tmp0, 23, 16
+ movcf2gr \tmp0, $fcc3
+ bstrins.d \tmp1, \tmp0, 31, 24
+ movcf2gr \tmp0, $fcc4
+ bstrins.d \tmp1, \tmp0, 39, 32
+ movcf2gr \tmp0, $fcc5
+ bstrins.d \tmp1, \tmp0, 47, 40
+ movcf2gr \tmp0, $fcc6
+ bstrins.d \tmp1, \tmp0, 55, 48
+ movcf2gr \tmp0, $fcc7
+ bstrins.d \tmp1, \tmp0, 63, 56
+ EX st.d \tmp1, \base, 0
+ .endm
+
+ .macro sc_restore_fcc base, tmp0, tmp1
+ EX ld.d \tmp0, \base, 0
+ bstrpick.d \tmp1, \tmp0, 7, 0
+ movgr2cf $fcc0, \tmp1
+ bstrpick.d \tmp1, \tmp0, 15, 8
+ movgr2cf $fcc1, \tmp1
+ bstrpick.d \tmp1, \tmp0, 23, 16
+ movgr2cf $fcc2, \tmp1
+ bstrpick.d \tmp1, \tmp0, 31, 24
+ movgr2cf $fcc3, \tmp1
+ bstrpick.d \tmp1, \tmp0, 39, 32
+ movgr2cf $fcc4, \tmp1
+ bstrpick.d \tmp1, \tmp0, 47, 40
+ movgr2cf $fcc5, \tmp1
+ bstrpick.d \tmp1, \tmp0, 55, 48
+ movgr2cf $fcc6, \tmp1
+ bstrpick.d \tmp1, \tmp0, 63, 56
+ movgr2cf $fcc7, \tmp1
+ .endm
+
+ .macro sc_save_fcsr base, tmp0
+ movfcsr2gr \tmp0, fcsr0
+ EX st.w \tmp0, \base, 0
+#if defined(CONFIG_CPU_HAS_LBT)
+ /* TM bit is always 0 if LBT is not supported */
+ andi \tmp0, \tmp0, FPU_CSR_TM
+ beqz \tmp0, 1f
+ x86clrtm
+1:
+#endif
+ .endm
+
+ .macro sc_restore_fcsr base, tmp0
+ EX ld.w \tmp0, \base, 0
+ movgr2fcsr fcsr0, \tmp0
+ .endm
+
+ .macro sc_save_lsx base
+#ifdef CONFIG_CPU_HAS_LSX
+ EX vst $vr0, \base, (0 * LSX_REG_WIDTH)
+ EX vst $vr1, \base, (1 * LSX_REG_WIDTH)
+ EX vst $vr2, \base, (2 * LSX_REG_WIDTH)
+ EX vst $vr3, \base, (3 * LSX_REG_WIDTH)
+ EX vst $vr4, \base, (4 * LSX_REG_WIDTH)
+ EX vst $vr5, \base, (5 * LSX_REG_WIDTH)
+ EX vst $vr6, \base, (6 * LSX_REG_WIDTH)
+ EX vst $vr7, \base, (7 * LSX_REG_WIDTH)
+ EX vst $vr8, \base, (8 * LSX_REG_WIDTH)
+ EX vst $vr9, \base, (9 * LSX_REG_WIDTH)
+ EX vst $vr10, \base, (10 * LSX_REG_WIDTH)
+ EX vst $vr11, \base, (11 * LSX_REG_WIDTH)
+ EX vst $vr12, \base, (12 * LSX_REG_WIDTH)
+ EX vst $vr13, \base, (13 * LSX_REG_WIDTH)
+ EX vst $vr14, \base, (14 * LSX_REG_WIDTH)
+ EX vst $vr15, \base, (15 * LSX_REG_WIDTH)
+ EX vst $vr16, \base, (16 * LSX_REG_WIDTH)
+ EX vst $vr17, \base, (17 * LSX_REG_WIDTH)
+ EX vst $vr18, \base, (18 * LSX_REG_WIDTH)
+ EX vst $vr19, \base, (19 * LSX_REG_WIDTH)
+ EX vst $vr20, \base, (20 * LSX_REG_WIDTH)
+ EX vst $vr21, \base, (21 * LSX_REG_WIDTH)
+ EX vst $vr22, \base, (22 * LSX_REG_WIDTH)
+ EX vst $vr23, \base, (23 * LSX_REG_WIDTH)
+ EX vst $vr24, \base, (24 * LSX_REG_WIDTH)
+ EX vst $vr25, \base, (25 * LSX_REG_WIDTH)
+ EX vst $vr26, \base, (26 * LSX_REG_WIDTH)
+ EX vst $vr27, \base, (27 * LSX_REG_WIDTH)
+ EX vst $vr28, \base, (28 * LSX_REG_WIDTH)
+ EX vst $vr29, \base, (29 * LSX_REG_WIDTH)
+ EX vst $vr30, \base, (30 * LSX_REG_WIDTH)
+ EX vst $vr31, \base, (31 * LSX_REG_WIDTH)
+#endif
+ .endm
+
+ .macro sc_restore_lsx base
+#ifdef CONFIG_CPU_HAS_LSX
+ EX vld $vr0, \base, (0 * LSX_REG_WIDTH)
+ EX vld $vr1, \base, (1 * LSX_REG_WIDTH)
+ EX vld $vr2, \base, (2 * LSX_REG_WIDTH)
+ EX vld $vr3, \base, (3 * LSX_REG_WIDTH)
+ EX vld $vr4, \base, (4 * LSX_REG_WIDTH)
+ EX vld $vr5, \base, (5 * LSX_REG_WIDTH)
+ EX vld $vr6, \base, (6 * LSX_REG_WIDTH)
+ EX vld $vr7, \base, (7 * LSX_REG_WIDTH)
+ EX vld $vr8, \base, (8 * LSX_REG_WIDTH)
+ EX vld $vr9, \base, (9 * LSX_REG_WIDTH)
+ EX vld $vr10, \base, (10 * LSX_REG_WIDTH)
+ EX vld $vr11, \base, (11 * LSX_REG_WIDTH)
+ EX vld $vr12, \base, (12 * LSX_REG_WIDTH)
+ EX vld $vr13, \base, (13 * LSX_REG_WIDTH)
+ EX vld $vr14, \base, (14 * LSX_REG_WIDTH)
+ EX vld $vr15, \base, (15 * LSX_REG_WIDTH)
+ EX vld $vr16, \base, (16 * LSX_REG_WIDTH)
+ EX vld $vr17, \base, (17 * LSX_REG_WIDTH)
+ EX vld $vr18, \base, (18 * LSX_REG_WIDTH)
+ EX vld $vr19, \base, (19 * LSX_REG_WIDTH)
+ EX vld $vr20, \base, (20 * LSX_REG_WIDTH)
+ EX vld $vr21, \base, (21 * LSX_REG_WIDTH)
+ EX vld $vr22, \base, (22 * LSX_REG_WIDTH)
+ EX vld $vr23, \base, (23 * LSX_REG_WIDTH)
+ EX vld $vr24, \base, (24 * LSX_REG_WIDTH)
+ EX vld $vr25, \base, (25 * LSX_REG_WIDTH)
+ EX vld $vr26, \base, (26 * LSX_REG_WIDTH)
+ EX vld $vr27, \base, (27 * LSX_REG_WIDTH)
+ EX vld $vr28, \base, (28 * LSX_REG_WIDTH)
+ EX vld $vr29, \base, (29 * LSX_REG_WIDTH)
+ EX vld $vr30, \base, (30 * LSX_REG_WIDTH)
+ EX vld $vr31, \base, (31 * LSX_REG_WIDTH)
+#endif
+ .endm
+
+ .macro sc_save_lasx base
+#ifdef CONFIG_CPU_HAS_LASX
+ EX xvst $xr0, \base, (0 * LASX_REG_WIDTH)
+ EX xvst $xr1, \base, (1 * LASX_REG_WIDTH)
+ EX xvst $xr2, \base, (2 * LASX_REG_WIDTH)
+ EX xvst $xr3, \base, (3 * LASX_REG_WIDTH)
+ EX xvst $xr4, \base, (4 * LASX_REG_WIDTH)
+ EX xvst $xr5, \base, (5 * LASX_REG_WIDTH)
+ EX xvst $xr6, \base, (6 * LASX_REG_WIDTH)
+ EX xvst $xr7, \base, (7 * LASX_REG_WIDTH)
+ EX xvst $xr8, \base, (8 * LASX_REG_WIDTH)
+ EX xvst $xr9, \base, (9 * LASX_REG_WIDTH)
+ EX xvst $xr10, \base, (10 * LASX_REG_WIDTH)
+ EX xvst $xr11, \base, (11 * LASX_REG_WIDTH)
+ EX xvst $xr12, \base, (12 * LASX_REG_WIDTH)
+ EX xvst $xr13, \base, (13 * LASX_REG_WIDTH)
+ EX xvst $xr14, \base, (14 * LASX_REG_WIDTH)
+ EX xvst $xr15, \base, (15 * LASX_REG_WIDTH)
+ EX xvst $xr16, \base, (16 * LASX_REG_WIDTH)
+ EX xvst $xr17, \base, (17 * LASX_REG_WIDTH)
+ EX xvst $xr18, \base, (18 * LASX_REG_WIDTH)
+ EX xvst $xr19, \base, (19 * LASX_REG_WIDTH)
+ EX xvst $xr20, \base, (20 * LASX_REG_WIDTH)
+ EX xvst $xr21, \base, (21 * LASX_REG_WIDTH)
+ EX xvst $xr22, \base, (22 * LASX_REG_WIDTH)
+ EX xvst $xr23, \base, (23 * LASX_REG_WIDTH)
+ EX xvst $xr24, \base, (24 * LASX_REG_WIDTH)
+ EX xvst $xr25, \base, (25 * LASX_REG_WIDTH)
+ EX xvst $xr26, \base, (26 * LASX_REG_WIDTH)
+ EX xvst $xr27, \base, (27 * LASX_REG_WIDTH)
+ EX xvst $xr28, \base, (28 * LASX_REG_WIDTH)
+ EX xvst $xr29, \base, (29 * LASX_REG_WIDTH)
+ EX xvst $xr30, \base, (30 * LASX_REG_WIDTH)
+ EX xvst $xr31, \base, (31 * LASX_REG_WIDTH)
+#endif
+ .endm
+
+ .macro sc_restore_lasx base
+#ifdef CONFIG_CPU_HAS_LASX
+ EX xvld $xr0, \base, (0 * LASX_REG_WIDTH)
+ EX xvld $xr1, \base, (1 * LASX_REG_WIDTH)
+ EX xvld $xr2, \base, (2 * LASX_REG_WIDTH)
+ EX xvld $xr3, \base, (3 * LASX_REG_WIDTH)
+ EX xvld $xr4, \base, (4 * LASX_REG_WIDTH)
+ EX xvld $xr5, \base, (5 * LASX_REG_WIDTH)
+ EX xvld $xr6, \base, (6 * LASX_REG_WIDTH)
+ EX xvld $xr7, \base, (7 * LASX_REG_WIDTH)
+ EX xvld $xr8, \base, (8 * LASX_REG_WIDTH)
+ EX xvld $xr9, \base, (9 * LASX_REG_WIDTH)
+ EX xvld $xr10, \base, (10 * LASX_REG_WIDTH)
+ EX xvld $xr11, \base, (11 * LASX_REG_WIDTH)
+ EX xvld $xr12, \base, (12 * LASX_REG_WIDTH)
+ EX xvld $xr13, \base, (13 * LASX_REG_WIDTH)
+ EX xvld $xr14, \base, (14 * LASX_REG_WIDTH)
+ EX xvld $xr15, \base, (15 * LASX_REG_WIDTH)
+ EX xvld $xr16, \base, (16 * LASX_REG_WIDTH)
+ EX xvld $xr17, \base, (17 * LASX_REG_WIDTH)
+ EX xvld $xr18, \base, (18 * LASX_REG_WIDTH)
+ EX xvld $xr19, \base, (19 * LASX_REG_WIDTH)
+ EX xvld $xr20, \base, (20 * LASX_REG_WIDTH)
+ EX xvld $xr21, \base, (21 * LASX_REG_WIDTH)
+ EX xvld $xr22, \base, (22 * LASX_REG_WIDTH)
+ EX xvld $xr23, \base, (23 * LASX_REG_WIDTH)
+ EX xvld $xr24, \base, (24 * LASX_REG_WIDTH)
+ EX xvld $xr25, \base, (25 * LASX_REG_WIDTH)
+ EX xvld $xr26, \base, (26 * LASX_REG_WIDTH)
+ EX xvld $xr27, \base, (27 * LASX_REG_WIDTH)
+ EX xvld $xr28, \base, (28 * LASX_REG_WIDTH)
+ EX xvld $xr29, \base, (29 * LASX_REG_WIDTH)
+ EX xvld $xr30, \base, (30 * LASX_REG_WIDTH)
+ EX xvld $xr31, \base, (31 * LASX_REG_WIDTH)
+#endif
+ .endm
+
+/*
+ * Save a thread's fp context.
+ */
+SYM_FUNC_START(_save_fp)
+ fpu_save_csr a0 t1
+ fpu_save_double a0 t1 # clobbers t1
+ fpu_save_cc a0 t1 t2 # clobbers t1, t2
+ jr ra
+SYM_FUNC_END(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+SYM_FUNC_START(_restore_fp)
+ fpu_restore_double a0 t1 # clobbers t1
+ fpu_restore_csr a0 t1 t2
+ fpu_restore_cc a0 t1 t2 # clobbers t1, t2
+ jr ra
+SYM_FUNC_END(_restore_fp)
+
+#ifdef CONFIG_CPU_HAS_LSX
+
+/*
+ * Save a thread's LSX vector context.
+ */
+SYM_FUNC_START(_save_lsx)
+ lsx_save_all a0 t1 t2
+ jr ra
+SYM_FUNC_END(_save_lsx)
+EXPORT_SYMBOL(_save_lsx)
+
+/*
+ * Restore a thread's LSX vector context.
+ */
+SYM_FUNC_START(_restore_lsx)
+ lsx_restore_all a0 t1 t2
+ jr ra
+SYM_FUNC_END(_restore_lsx)
+
+SYM_FUNC_START(_save_lsx_upper)
+ lsx_save_all_upper a0 t0 t1
+ jr ra
+SYM_FUNC_END(_save_lsx_upper)
+
+SYM_FUNC_START(_restore_lsx_upper)
+ lsx_restore_all_upper a0 t0 t1
+ jr ra
+SYM_FUNC_END(_restore_lsx_upper)
+
+SYM_FUNC_START(_init_lsx_upper)
+ lsx_init_all_upper t1
+ jr ra
+SYM_FUNC_END(_init_lsx_upper)
+#endif
+
+#ifdef CONFIG_CPU_HAS_LASX
+
+/*
+ * Save a thread's LASX vector context.
+ */
+SYM_FUNC_START(_save_lasx)
+ lasx_save_all a0 t1 t2
+ jr ra
+SYM_FUNC_END(_save_lasx)
+EXPORT_SYMBOL(_save_lasx)
+
+/*
+ * Restore a thread's LASX vector context.
+ */
+SYM_FUNC_START(_restore_lasx)
+ lasx_restore_all a0 t1 t2
+ jr ra
+SYM_FUNC_END(_restore_lasx)
+
+SYM_FUNC_START(_save_lasx_upper)
+ lasx_save_all_upper a0 t0 t1
+ jr ra
+SYM_FUNC_END(_save_lasx_upper)
+
+SYM_FUNC_START(_restore_lasx_upper)
+ lasx_restore_all_upper a0 t0 t1
+ jr ra
+SYM_FUNC_END(_restore_lasx_upper)
+
+SYM_FUNC_START(_init_lasx_upper)
+ lasx_init_all_upper t1
+ jr ra
+SYM_FUNC_END(_init_lasx_upper)
+#endif
+
+/*
+ * Load the FPU with signalling NaNs. The bit pattern used here has the
+ * property that it represents a signalling NaN whether it is interpreted
+ * as single or as double precision.
+ *
+ * The value used to initialize fcsr0 comes in $a0.
+ */
+
+SYM_FUNC_START(_init_fpu)
+ li.w t1, CSR_EUEN_FPEN
+ csrxchg t1, t1, LOONGARCH_CSR_EUEN
+
+ movgr2fcsr fcsr0, a0
+
+ li.w t1, -1 # SNaN
+
+ movgr2fr.d $f0, t1
+ movgr2fr.d $f1, t1
+ movgr2fr.d $f2, t1
+ movgr2fr.d $f3, t1
+ movgr2fr.d $f4, t1
+ movgr2fr.d $f5, t1
+ movgr2fr.d $f6, t1
+ movgr2fr.d $f7, t1
+ movgr2fr.d $f8, t1
+ movgr2fr.d $f9, t1
+ movgr2fr.d $f10, t1
+ movgr2fr.d $f11, t1
+ movgr2fr.d $f12, t1
+ movgr2fr.d $f13, t1
+ movgr2fr.d $f14, t1
+ movgr2fr.d $f15, t1
+ movgr2fr.d $f16, t1
+ movgr2fr.d $f17, t1
+ movgr2fr.d $f18, t1
+ movgr2fr.d $f19, t1
+ movgr2fr.d $f20, t1
+ movgr2fr.d $f21, t1
+ movgr2fr.d $f22, t1
+ movgr2fr.d $f23, t1
+ movgr2fr.d $f24, t1
+ movgr2fr.d $f25, t1
+ movgr2fr.d $f26, t1
+ movgr2fr.d $f27, t1
+ movgr2fr.d $f28, t1
+ movgr2fr.d $f29, t1
+ movgr2fr.d $f30, t1
+ movgr2fr.d $f31, t1
+
+ jr ra
+SYM_FUNC_END(_init_fpu)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_save_fp_context)
+ sc_save_fcc a1 t1 t2
+ sc_save_fcsr a2 t1
+ sc_save_fp a0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_fp_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_restore_fp_context)
+ sc_restore_fp a0
+ sc_restore_fcc a1 t1 t2
+ sc_restore_fcsr a2 t1
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_fp_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_save_lsx_context)
+ sc_save_fcc a1, t0, t1
+ sc_save_fcsr a2, t0
+ sc_save_lsx a0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_lsx_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_restore_lsx_context)
+ sc_restore_lsx a0
+ sc_restore_fcc a1, t1, t2
+ sc_restore_fcsr a2, t1
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_lsx_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_save_lasx_context)
+ sc_save_fcc a1, t0, t1
+ sc_save_fcsr a2, t0
+ sc_save_lasx a0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_lasx_context)
+
+/*
+ * a0: fpregs
+ * a1: fcc
+ * a2: fcsr
+ */
+SYM_FUNC_START(_restore_lasx_context)
+ sc_restore_lasx a0
+ sc_restore_fcc a1, t1, t2
+ sc_restore_fcsr a2, t1
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_lasx_context)
+
+.L_fpu_fault:
+ li.w a0, -EFAULT # failure
+ jr ra
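
The sc_save_fcc/sc_restore_fcc macros above pack the eight condition-flag registers into
one 64-bit word, one byte per flag, using bstrins.d on save and bstrpick.d on restore. A
stand-alone C sketch of the same packing (illustrative only, not part of the patch):

  #include <stdint.h>
  #include <stdio.h>

  /* One byte per flag; movcf2gr yields 0 or 1, movgr2cf only looks at bit 0. */
  static uint64_t pack_fcc(const uint8_t fcc[8])
  {
          uint64_t word = 0;

          for (int i = 0; i < 8; i++)     /* bstrins.d word, fcc[i], 8*i+7, 8*i */
                  word |= (uint64_t)fcc[i] << (8 * i);
          return word;
  }

  static void unpack_fcc(uint64_t word, uint8_t fcc[8])
  {
          for (int i = 0; i < 8; i++)     /* bstrpick.d fcc[i], word, 8*i+7, 8*i */
                  fcc[i] = (word >> (8 * i)) & 0xff;
  }

  int main(void)
  {
          uint8_t in[8] = { 1, 0, 1, 1, 0, 0, 1, 0 }, out[8];

          unpack_fcc(pack_fcc(in), out);
          for (int i = 0; i < 8; i++)
                  printf("%u", out[i]);   /* prints 10110010 */
          printf("\n");
          return 0;
  }
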
diff --git a/arch/loongarch/kernel/ftrace.c b/arch/loongarch/kernel/ftrace.c
new file mode 100644
index 0000000000..8c3ec1bc7a
--- /dev/null
+++ b/arch/loongarch/kernel/ftrace.c
@@ -0,0 +1,73 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/init.h>
+#include <linux/ftrace.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+#include <asm/loongarch.h>
+#include <asm/syscall.h>
+
+#include <asm-generic/sections.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+/*
+ * Since a `call _mcount` site follows the LoongArch psABI, the ra-save and
+ * stack-allocation operations can be found before this instruction.
+ */
+
+static int ftrace_get_parent_ra_addr(unsigned long insn_addr, int *ra_off)
+{
+ int limit = 32;
+ union loongarch_instruction *insn;
+
+ insn = (union loongarch_instruction *)insn_addr;
+
+ do {
+ insn--;
+ limit--;
+
+ if (is_ra_save_ins(insn))
+ *ra_off = -((1 << 12) - insn->reg2i12_format.immediate);
+
+ } while (!is_stack_alloc_ins(insn) && limit);
+
+ if (!limit)
+ return -EINVAL;
+
+ return 0;
+}
+
+void prepare_ftrace_return(unsigned long self_addr,
+ unsigned long callsite_sp, unsigned long old)
+{
+ int ra_off;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ if (ftrace_get_parent_ra_addr(self_addr, &ra_off))
+ goto out;
+
+ if (!function_graph_enter(old, self_addr, 0, NULL))
+ *(unsigned long *)(callsite_sp + ra_off) = return_hooker;
+
+ return;
+
+out:
+ ftrace_graph_stop();
+ WARN_ON(1);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/loongarch/kernel/ftrace_dyn.c b/arch/loongarch/kernel/ftrace_dyn.c
new file mode 100644
index 0000000000..73858c9029
--- /dev/null
+++ b/arch/loongarch/kernel/ftrace_dyn.c
@@ -0,0 +1,341 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Based on arch/arm64/kernel/ftrace.c
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/ftrace.h>
+#include <linux/kprobes.h>
+#include <linux/uaccess.h>
+
+#include <asm/inst.h>
+#include <asm/module.h>
+
+static int ftrace_modify_code(unsigned long pc, u32 old, u32 new, bool validate)
+{
+ u32 replaced;
+
+ if (validate) {
+ if (larch_insn_read((void *)pc, &replaced))
+ return -EFAULT;
+
+ if (replaced != old)
+ return -EINVAL;
+ }
+
+ if (larch_insn_patch_text((void *)pc, new))
+ return -EPERM;
+
+ return 0;
+}
+
+#ifdef CONFIG_MODULES
+static bool reachable_by_bl(unsigned long addr, unsigned long pc)
+{
+ long offset = (long)addr - (long)pc;
+
+ return offset >= -SZ_128M && offset < SZ_128M;
+}
+
+static struct plt_entry *get_ftrace_plt(struct module *mod, unsigned long addr)
+{
+ struct plt_entry *plt = mod->arch.ftrace_trampolines;
+
+ if (addr == FTRACE_ADDR)
+ return &plt[FTRACE_PLT_IDX];
+ if (addr == FTRACE_REGS_ADDR &&
+ IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ return &plt[FTRACE_REGS_PLT_IDX];
+
+ return NULL;
+}
+
+/*
+ * Find the address the callsite must branch to in order to reach '*addr'.
+ *
+ * Due to the limited range of the 'bl' instruction, modules may be placed too far
+ * away to branch directly and we must use a PLT.
+ *
+ * Returns true when '*addr' contains a reachable target address, or has been
+ * modified to contain a PLT address. Returns false otherwise.
+ */
+static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
+{
+ unsigned long pc = rec->ip + LOONGARCH_INSN_SIZE;
+ struct plt_entry *plt;
+
+ /*
+ * If a custom trampoline is unreachable, rely on the ftrace_regs_caller
+ * trampoline which knows how to indirectly reach that trampoline through
+ * ops->direct_call.
+ */
+ if (*addr != FTRACE_ADDR && *addr != FTRACE_REGS_ADDR && !reachable_by_bl(*addr, pc))
+ *addr = FTRACE_REGS_ADDR;
+
+ /*
+ * When the target is within range of the 'bl' instruction, use 'addr'
+ * as-is and branch to that directly.
+ */
+ if (reachable_by_bl(*addr, pc))
+ return true;
+
+ /*
+ * 'mod' is only set at module load time, but if we end up
+ * dealing with an out-of-range condition, we can assume it
+ * is due to a module being loaded far away from the kernel.
+ *
+ * NOTE: __module_text_address() must be called with preemption
+ * disabled, but we can rely on ftrace_lock to ensure that 'mod'
+ * retains its validity throughout the remainder of this code.
+ */
+ if (!mod) {
+ preempt_disable();
+ mod = __module_text_address(pc);
+ preempt_enable();
+ }
+
+ if (WARN_ON(!mod))
+ return false;
+
+ plt = get_ftrace_plt(mod, *addr);
+ if (!plt) {
+ pr_err("ftrace: no module PLT for %ps\n", (void *)*addr);
+ return false;
+ }
+
+ *addr = (unsigned long)plt;
+ return true;
+}
+#else /* !CONFIG_MODULES */
+static bool ftrace_find_callable_addr(struct dyn_ftrace *rec, struct module *mod, unsigned long *addr)
+{
+ return true;
+}
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr, unsigned long addr)
+{
+ u32 old, new;
+ unsigned long pc;
+
+ pc = rec->ip + LOONGARCH_INSN_SIZE;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &addr))
+ return -EINVAL;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &old_addr))
+ return -EINVAL;
+
+ new = larch_insn_gen_bl(pc, addr);
+ old = larch_insn_gen_bl(pc, old_addr);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ u32 new;
+ unsigned long pc;
+
+ pc = (unsigned long)&ftrace_call;
+ new = larch_insn_gen_bl(pc, (unsigned long)func);
+
+ return ftrace_modify_code(pc, 0, new, false);
+}
+
+/*
+ * The compiler has inserted 2 NOPs before the regular function prologue.
+ * T series registers are available and safe because of LoongArch's psABI.
+ *
+ * At runtime, we can replace the nop with a bl to enable the ftrace call and
+ * replace the bl with a nop to disable it. Because the bl clobbers RA, the
+ * original RA value is saved in t0 first.
+ *
+ * Details are:
+ *
+ * | Compiled   | Disabled               | Enabled                |
+ * +------------+------------------------+------------------------+
+ * | nop        | move t0, ra            | move t0, ra            |
+ * | nop        | nop                    | bl ftrace_caller       |
+ * | func_body  | func_body              | func_body              |
+ *
+ * The RA value will be recovered by ftrace_regs_entry, and restored into RA
+ * before returning to the regular function prologue. When a function is not
+ * being traced, the "move t0, ra" is not harmful.
+ */
+
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ u32 old, new;
+ unsigned long pc;
+
+ pc = rec->ip;
+ old = larch_insn_gen_nop();
+ new = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ u32 old, new;
+ unsigned long pc;
+
+ pc = rec->ip + LOONGARCH_INSN_SIZE;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &addr))
+ return -EINVAL;
+
+ old = larch_insn_gen_nop();
+ new = larch_insn_gen_bl(pc, addr);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec, unsigned long addr)
+{
+ u32 old, new;
+ unsigned long pc;
+
+ pc = rec->ip + LOONGARCH_INSN_SIZE;
+
+ if (!ftrace_find_callable_addr(rec, NULL, &addr))
+ return -EINVAL;
+
+ new = larch_insn_gen_nop();
+ old = larch_insn_gen_bl(pc, addr);
+
+ return ftrace_modify_code(pc, old, new, true);
+}
+
+void arch_ftrace_update_code(int command)
+{
+ command |= FTRACE_MAY_SLEEP;
+ ftrace_modify_all_code(command);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+void prepare_ftrace_return(unsigned long self_addr, unsigned long *parent)
+{
+ unsigned long old;
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ old = *parent;
+
+ if (!function_graph_enter(old, self_addr, 0, parent))
+ *parent = return_hooker;
+}
+
+#ifdef CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS
+void ftrace_graph_func(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *op, struct ftrace_regs *fregs)
+{
+ struct pt_regs *regs = &fregs->regs;
+ unsigned long *parent = (unsigned long *)&regs->regs[1];
+
+ prepare_ftrace_return(ip, (unsigned long *)parent);
+}
+#else
+static int ftrace_modify_graph_caller(bool enable)
+{
+ u32 branch, nop;
+ unsigned long pc, func;
+ extern void ftrace_graph_call(void);
+
+ pc = (unsigned long)&ftrace_graph_call;
+ func = (unsigned long)&ftrace_graph_caller;
+
+ nop = larch_insn_gen_nop();
+ branch = larch_insn_gen_b(pc, func);
+
+ if (enable)
+ return ftrace_modify_code(pc, nop, branch, true);
+ else
+ return ftrace_modify_code(pc, branch, nop, true);
+}
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_graph_caller(false);
+}
+#endif /* CONFIG_HAVE_DYNAMIC_FTRACE_WITH_ARGS */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_KPROBES_ON_FTRACE
+/* Ftrace callback handler for kprobes -- called with preemption disabled */
+void kprobe_ftrace_handler(unsigned long ip, unsigned long parent_ip,
+ struct ftrace_ops *ops, struct ftrace_regs *fregs)
+{
+ int bit;
+ struct pt_regs *regs;
+ struct kprobe *p;
+ struct kprobe_ctlblk *kcb;
+
+ bit = ftrace_test_recursion_trylock(ip, parent_ip);
+ if (bit < 0)
+ return;
+
+ p = get_kprobe((kprobe_opcode_t *)ip);
+ if (unlikely(!p) || kprobe_disabled(p))
+ goto out;
+
+ regs = ftrace_get_regs(fregs);
+ if (!regs)
+ goto out;
+
+ kcb = get_kprobe_ctlblk();
+ if (kprobe_running()) {
+ kprobes_inc_nmissed_count(p);
+ } else {
+ unsigned long orig_ip = instruction_pointer(regs);
+
+ instruction_pointer_set(regs, ip);
+
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ /*
+ * Emulate singlestep (and also recover regs->csr_era)
+ * as if there is a nop
+ */
+ instruction_pointer_set(regs, (unsigned long)p->addr + MCOUNT_INSN_SIZE);
+ if (unlikely(p->post_handler)) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ p->post_handler(p, regs, 0);
+ }
+ instruction_pointer_set(regs, orig_ip);
+ }
+
+ /*
+ * If pre_handler returns !0, it changes regs->csr_era. We have to
+ * skip emulating post_handler.
+ */
+ __this_cpu_write(current_kprobe, NULL);
+ }
+out:
+ ftrace_test_recursion_unlock(bit);
+}
+NOKPROBE_SYMBOL(kprobe_ftrace_handler);
+
+int arch_prepare_kprobe_ftrace(struct kprobe *p)
+{
+ p->ainsn.insn = NULL;
+ return 0;
+}
+#endif /* CONFIG_KPROBES_ON_FTRACE */
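
reachable_by_bl() above checks a +/-128 MiB window because bl encodes a signed 26-bit
word offset, i.e. a byte offset in [-2^27, 2^27). A small stand-alone sketch of that
check, using a hypothetical bl_reachable() helper that also shows the word offset
larch_insn_gen_bl() would encode (not part of the patch):

  #include <stdbool.h>
  #include <stdint.h>
  #include <stdio.h>

  #define BL_RANGE (INT64_C(1) << 27)     /* 128 MiB, matches SZ_128M */

  static bool bl_reachable(int64_t pc, int64_t target, int32_t *word_off)
  {
          int64_t off = target - pc;

          if (off < -BL_RANGE || off >= BL_RANGE || (off & 3))
                  return false;
          *word_off = (int32_t)(off >> 2);  /* 26-bit immediate placed in the insn */
          return true;
  }

  int main(void)
  {
          int32_t imm;

          /* 4 KiB away: reachable with a direct bl. */
          printf("%d\n", bl_reachable(0x200000, 0x201000, &imm));              /* 1 */
          /* 256 MiB away: would need the module PLT path instead. */
          printf("%d\n", bl_reachable(0x200000, 0x200000 + (1L << 28), &imm)); /* 0 */
          return 0;
  }
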
diff --git a/arch/loongarch/kernel/genex.S b/arch/loongarch/kernel/genex.S
new file mode 100644
index 0000000000..2bb3aa2dcf
--- /dev/null
+++ b/arch/loongarch/kernel/genex.S
@@ -0,0 +1,99 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+ .align 5
+SYM_FUNC_START(__arch_cpu_idle)
+ /* start of rollback region */
+ LONG_L t0, tp, TI_FLAGS
+ nop
+ andi t0, t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+ idle 0
+ /* end of rollback region */
+1: jr ra
+SYM_FUNC_END(__arch_cpu_idle)
+
+SYM_CODE_START(handle_vint)
+ BACKUP_T0T1
+ SAVE_ALL
+ la_abs t1, __arch_cpu_idle
+ LONG_L t0, sp, PT_ERA
+ /* 32 byte rollback region */
+ ori t0, t0, 0x1f
+ xori t0, t0, 0x1f
+ bne t0, t1, 1f
+ LONG_S t0, sp, PT_ERA
+1: move a0, sp
+ move a1, sp
+ la_abs t0, do_vint
+ jirl ra, t0, 0
+ RESTORE_ALL_AND_RET
+SYM_CODE_END(handle_vint)
+
+SYM_CODE_START(except_vec_cex)
+ b cache_parity_error
+SYM_CODE_END(except_vec_cex)
+
+ .macro build_prep_badv
+ csrrd t0, LOONGARCH_CSR_BADV
+ PTR_S t0, sp, PT_BVADDR
+ .endm
+
+ .macro build_prep_fcsr
+ movfcsr2gr a1, fcsr0
+ .endm
+
+ .macro build_prep_none
+ .endm
+
+ .macro BUILD_HANDLER exception handler prep
+ .align 5
+ SYM_CODE_START(handle_\exception)
+ 666:
+ BACKUP_T0T1
+ SAVE_ALL
+ build_prep_\prep
+ move a0, sp
+ la_abs t0, do_\handler
+ jirl ra, t0, 0
+ 668:
+ RESTORE_ALL_AND_RET
+ SYM_CODE_END(handle_\exception)
+ SYM_DATA(unwind_hint_\exception, .word 668b - 666b)
+ .endm
+
+ BUILD_HANDLER ade ade badv
+ BUILD_HANDLER ale ale badv
+ BUILD_HANDLER bce bce none
+ BUILD_HANDLER bp bp none
+ BUILD_HANDLER fpe fpe fcsr
+ BUILD_HANDLER fpu fpu none
+ BUILD_HANDLER lsx lsx none
+ BUILD_HANDLER lasx lasx none
+ BUILD_HANDLER lbt lbt none
+ BUILD_HANDLER ri ri none
+ BUILD_HANDLER watch watch none
+ BUILD_HANDLER reserved reserved none /* others */
+
+SYM_CODE_START(handle_sys)
+ la_abs t0, handle_syscall
+ jr t0
+SYM_CODE_END(handle_sys)
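
In handle_vint above, the "ori t0, t0, 0x1f; xori t0, t0, 0x1f" pair clears the low five
bits of ERA, rounding it down to the 32-byte-aligned start of __arch_cpu_idle's rollback
region before the comparison. The same trick as a stand-alone C sketch (illustrative
only, not part of the patch):

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t rollback_align(uint64_t era)
  {
          era |= 0x1f;            /* ori  t0, t0, 0x1f */
          era ^= 0x1f;            /* xori t0, t0, 0x1f */
          return era;             /* equivalent to era & ~0x1fULL */
  }

  int main(void)
  {
          /* Any ERA inside the 32-byte region maps back to its start. */
          printf("%#llx\n", (unsigned long long)rollback_align(0x120004a6cULL)); /* 0x120004a60 */
          return 0;
  }
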
diff --git a/arch/loongarch/kernel/head.S b/arch/loongarch/kernel/head.S
new file mode 100644
index 0000000000..53b883db07
--- /dev/null
+++ b/arch/loongarch/kernel/head.S
@@ -0,0 +1,149 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/bug.h>
+#include <asm/regdef.h>
+#include <asm/loongarch.h>
+#include <asm/stackframe.h>
+
+#ifdef CONFIG_EFI_STUB
+
+#include "efi-header.S"
+
+ __HEAD
+
+_head:
+ .word MZ_MAGIC /* "MZ", MS-DOS header */
+ .org 0x8
+ .dword kernel_entry /* Kernel entry point */
+ .dword _kernel_asize /* Kernel image effective size */
+ .quad PHYS_LINK_KADDR /* Kernel image load offset from start of RAM */
+ .org 0x38 /* 0x20 ~ 0x37 reserved */
+ .long LINUX_PE_MAGIC
+ .long pe_header - _head /* Offset to the PE header */
+
+pe_header:
+ __EFI_PE_HEADER
+
+SYM_DATA(kernel_asize, .long _kernel_asize);
+SYM_DATA(kernel_fsize, .long _kernel_fsize);
+SYM_DATA(kernel_offset, .long _kernel_offset);
+
+#endif
+
+ __REF
+
+ .align 12
+
+SYM_CODE_START(kernel_entry) # kernel entry point
+
+ /* Config direct window and set PG */
+ li.d t0, CSR_DMW0_INIT # UC, PLV0, 0x8000 xxxx xxxx xxxx
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ li.d t0, CSR_DMW1_INIT # CA, PLV0, 0x9000 xxxx xxxx xxxx
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+
+ JUMP_VIRT_ADDR t0, t1
+
+ /* Enable PG */
+ li.w t0, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr t0, LOONGARCH_CSR_CRMD
+ li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr t0, LOONGARCH_CSR_PRMD
+ li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr t0, LOONGARCH_CSR_EUEN
+
+ la.pcrel t0, __bss_start # clear .bss
+ st.d zero, t0, 0
+ la.pcrel t1, __bss_stop - LONGSIZE
+1:
+ addi.d t0, t0, LONGSIZE
+ st.d zero, t0, 0
+ bne t0, t1, 1b
+
+ la.pcrel t0, fw_arg0
+ st.d a0, t0, 0 # firmware arguments
+ la.pcrel t0, fw_arg1
+ st.d a1, t0, 0
+ la.pcrel t0, fw_arg2
+ st.d a2, t0, 0
+
+ /* KSave3 used for percpu base, initialized as 0 */
+ csrwr zero, PERCPU_BASE_KS
+ /* GPR21 used for percpu base (runtime), initialized as 0 */
+ move u0, zero
+
+ la.pcrel tp, init_thread_union
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
+ PTR_ADD sp, sp, tp
+ set_saved_sp sp, t0, t1
+
+#ifdef CONFIG_RELOCATABLE
+
+ bl relocate_kernel
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ /* Repoint the sp into the new kernel */
+ PTR_LI sp, (_THREAD_SIZE - PT_SIZE)
+ PTR_ADD sp, sp, tp
+ set_saved_sp sp, t0, t1
+
+ /* Jump to the new kernel: new_pc = current_pc + random_offset */
+ pcaddi t0, 0
+ add.d t0, t0, a0
+ jirl zero, t0, 0xc
+#endif /* CONFIG_RANDOMIZE_BASE */
+
+#endif /* CONFIG_RELOCATABLE */
+
+#ifdef CONFIG_KASAN
+ bl kasan_early_init
+#endif
+
+ bl start_kernel
+ ASM_BUG()
+
+SYM_CODE_END(kernel_entry)
+
+#ifdef CONFIG_SMP
+
+/*
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * function after setting up the stack and tp registers.
+ */
+SYM_CODE_START(smpboot_entry)
+ li.d t0, CSR_DMW0_INIT # UC, PLV0
+ csrwr t0, LOONGARCH_CSR_DMWIN0
+ li.d t0, CSR_DMW1_INIT # CA, PLV0
+ csrwr t0, LOONGARCH_CSR_DMWIN1
+
+ JUMP_VIRT_ADDR t0, t1
+
+ /* Enable PG */
+ li.w t0, 0xb0 # PLV=0, IE=0, PG=1
+ csrwr t0, LOONGARCH_CSR_CRMD
+ li.w t0, 0x04 # PLV=0, PIE=1, PWE=0
+ csrwr t0, LOONGARCH_CSR_PRMD
+ li.w t0, 0x00 # FPE=0, SXE=0, ASXE=0, BTE=0
+ csrwr t0, LOONGARCH_CSR_EUEN
+
+ la.pcrel t0, cpuboot_data
+ ld.d sp, t0, CPU_BOOT_STACK
+ ld.d tp, t0, CPU_BOOT_TINFO
+
+ bl start_secondary
+ ASM_BUG()
+
+SYM_CODE_END(smpboot_entry)
+
+#endif /* CONFIG_SMP */
+
+SYM_ENTRY(kernel_entry_end, SYM_L_GLOBAL, SYM_A_NONE)
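
The .bss-clearing loop in kernel_entry above writes one zero at __bss_start and then
keeps advancing by LONGSIZE until it has stored to the last long before __bss_stop. A C
rendering of the same loop, using a local array in place of the linker symbols (a sketch
only, not part of the patch):

  #include <stdint.h>
  #include <stdio.h>

  static uint64_t fake_bss[16];   /* stand-in for the __bss_start..__bss_stop range */

  static void clear_bss(uint64_t *bss_start, uint64_t *bss_stop)
  {
          uint64_t *p = bss_start;                /* la.pcrel t0, __bss_start           */
          uint64_t *last = bss_stop - 1;          /* la.pcrel t1, __bss_stop - LONGSIZE */

          *p = 0;                                 /* st.d zero, t0, 0                   */
          do {
                  p++;                            /* addi.d t0, t0, LONGSIZE            */
                  *p = 0;                         /* st.d zero, t0, 0                   */
          } while (p != last);                    /* bne t0, t1, 1b                     */
  }

  int main(void)
  {
          for (int i = 0; i < 16; i++)
                  fake_bss[i] = 0xdeadbeef;
          clear_bss(fake_bss, fake_bss + 16);
          for (int i = 0; i < 16; i++)
                  if (fake_bss[i])
                          printf("not cleared at %d\n", i);
          printf("done\n");
          return 0;
  }
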
diff --git a/arch/loongarch/kernel/hw_breakpoint.c b/arch/loongarch/kernel/hw_breakpoint.c
new file mode 100644
index 0000000000..fc55c4de2a
--- /dev/null
+++ b/arch/loongarch/kernel/hw_breakpoint.c
@@ -0,0 +1,549 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+#define pr_fmt(fmt) "hw-breakpoint: " fmt
+
+#include <linux/hw_breakpoint.h>
+#include <linux/kprobes.h>
+#include <linux/perf_event.h>
+
+#include <asm/hw_breakpoint.h>
+
+/* Breakpoint currently in use for each BRP. */
+static DEFINE_PER_CPU(struct perf_event *, bp_on_reg[LOONGARCH_MAX_BRP]);
+
+/* Watchpoint currently in use for each WRP. */
+static DEFINE_PER_CPU(struct perf_event *, wp_on_reg[LOONGARCH_MAX_WRP]);
+
+int hw_breakpoint_slots(int type)
+{
+ /*
+ * We can be called early, so don't rely on
+ * our static variables being initialised.
+ */
+ switch (type) {
+ case TYPE_INST:
+ return get_num_brps();
+ case TYPE_DATA:
+ return get_num_wrps();
+ default:
+ pr_warn("unknown slot type: %d\n", type);
+ return 0;
+ }
+}
+
+#define READ_WB_REG_CASE(OFF, N, REG, T, VAL) \
+ case (OFF + N): \
+ LOONGARCH_CSR_WATCH_READ(N, REG, T, VAL); \
+ break
+
+#define WRITE_WB_REG_CASE(OFF, N, REG, T, VAL) \
+ case (OFF + N): \
+ LOONGARCH_CSR_WATCH_WRITE(N, REG, T, VAL); \
+ break
+
+#define GEN_READ_WB_REG_CASES(OFF, REG, T, VAL) \
+ READ_WB_REG_CASE(OFF, 0, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 1, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 2, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 3, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 4, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 5, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 6, REG, T, VAL); \
+ READ_WB_REG_CASE(OFF, 7, REG, T, VAL);
+
+#define GEN_WRITE_WB_REG_CASES(OFF, REG, T, VAL) \
+ WRITE_WB_REG_CASE(OFF, 0, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 1, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 2, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 3, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 4, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 5, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 6, REG, T, VAL); \
+ WRITE_WB_REG_CASE(OFF, 7, REG, T, VAL);
+
+static u64 read_wb_reg(int reg, int n, int t)
+{
+ u64 val = 0;
+
+ switch (reg + n) {
+ GEN_READ_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
+ GEN_READ_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
+ GEN_READ_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
+ GEN_READ_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
+ default:
+ pr_warn("Attempt to read from unknown breakpoint register %d\n", n);
+ }
+
+ return val;
+}
+NOKPROBE_SYMBOL(read_wb_reg);
+
+static void write_wb_reg(int reg, int n, int t, u64 val)
+{
+ switch (reg + n) {
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_ADDR, ADDR, t, val);
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_MASK, MASK, t, val);
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_CTRL, CTRL, t, val);
+ GEN_WRITE_WB_REG_CASES(CSR_CFG_ASID, ASID, t, val);
+ default:
+ pr_warn("Attempt to write to unknown breakpoint register %d\n", n);
+ }
+}
+NOKPROBE_SYMBOL(write_wb_reg);
+
+enum hw_breakpoint_ops {
+ HW_BREAKPOINT_INSTALL,
+ HW_BREAKPOINT_UNINSTALL,
+};
+
+/*
+ * hw_breakpoint_slot_setup - Find and set up a perf slot according to the operation
+ *
+ * @slots: pointer to array of slots
+ * @max_slots: max number of slots
+ * @bp: perf_event to setup
+ * @ops: operation to be carried out on the slot
+ *
+ * Return:
+ * slot index on success
+ * -ENOSPC if no slot is available/matches
+ * -EINVAL on wrong operations parameter
+ */
+
+static int hw_breakpoint_slot_setup(struct perf_event **slots, int max_slots,
+ struct perf_event *bp, enum hw_breakpoint_ops ops)
+{
+ int i;
+ struct perf_event **slot;
+
+ for (i = 0; i < max_slots; ++i) {
+ slot = &slots[i];
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ if (!*slot) {
+ *slot = bp;
+ return i;
+ }
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ if (*slot == bp) {
+ *slot = NULL;
+ return i;
+ }
+ break;
+ default:
+ pr_warn_once("Unhandled hw breakpoint ops %d\n", ops);
+ return -EINVAL;
+ }
+ }
+
+ return -ENOSPC;
+}
+
+void ptrace_hw_copy_thread(struct task_struct *tsk)
+{
+ memset(tsk->thread.hbp_break, 0, sizeof(tsk->thread.hbp_break));
+ memset(tsk->thread.hbp_watch, 0, sizeof(tsk->thread.hbp_watch));
+}
+
+/*
+ * Unregister breakpoints from this task and reset the pointers in the thread_struct.
+ */
+void flush_ptrace_hw_breakpoint(struct task_struct *tsk)
+{
+ int i;
+ struct thread_struct *t = &tsk->thread;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (t->hbp_break[i]) {
+ unregister_hw_breakpoint(t->hbp_break[i]);
+ t->hbp_break[i] = NULL;
+ }
+ }
+
+ for (i = 0; i < LOONGARCH_MAX_WRP; i++) {
+ if (t->hbp_watch[i]) {
+ unregister_hw_breakpoint(t->hbp_watch[i]);
+ t->hbp_watch[i] = NULL;
+ }
+ }
+}
+
+static int hw_breakpoint_control(struct perf_event *bp,
+ enum hw_breakpoint_ops ops)
+{
+ u32 ctrl;
+ int i, max_slots, enable;
+ struct perf_event **slots;
+ struct arch_hw_breakpoint *info = counter_arch_bp(bp);
+
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ /* Breakpoint */
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = boot_cpu_data.watch_ireg_count;
+ } else {
+ /* Watchpoint */
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = boot_cpu_data.watch_dreg_count;
+ }
+
+ i = hw_breakpoint_slot_setup(slots, max_slots, bp, ops);
+
+ if (WARN_ONCE(i < 0, "Can't find any breakpoint slot"))
+ return i;
+
+ switch (ops) {
+ case HW_BREAKPOINT_INSTALL:
+ /* Set the FWPnCFG/MWPnCFG 1~4 register. */
+ write_wb_reg(CSR_CFG_ADDR, i, 0, info->address);
+ write_wb_reg(CSR_CFG_ADDR, i, 1, info->address);
+ write_wb_reg(CSR_CFG_MASK, i, 0, info->mask);
+ write_wb_reg(CSR_CFG_MASK, i, 1, info->mask);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) {
+ write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ } else {
+ ctrl = encode_ctrl_reg(info->ctrl);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl | CTRL_PLV_ENABLE);
+ }
+ enable = csr_read64(LOONGARCH_CSR_CRMD);
+ csr_write64(CSR_CRMD_WE | enable, LOONGARCH_CSR_CRMD);
+ break;
+ case HW_BREAKPOINT_UNINSTALL:
+ /* Reset the FWPnCFG/MWPnCFG 1~4 register. */
+ write_wb_reg(CSR_CFG_ADDR, i, 0, 0);
+ write_wb_reg(CSR_CFG_ADDR, i, 1, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 0, 0);
+ write_wb_reg(CSR_CFG_MASK, i, 1, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ write_wb_reg(CSR_CFG_CTRL, i, 1, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 0, 0);
+ write_wb_reg(CSR_CFG_ASID, i, 1, 0);
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * Install a perf counter breakpoint.
+ */
+int arch_install_hw_breakpoint(struct perf_event *bp)
+{
+ return hw_breakpoint_control(bp, HW_BREAKPOINT_INSTALL);
+}
+
+void arch_uninstall_hw_breakpoint(struct perf_event *bp)
+{
+ hw_breakpoint_control(bp, HW_BREAKPOINT_UNINSTALL);
+}
+
+static int get_hbp_len(u8 hbp_len)
+{
+ unsigned int len_in_bytes = 0;
+
+ switch (hbp_len) {
+ case LOONGARCH_BREAKPOINT_LEN_1:
+ len_in_bytes = 1;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_2:
+ len_in_bytes = 2;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_4:
+ len_in_bytes = 4;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_8:
+ len_in_bytes = 8;
+ break;
+ }
+
+ return len_in_bytes;
+}
+
+/*
+ * Check whether bp virtual address is in kernel space.
+ */
+int arch_check_bp_in_kernelspace(struct arch_hw_breakpoint *hw)
+{
+ unsigned int len;
+ unsigned long va;
+
+ va = hw->address;
+ len = get_hbp_len(hw->ctrl.len);
+
+ return (va >= TASK_SIZE) && ((va + len - 1) >= TASK_SIZE);
+}
+
+/*
+ * Extract generic type and length encodings from an arch_hw_breakpoint_ctrl.
+ * Hopefully this will disappear when ptrace can bypass the conversion
+ * to generic breakpoint descriptions.
+ */
+int arch_bp_generic_fields(struct arch_hw_breakpoint_ctrl ctrl,
+ int *gen_len, int *gen_type, int *offset)
+{
+ /* Type */
+ switch (ctrl.type) {
+ case LOONGARCH_BREAKPOINT_EXECUTE:
+ *gen_type = HW_BREAKPOINT_X;
+ break;
+ case LOONGARCH_BREAKPOINT_LOAD:
+ *gen_type = HW_BREAKPOINT_R;
+ break;
+ case LOONGARCH_BREAKPOINT_STORE:
+ *gen_type = HW_BREAKPOINT_W;
+ break;
+ case LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE:
+ *gen_type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (!ctrl.len)
+ return -EINVAL;
+
+ *offset = __ffs(ctrl.len);
+
+ /* Len */
+ switch (ctrl.len) {
+ case LOONGARCH_BREAKPOINT_LEN_1:
+ *gen_len = HW_BREAKPOINT_LEN_1;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_2:
+ *gen_len = HW_BREAKPOINT_LEN_2;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_4:
+ *gen_len = HW_BREAKPOINT_LEN_4;
+ break;
+ case LOONGARCH_BREAKPOINT_LEN_8:
+ *gen_len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/*
+ * Construct an arch_hw_breakpoint from a perf_event.
+ */
+static int arch_build_bp_info(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ /* Type */
+ switch (attr->bp_type) {
+ case HW_BREAKPOINT_X:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_EXECUTE;
+ break;
+ case HW_BREAKPOINT_R:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD;
+ break;
+ case HW_BREAKPOINT_W:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_STORE;
+ break;
+ case HW_BREAKPOINT_RW:
+ hw->ctrl.type = LOONGARCH_BREAKPOINT_LOAD | LOONGARCH_BREAKPOINT_STORE;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Len */
+ switch (attr->bp_len) {
+ case HW_BREAKPOINT_LEN_1:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_1;
+ break;
+ case HW_BREAKPOINT_LEN_2:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_2;
+ break;
+ case HW_BREAKPOINT_LEN_4:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_4;
+ break;
+ case HW_BREAKPOINT_LEN_8:
+ hw->ctrl.len = LOONGARCH_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Address */
+ hw->address = attr->bp_addr;
+
+ return 0;
+}
+
+/*
+ * Validate the arch-specific HW Breakpoint register settings.
+ */
+int hw_breakpoint_arch_parse(struct perf_event *bp,
+ const struct perf_event_attr *attr,
+ struct arch_hw_breakpoint *hw)
+{
+ int ret;
+ u64 alignment_mask, offset;
+
+ /* Build the arch_hw_breakpoint. */
+ ret = arch_build_bp_info(bp, attr, hw);
+ if (ret)
+ return ret;
+
+ if (hw->ctrl.type != LOONGARCH_BREAKPOINT_EXECUTE)
+ alignment_mask = 0x7;
+ else
+ alignment_mask = 0x3;
+ offset = hw->address & alignment_mask;
+
+ hw->address &= ~alignment_mask;
+ hw->ctrl.len <<= offset;
+
+ return 0;
+}
+
+static void update_bp_registers(struct pt_regs *regs, int enable, int type)
+{
+ u32 ctrl;
+ int i, max_slots;
+ struct perf_event **slots;
+ struct arch_hw_breakpoint *info;
+
+ switch (type) {
+ case 0:
+ slots = this_cpu_ptr(bp_on_reg);
+ max_slots = boot_cpu_data.watch_ireg_count;
+ break;
+ case 1:
+ slots = this_cpu_ptr(wp_on_reg);
+ max_slots = boot_cpu_data.watch_dreg_count;
+ break;
+ default:
+ return;
+ }
+
+ for (i = 0; i < max_slots; ++i) {
+ if (!slots[i])
+ continue;
+
+ info = counter_arch_bp(slots[i]);
+ if (enable) {
+ if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
+ write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ write_wb_reg(CSR_CFG_CTRL, i, 0, CTRL_PLV_ENABLE);
+ } else {
+ ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
+ ctrl |= 0x1 << MWPnCFG3_LoadEn;
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
+ ctrl |= 0x1 << MWPnCFG3_StoreEn;
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
+ }
+ regs->csr_prmd |= CSR_PRMD_PWE;
+ } else {
+ if ((info->ctrl.type == LOONGARCH_BREAKPOINT_EXECUTE) && (type == 0)) {
+ write_wb_reg(CSR_CFG_CTRL, i, 0, 0);
+ } else {
+ ctrl = read_wb_reg(CSR_CFG_CTRL, i, 1);
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_LOAD)
+ ctrl &= ~0x1 << MWPnCFG3_LoadEn;
+ if (info->ctrl.type == LOONGARCH_BREAKPOINT_STORE)
+ ctrl &= ~0x1 << MWPnCFG3_StoreEn;
+ write_wb_reg(CSR_CFG_CTRL, i, 1, ctrl);
+ }
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
+ }
+ }
+}
+NOKPROBE_SYMBOL(update_bp_registers);
+
+/*
+ * Debug exception handlers.
+ */
+void breakpoint_handler(struct pt_regs *regs)
+{
+ int i;
+ struct perf_event *bp, **slots;
+
+ slots = this_cpu_ptr(bp_on_reg);
+
+ for (i = 0; i < boot_cpu_data.watch_ireg_count; ++i) {
+ bp = slots[i];
+ if (bp == NULL)
+ continue;
+ perf_bp_event(bp, regs);
+ }
+ update_bp_registers(regs, 0, 0);
+}
+NOKPROBE_SYMBOL(breakpoint_handler);
+
+void watchpoint_handler(struct pt_regs *regs)
+{
+ int i;
+ struct perf_event *wp, **slots;
+
+ slots = this_cpu_ptr(wp_on_reg);
+
+ for (i = 0; i < boot_cpu_data.watch_dreg_count; ++i) {
+ wp = slots[i];
+ if (wp == NULL)
+ continue;
+ perf_bp_event(wp, regs);
+ }
+ update_bp_registers(regs, 0, 1);
+}
+NOKPROBE_SYMBOL(watchpoint_handler);
+
+static int __init arch_hw_breakpoint_init(void)
+{
+ int cpu;
+
+ boot_cpu_data.watch_ireg_count = get_num_brps();
+ boot_cpu_data.watch_dreg_count = get_num_wrps();
+
+ pr_info("Found %d breakpoint and %d watchpoint registers.\n",
+ boot_cpu_data.watch_ireg_count, boot_cpu_data.watch_dreg_count);
+
+ for (cpu = 1; cpu < NR_CPUS; cpu++) {
+ cpu_data[cpu].watch_ireg_count = boot_cpu_data.watch_ireg_count;
+ cpu_data[cpu].watch_dreg_count = boot_cpu_data.watch_dreg_count;
+ }
+
+ return 0;
+}
+arch_initcall(arch_hw_breakpoint_init);
+
+void hw_breakpoint_thread_switch(struct task_struct *next)
+{
+ u64 addr, mask;
+ struct pt_regs *regs = task_pt_regs(next);
+
+ if (test_tsk_thread_flag(next, TIF_SINGLESTEP)) {
+ addr = read_wb_reg(CSR_CFG_ADDR, 0, 0);
+ mask = read_wb_reg(CSR_CFG_MASK, 0, 0);
+ if (!((regs->csr_era ^ addr) & ~mask))
+ csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
+ regs->csr_prmd |= CSR_PRMD_PWE;
+ } else {
+ /* Update breakpoints */
+ update_bp_registers(regs, 1, 0);
+ /* Update watchpoints */
+ update_bp_registers(regs, 1, 1);
+ }
+}
+
+void hw_breakpoint_pmu_read(struct perf_event *bp)
+{
+}
+
+/*
+ * Dummy function to register with die_notifier.
+ */
+int hw_breakpoint_exceptions_notify(struct notifier_block *unused,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
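
The bookkeeping in hw_breakpoint_slot_setup() above reduces to "claim the first free
slot" on install and "clear the slot holding this event" on uninstall. A stand-alone
demo of that pattern (the install()/uninstall() names below are hypothetical, not kernel
APIs, and this is not part of the patch):

  #include <stddef.h>
  #include <stdio.h>

  #define MAX_SLOTS 8

  static int install(void *slots[], int max, void *bp)
  {
          for (int i = 0; i < max; i++) {
                  if (!slots[i]) {
                          slots[i] = bp;
                          return i;
                  }
          }
          return -1;      /* the kernel version returns -ENOSPC */
  }

  static int uninstall(void *slots[], int max, void *bp)
  {
          for (int i = 0; i < max; i++) {
                  if (slots[i] == bp) {
                          slots[i] = NULL;
                          return i;
                  }
          }
          return -1;
  }

  int main(void)
  {
          void *slots[MAX_SLOTS] = { NULL };
          int a, b;

          printf("%d\n", install(slots, MAX_SLOTS, &a));   /* 0 */
          printf("%d\n", install(slots, MAX_SLOTS, &b));   /* 1 */
          printf("%d\n", uninstall(slots, MAX_SLOTS, &a)); /* 0 */
          printf("%d\n", uninstall(slots, MAX_SLOTS, &a)); /* -1: already removed */
          return 0;
  }
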
diff --git a/arch/loongarch/kernel/idle.c b/arch/loongarch/kernel/idle.c
new file mode 100644
index 0000000000..0b5dd2faeb
--- /dev/null
+++ b/arch/loongarch/kernel/idle.c
@@ -0,0 +1,17 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch idle loop support.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cpu.h>
+#include <linux/irqflags.h>
+#include <asm/cpu.h>
+#include <asm/idle.h>
+
+void __cpuidle arch_cpu_idle(void)
+{
+ raw_local_irq_enable();
+ __arch_cpu_idle(); /* idle instruction needs irq enabled */
+ raw_local_irq_disable();
+}
diff --git a/arch/loongarch/kernel/image-vars.h b/arch/loongarch/kernel/image-vars.h
new file mode 100644
index 0000000000..e561989d02
--- /dev/null
+++ b/arch/loongarch/kernel/image-vars.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#ifndef __LOONGARCH_KERNEL_IMAGE_VARS_H
+#define __LOONGARCH_KERNEL_IMAGE_VARS_H
+
+#ifdef CONFIG_EFI_STUB
+
+__efistub_strcmp = strcmp;
+__efistub_kernel_entry = kernel_entry;
+__efistub_kernel_asize = kernel_asize;
+__efistub_kernel_fsize = kernel_fsize;
+__efistub_kernel_offset = kernel_offset;
+__efistub_screen_info = screen_info;
+
+#endif
+
+#endif /* __LOONGARCH_KERNEL_IMAGE_VARS_H */
diff --git a/arch/loongarch/kernel/inst.c b/arch/loongarch/kernel/inst.c
new file mode 100644
index 0000000000..3050329556
--- /dev/null
+++ b/arch/loongarch/kernel/inst.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/sizes.h>
+#include <linux/uaccess.h>
+
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+
+static DEFINE_RAW_SPINLOCK(patch_lock);
+
+void simu_pc(struct pt_regs *regs, union loongarch_instruction insn)
+{
+ unsigned long pc = regs->csr_era;
+ unsigned int rd = insn.reg1i20_format.rd;
+ unsigned int imm = insn.reg1i20_format.immediate;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return;
+ }
+
+ switch (insn.reg1i20_format.opcode) {
+ case pcaddi_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 2, 21);
+ break;
+ case pcaddu12i_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 12, 31);
+ break;
+ case pcaddu18i_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 18, 37);
+ break;
+ case pcalau12i_op:
+ regs->regs[rd] = pc + sign_extend64(imm << 12, 31);
+ regs->regs[rd] &= ~((1 << 12) - 1);
+ break;
+ default:
+ pr_info("%s: unknown opcode\n", __func__);
+ return;
+ }
+
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+}
+
+void simu_branch(struct pt_regs *regs, union loongarch_instruction insn)
+{
+ unsigned int imm, imm_l, imm_h, rd, rj;
+ unsigned long pc = regs->csr_era;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return;
+ }
+
+ imm_l = insn.reg0i26_format.immediate_l;
+ imm_h = insn.reg0i26_format.immediate_h;
+ switch (insn.reg0i26_format.opcode) {
+ case b_op:
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27);
+ return;
+ case bl_op:
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 27);
+ regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+ return;
+ }
+
+ imm_l = insn.reg1i21_format.immediate_l;
+ imm_h = insn.reg1i21_format.immediate_h;
+ rj = insn.reg1i21_format.rj;
+ switch (insn.reg1i21_format.opcode) {
+ case beqz_op:
+ if (regs->regs[rj] == 0)
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ return;
+ case bnez_op:
+ if (regs->regs[rj] != 0)
+ regs->csr_era = pc + sign_extend64((imm_h << 16 | imm_l) << 2, 22);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ return;
+ }
+
+ imm = insn.reg2i16_format.immediate;
+ rj = insn.reg2i16_format.rj;
+ rd = insn.reg2i16_format.rd;
+ switch (insn.reg2i16_format.opcode) {
+ case beq_op:
+ if (regs->regs[rj] == regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bne_op:
+ if (regs->regs[rj] != regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case blt_op:
+ if ((long)regs->regs[rj] < (long)regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bge_op:
+ if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bltu_op:
+ if (regs->regs[rj] < regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case bgeu_op:
+ if (regs->regs[rj] >= regs->regs[rd])
+ regs->csr_era = pc + sign_extend64(imm << 2, 17);
+ else
+ regs->csr_era = pc + LOONGARCH_INSN_SIZE;
+ break;
+ case jirl_op:
+ regs->csr_era = regs->regs[rj] + sign_extend64(imm << 2, 17);
+ regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+ break;
+ default:
+ pr_info("%s: unknown opcode\n", __func__);
+ return;
+ }
+}
+
+bool insns_not_supported(union loongarch_instruction insn)
+{
+ switch (insn.reg3_format.opcode) {
+ case amswapw_op ... ammindbdu_op:
+ pr_notice("atomic memory access instructions are not supported\n");
+ return true;
+ }
+
+ switch (insn.reg2i14_format.opcode) {
+ case llw_op:
+ case lld_op:
+ case scw_op:
+ case scd_op:
+ pr_notice("ll and sc instructions are not supported\n");
+ return true;
+ }
+
+ switch (insn.reg1i21_format.opcode) {
+ case bceqz_op:
+ pr_notice("bceqz and bcnez instructions are not supported\n");
+ return true;
+ }
+
+ return false;
+}
+
+bool insns_need_simulation(union loongarch_instruction insn)
+{
+ if (is_pc_ins(&insn))
+ return true;
+
+ if (is_branch_ins(&insn))
+ return true;
+
+ return false;
+}
+
+void arch_simulate_insn(union loongarch_instruction insn, struct pt_regs *regs)
+{
+ if (is_pc_ins(&insn))
+ simu_pc(regs, insn);
+ else if (is_branch_ins(&insn))
+ simu_branch(regs, insn);
+}
+
+int larch_insn_read(void *addr, u32 *insnp)
+{
+ int ret;
+ u32 val;
+
+ ret = copy_from_kernel_nofault(&val, addr, LOONGARCH_INSN_SIZE);
+ if (!ret)
+ *insnp = val;
+
+ return ret;
+}
+
+int larch_insn_write(void *addr, u32 insn)
+{
+ int ret;
+ unsigned long flags = 0;
+
+ raw_spin_lock_irqsave(&patch_lock, flags);
+ ret = copy_to_kernel_nofault(addr, &insn, LOONGARCH_INSN_SIZE);
+ raw_spin_unlock_irqrestore(&patch_lock, flags);
+
+ return ret;
+}
+
+int larch_insn_patch_text(void *addr, u32 insn)
+{
+ int ret;
+ u32 *tp = addr;
+
+ if ((unsigned long)tp & 3)
+ return -EINVAL;
+
+ ret = larch_insn_write(tp, insn);
+ if (!ret)
+ flush_icache_range((unsigned long)tp,
+ (unsigned long)tp + LOONGARCH_INSN_SIZE);
+
+ return ret;
+}
+
+u32 larch_insn_gen_nop(void)
+{
+ return INSN_NOP;
+}
+
+u32 larch_insn_gen_b(unsigned long pc, unsigned long dest)
+{
+ long offset = dest - pc;
+ union loongarch_instruction insn;
+
+ if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
+ pr_warn("The generated b instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_b(&insn, offset >> 2);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_bl(unsigned long pc, unsigned long dest)
+{
+ long offset = dest - pc;
+ union loongarch_instruction insn;
+
+ if ((offset & 3) || offset < -SZ_128M || offset >= SZ_128M) {
+ pr_warn("The generated bl instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_bl(&insn, offset >> 2);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_break(int imm)
+{
+ union loongarch_instruction insn;
+
+ if (imm < 0 || imm >= SZ_32K) {
+ pr_warn("The generated break instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_break(&insn, imm);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_or(enum loongarch_gpr rd, enum loongarch_gpr rj, enum loongarch_gpr rk)
+{
+ union loongarch_instruction insn;
+
+ emit_or(&insn, rd, rj, rk);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_move(enum loongarch_gpr rd, enum loongarch_gpr rj)
+{
+ return larch_insn_gen_or(rd, rj, 0);
+}
+
+u32 larch_insn_gen_lu12iw(enum loongarch_gpr rd, int imm)
+{
+ union loongarch_instruction insn;
+
+ if (imm < -SZ_512K || imm >= SZ_512K) {
+ pr_warn("The generated lu12i.w instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_lu12iw(&insn, rd, imm);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_lu32id(enum loongarch_gpr rd, int imm)
+{
+ union loongarch_instruction insn;
+
+ if (imm < -SZ_512K || imm >= SZ_512K) {
+ pr_warn("The generated lu32i.d instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_lu32id(&insn, rd, imm);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_lu52id(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ if (imm < -SZ_2K || imm >= SZ_2K) {
+ pr_warn("The generated lu52i.d instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_lu52id(&insn, rd, rj, imm);
+
+ return insn.word;
+}
+
+u32 larch_insn_gen_jirl(enum loongarch_gpr rd, enum loongarch_gpr rj, int imm)
+{
+ union loongarch_instruction insn;
+
+ if ((imm & 3) || imm < -SZ_128K || imm >= SZ_128K) {
+ pr_warn("The generated jirl instruction is out of range.\n");
+ return INSN_BREAK;
+ }
+
+ emit_jirl(&insn, rj, rd, imm >> 2);
+
+ return insn.word;
+}
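
Much of simu_branch() above is immediate plumbing: concatenate the split immediate
fields, turn the word offset into a byte offset, and sign-extend from the right bit. A
stand-alone sketch for the 26-bit b/bl case, assuming the usual sign_extend64() contract
where the second argument is the sign-bit index (illustrative only, not part of the
patch):

  #include <stdint.h>
  #include <stdio.h>

  static int64_t sign_extend64(uint64_t value, int index)
  {
          /* Same contract as the kernel helper: 'index' is the sign-bit position. */
          int shift = 63 - index;

          return (int64_t)(value << shift) >> shift;
  }

  int main(void)
  {
          uint32_t imm_l = 0xffff, imm_h = 0x3ff;  /* all-ones 26-bit immediate */
          int64_t off = sign_extend64(((uint64_t)imm_h << 16 | imm_l) << 2, 27);

          printf("%lld\n", (long long)off);        /* -4: branch to the previous insn */
          return 0;
  }
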
diff --git a/arch/loongarch/kernel/io.c b/arch/loongarch/kernel/io.c
new file mode 100644
index 0000000000..cb85bda5a6
--- /dev/null
+++ b/arch/loongarch/kernel/io.c
@@ -0,0 +1,94 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/export.h>
+#include <linux/types.h>
+#include <linux/io.h>
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ */
+void __memcpy_fromio(void *to, const volatile void __iomem *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)from, 8)) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ *(u64 *)to = __raw_readq(from);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ *(u8 *)to = __raw_readb(from);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_fromio);
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ */
+void __memcpy_toio(volatile void __iomem *to, const void *from, size_t count)
+{
+ while (count && !IS_ALIGNED((unsigned long)to, 8)) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(*(u64 *)from, to);
+ from += 8;
+ to += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(*(u8 *)from, to);
+ from++;
+ to++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memcpy_toio);
+
+/*
+ * "memset" on IO memory space.
+ */
+void __memset_io(volatile void __iomem *dst, int c, size_t count)
+{
+ u64 qc = (u8)c;
+
+ qc |= qc << 8;
+ qc |= qc << 16;
+ qc |= qc << 32;
+
+ while (count && !IS_ALIGNED((unsigned long)dst, 8)) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+
+ while (count >= 8) {
+ __raw_writeq(qc, dst);
+ dst += 8;
+ count -= 8;
+ }
+
+ while (count) {
+ __raw_writeb(c, dst);
+ dst++;
+ count--;
+ }
+}
+EXPORT_SYMBOL(__memset_io);
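
These three routines sit behind the generic memcpy_fromio()/memcpy_toio()/memset_io() wrappers; each one first aligns to an 8-byte boundary, then moves 64-bit words, then finishes the tail byte by byte. A minimal usage sketch, assuming a made-up physical address and length (read_device_buffer() is illustrative only):

#include <linux/io.h>
#include <linux/slab.h>

/* Illustrative: copy a device buffer out of MMIO space into a kernel buffer. */
static void *read_device_buffer(phys_addr_t phys, size_t len)
{
	void __iomem *regs;
	void *buf;

	regs = ioremap(phys, len);
	if (!regs)
		return NULL;

	buf = kmalloc(len, GFP_KERNEL);
	if (buf)
		memcpy_fromio(buf, regs, len);	/* backed by __memcpy_fromio() above */

	iounmap(regs);
	return buf;
}
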
diff --git a/arch/loongarch/kernel/irq.c b/arch/loongarch/kernel/irq.c
new file mode 100644
index 0000000000..883e5066ae
--- /dev/null
+++ b/arch/loongarch/kernel/irq.c
@@ -0,0 +1,137 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/atomic.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/kernel_stat.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/loongson.h>
+#include <asm/setup.h>
+
+DEFINE_PER_CPU(unsigned long, irq_stack);
+DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
+EXPORT_PER_CPU_SYMBOL(irq_stat);
+
+struct acpi_vector_group pch_group[MAX_IO_PICS];
+struct acpi_vector_group msi_group[MAX_IO_PICS];
+/*
+ * What should we do if we get a hw irq event on an illegal vector?
+ * Each architecture has to answer this themselves.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ pr_warn("Unexpected IRQ # %d\n", irq);
+}
+
+atomic_t irq_err_count;
+
+asmlinkage void spurious_interrupt(void)
+{
+ atomic_inc(&irq_err_count);
+}
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+#ifdef CONFIG_SMP
+ show_ipi_list(p, prec);
+#endif
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ return 0;
+}
+
+static int __init early_pci_mcfg_parse(struct acpi_table_header *header)
+{
+ struct acpi_table_mcfg *mcfg;
+ struct acpi_mcfg_allocation *mptr;
+ int i, n;
+
+ if (header->length < sizeof(struct acpi_table_mcfg))
+ return -EINVAL;
+
+ n = (header->length - sizeof(struct acpi_table_mcfg)) /
+ sizeof(struct acpi_mcfg_allocation);
+ mcfg = (struct acpi_table_mcfg *)header;
+ mptr = (struct acpi_mcfg_allocation *) &mcfg[1];
+
+ for (i = 0; i < n; i++, mptr++) {
+ msi_group[i].pci_segment = mptr->pci_segment;
+ pch_group[i].node = msi_group[i].node = (mptr->address >> 44) & 0xf;
+ }
+
+ return 0;
+}
+
+static void __init init_vec_parent_group(void)
+{
+ int i;
+
+ for (i = 0; i < MAX_IO_PICS; i++) {
+ msi_group[i].pci_segment = -1;
+ msi_group[i].node = -1;
+ pch_group[i].node = -1;
+ }
+
+ acpi_table_parse(ACPI_SIG_MCFG, early_pci_mcfg_parse);
+}
+
+static int __init get_ipi_irq(void)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+ if (d)
+ return irq_create_mapping(d, INT_IPI);
+
+ return -EINVAL;
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+#ifdef CONFIG_SMP
+ int r, ipi_irq;
+ static int ipi_dummy_dev;
+#endif
+ unsigned int order = get_order(IRQ_STACK_SIZE);
+ struct page *page;
+
+ clear_csr_ecfg(ECFG0_IM);
+ clear_csr_estat(ESTATF_IP);
+
+ init_vec_parent_group();
+ irqchip_init();
+#ifdef CONFIG_SMP
+ ipi_irq = get_ipi_irq();
+ if (ipi_irq < 0)
+ panic("IPI IRQ mapping failed\n");
+ irq_set_percpu_devid(ipi_irq);
+ r = request_percpu_irq(ipi_irq, loongson_ipi_interrupt, "IPI", &ipi_dummy_dev);
+ if (r < 0)
+ panic("IPI IRQ request failed\n");
+#endif
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
+
+ for_each_possible_cpu(i) {
+ page = alloc_pages_node(cpu_to_node(i), GFP_KERNEL, order);
+
+ per_cpu(irq_stack, i) = (unsigned long)page_address(page);
+ pr_debug("CPU%d IRQ stack at 0x%lx - 0x%lx\n", i,
+ per_cpu(irq_stack, i), per_cpu(irq_stack, i) + IRQ_STACK_SIZE);
+ }
+
+ set_csr_ecfg(ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 | ECFGF_IPI | ECFGF_PMC);
+}
diff --git a/arch/loongarch/kernel/jump_label.c b/arch/loongarch/kernel/jump_label.c
new file mode 100644
index 0000000000..31891214b7
--- /dev/null
+++ b/arch/loongarch/kernel/jump_label.c
@@ -0,0 +1,22 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/kernel.h>
+#include <linux/jump_label.h>
+#include <asm/inst.h>
+
+void arch_jump_label_transform(struct jump_entry *entry, enum jump_label_type type)
+{
+ u32 insn;
+ void *addr = (void *)jump_entry_code(entry);
+
+ if (type == JUMP_LABEL_JMP)
+ insn = larch_insn_gen_b(jump_entry_code(entry), jump_entry_target(entry));
+ else
+ insn = larch_insn_gen_nop();
+
+ larch_insn_patch_text(addr, insn);
+}
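
arch_jump_label_transform() above is what makes static keys cheap on this architecture: the generic code hands it a jump_entry and it patches a single instruction between a nop and a b. A short sketch of the consumer side using the standard static-key API (the key name and callers are illustrative):

#include <linux/jump_label.h>
#include <linux/printk.h>

DEFINE_STATIC_KEY_FALSE(example_feature_key);	/* illustrative key */

static void hot_path(void)
{
	/* Compiles to a nop until the key is enabled, then to a patched branch. */
	if (static_branch_unlikely(&example_feature_key))
		pr_info("slow path taken\n");
}

static void enable_feature(void)
{
	/* Triggers arch_jump_label_transform() on every site using the key. */
	static_branch_enable(&example_feature_key);
}
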
diff --git a/arch/loongarch/kernel/kfpu.c b/arch/loongarch/kernel/kfpu.c
new file mode 100644
index 0000000000..ec5b28e570
--- /dev/null
+++ b/arch/loongarch/kernel/kfpu.c
@@ -0,0 +1,90 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <asm/fpu.h>
+#include <asm/smp.h>
+
+static unsigned int euen_mask = CSR_EUEN_FPEN;
+
+/*
+ * The critical section between kernel_fpu_begin() and kernel_fpu_end()
+ * is non-reentrant. It is the caller's responsibility to avoid reentrance.
+ * See drivers/gpu/drm/amd/display/amdgpu_dm/dc_fpu.c as an example.
+ */
+static DEFINE_PER_CPU(bool, in_kernel_fpu);
+static DEFINE_PER_CPU(unsigned int, euen_current);
+
+void kernel_fpu_begin(void)
+{
+ unsigned int *euen_curr;
+
+ preempt_disable();
+
+ WARN_ON(this_cpu_read(in_kernel_fpu));
+
+ this_cpu_write(in_kernel_fpu, true);
+ euen_curr = this_cpu_ptr(&euen_current);
+
+ *euen_curr = csr_xchg32(euen_mask, euen_mask, LOONGARCH_CSR_EUEN);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (*euen_curr & CSR_EUEN_LASXEN)
+ _save_lasx(&current->thread.fpu);
+ else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (*euen_curr & CSR_EUEN_LSXEN)
+ _save_lsx(&current->thread.fpu);
+ else
+#endif
+ if (*euen_curr & CSR_EUEN_FPEN)
+ _save_fp(&current->thread.fpu);
+
+ write_fcsr(LOONGARCH_FCSR0, 0);
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_begin);
+
+void kernel_fpu_end(void)
+{
+ unsigned int *euen_curr;
+
+ WARN_ON(!this_cpu_read(in_kernel_fpu));
+
+ euen_curr = this_cpu_ptr(&euen_current);
+
+#ifdef CONFIG_CPU_HAS_LASX
+ if (*euen_curr & CSR_EUEN_LASXEN)
+ _restore_lasx(&current->thread.fpu);
+ else
+#endif
+#ifdef CONFIG_CPU_HAS_LSX
+ if (*euen_curr & CSR_EUEN_LSXEN)
+ _restore_lsx(&current->thread.fpu);
+ else
+#endif
+ if (*euen_curr & CSR_EUEN_FPEN)
+ _restore_fp(&current->thread.fpu);
+
+ *euen_curr = csr_xchg32(*euen_curr, euen_mask, LOONGARCH_CSR_EUEN);
+
+ this_cpu_write(in_kernel_fpu, false);
+
+ preempt_enable();
+}
+EXPORT_SYMBOL_GPL(kernel_fpu_end);
+
+static int __init init_euen_mask(void)
+{
+ if (cpu_has_lsx)
+ euen_mask |= CSR_EUEN_LSXEN;
+
+ if (cpu_has_lasx)
+ euen_mask |= CSR_EUEN_LASXEN;
+
+ return 0;
+}
+arch_initcall(init_euen_mask);
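
The begin/end pair above is the only sanctioned way to touch FP/SIMD registers from kernel code; it is non-reentrant and keeps preemption disabled for the whole critical section. A minimal usage sketch, assuming the declarations live in <asm/fpu.h> as included by this file (the work inside is a placeholder):

#include <asm/fpu.h>	/* kernel_fpu_begin() / kernel_fpu_end() */

static void do_simd_work(void)
{
	kernel_fpu_begin();	/* saves the live FP/LSX/LASX context, disables preemption */

	/*
	 * FP/vector instructions may be used here, e.g. via inline asm or a
	 * compiler-vectorized helper. Placeholder only.
	 */

	kernel_fpu_end();	/* restores the saved context, re-enables preemption */
}
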
diff --git a/arch/loongarch/kernel/kgdb.c b/arch/loongarch/kernel/kgdb.c
new file mode 100644
index 0000000000..445c452d72
--- /dev/null
+++ b/arch/loongarch/kernel/kgdb.c
@@ -0,0 +1,727 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * LoongArch KGDB support
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/hw_breakpoint.h>
+#include <linux/kdebug.h>
+#include <linux/kgdb.h>
+#include <linux/processor.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/cacheflush.h>
+#include <asm/fpu.h>
+#include <asm/hw_breakpoint.h>
+#include <asm/inst.h>
+#include <asm/irq_regs.h>
+#include <asm/ptrace.h>
+#include <asm/sigcontext.h>
+
+int kgdb_watch_activated;
+static unsigned int stepped_opcode;
+static unsigned long stepped_address;
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ { "r0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "r1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "r16", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "r17", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "r18", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "r19", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "r20", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "r21", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "r22", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "r23", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "r24", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "r25", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "r26", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "r27", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "r28", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "r29", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "r30", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "r31", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "orig_a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, orig_a0) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_era) },
+ { "badv", GDB_SIZEOF_REG, offsetof(struct pt_regs, csr_badvaddr) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fcc0", 1, 0 },
+ { "fcc1", 1, 1 },
+ { "fcc2", 1, 2 },
+ { "fcc3", 1, 3 },
+ { "fcc4", 1, 4 },
+ { "fcc5", 1, 5 },
+ { "fcc6", 1, 6 },
+ { "fcc7", 1, 7 },
+ { "fcsr", 4, 0 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int reg_offset, reg_size;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return NULL;
+
+ reg_offset = dbg_reg_def[regno].offset;
+ reg_size = dbg_reg_def[regno].size;
+
+ if (reg_offset == -1)
+ goto out;
+
+ /* Handle general-purpose/orig_a0/pc/badv registers */
+ if (regno <= DBG_PT_REGS_END) {
+ memcpy(mem, (void *)regs + reg_offset, reg_size);
+ goto out;
+ }
+
+ if (!(regs->csr_euen & CSR_EUEN_FPEN))
+ goto out;
+
+ save_fp(current);
+
+ /* Handle FP registers */
+ switch (regno) {
+ case DBG_FCSR: /* Process the fcsr */
+ memcpy(mem, (void *)&current->thread.fpu.fcsr, reg_size);
+ break;
+ case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
+ memcpy(mem, (void *)&current->thread.fpu.fcc + reg_offset, reg_size);
+ break;
+ case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
+ memcpy(mem, (void *)&current->thread.fpu.fpr[reg_offset], reg_size);
+ break;
+ default:
+ break;
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int reg_offset, reg_size;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ reg_offset = dbg_reg_def[regno].offset;
+ reg_size = dbg_reg_def[regno].size;
+
+ if (reg_offset == -1)
+ return 0;
+
+ /* Handle general-purpose/orig_a0/pc/badv registers */
+ if (regno <= DBG_PT_REGS_END) {
+ memcpy((void *)regs + reg_offset, mem, reg_size);
+ return 0;
+ }
+
+ if (!(regs->csr_euen & CSR_EUEN_FPEN))
+ return 0;
+
+ /* Handle FP registers */
+ switch (regno) {
+ case DBG_FCSR: /* Process the fcsr */
+ memcpy((void *)&current->thread.fpu.fcsr, mem, reg_size);
+ break;
+ case DBG_FCC_BASE ... DBG_FCC_END: /* Process the fcc */
+ memcpy((void *)&current->thread.fpu.fcc + reg_offset, mem, reg_size);
+ break;
+ case DBG_FPR_BASE ... DBG_FPR_END: /* Process the fpr */
+ memcpy((void *)&current->thread.fpu.fpr[reg_offset], mem, reg_size);
+ break;
+ default:
+ break;
+ }
+
+ restore_fp(current);
+
+ return 0;
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[DBG_LOONGARCH_RA] = p->thread.reg01;
+ gdb_regs[DBG_LOONGARCH_TP] = (long)p;
+ gdb_regs[DBG_LOONGARCH_SP] = p->thread.reg03;
+
+ /* S0 - S8 */
+ gdb_regs[DBG_LOONGARCH_S0] = p->thread.reg23;
+ gdb_regs[DBG_LOONGARCH_S1] = p->thread.reg24;
+ gdb_regs[DBG_LOONGARCH_S2] = p->thread.reg25;
+ gdb_regs[DBG_LOONGARCH_S3] = p->thread.reg26;
+ gdb_regs[DBG_LOONGARCH_S4] = p->thread.reg27;
+ gdb_regs[DBG_LOONGARCH_S5] = p->thread.reg28;
+ gdb_regs[DBG_LOONGARCH_S6] = p->thread.reg29;
+ gdb_regs[DBG_LOONGARCH_S7] = p->thread.reg30;
+ gdb_regs[DBG_LOONGARCH_S8] = p->thread.reg31;
+
+ /*
+	 * The PC uses the return address (RA), i.e. the moment right after returning from __switch_to()
+ */
+ gdb_regs[DBG_LOONGARCH_PC] = p->thread.reg01;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->csr_era = pc;
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__ ( \
+ ".globl kgdb_breakinst\n\t" \
+ "nop\n" \
+ "kgdb_breakinst:\tbreak 2\n\t"); /* BRK_KDB = 2 */
+}
+
+/*
+ * Called via the die notifier chain before the kernel dies. If KGDB is
+ * enabled, then try to fall into the debugger.
+ */
+static int kgdb_loongarch_notify(struct notifier_block *self, unsigned long cmd, void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(args->trapnr, args->signr, cmd, regs))
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if (regs->csr_era == (unsigned long)&kgdb_breakinst)
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+
+ return NOTIFY_STOP;
+}
+
+bool kgdb_breakpoint_handler(struct pt_regs *regs)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = "Break",
+ .err = BRK_KDB,
+ .trapnr = read_csr_excode(),
+ .signr = SIGTRAP,
+
+ };
+
+ return (kgdb_loongarch_notify(NULL, DIE_TRAP, &args) == NOTIFY_STOP) ? true : false;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_loongarch_notify,
+};
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->csr_era = addr;
+}
+
+/* Calculate the new address for after a step */
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+ char cj_val;
+ unsigned int si, si_l, si_h, rd, rj, cj;
+ unsigned long pc = instruction_pointer(regs);
+ union loongarch_instruction *ip = (union loongarch_instruction *)pc;
+
+ if (pc & 3) {
+ pr_warn("%s: invalid pc 0x%lx\n", __func__, pc);
+ return -EINVAL;
+ }
+
+ *next_addr = pc + LOONGARCH_INSN_SIZE;
+
+ si_h = ip->reg0i26_format.immediate_h;
+ si_l = ip->reg0i26_format.immediate_l;
+ switch (ip->reg0i26_format.opcode) {
+ case b_op:
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ return 0;
+ case bl_op:
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 27);
+ regs->regs[1] = pc + LOONGARCH_INSN_SIZE;
+ return 0;
+ }
+
+ rj = ip->reg1i21_format.rj;
+ cj = (rj & 0x07) + DBG_FCC_BASE;
+ si_l = ip->reg1i21_format.immediate_l;
+ si_h = ip->reg1i21_format.immediate_h;
+ dbg_get_reg(cj, &cj_val, regs);
+ switch (ip->reg1i21_format.opcode) {
+ case beqz_op:
+ if (regs->regs[rj] == 0)
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ case bnez_op:
+ if (regs->regs[rj] != 0)
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ case bceqz_op: /* bceqz_op = bcnez_op */
+ if (((rj & 0x18) == 0x00) && !cj_val) /* bceqz */
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ if (((rj & 0x18) == 0x08) && cj_val) /* bcnez */
+ *next_addr = pc + sign_extend64((si_h << 16 | si_l) << 2, 22);
+ return 0;
+ }
+
+ rj = ip->reg2i16_format.rj;
+ rd = ip->reg2i16_format.rd;
+ si = ip->reg2i16_format.immediate;
+ switch (ip->reg2i16_format.opcode) {
+ case beq_op:
+ if (regs->regs[rj] == regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bne_op:
+ if (regs->regs[rj] != regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case blt_op:
+ if ((long)regs->regs[rj] < (long)regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bge_op:
+ if ((long)regs->regs[rj] >= (long)regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bltu_op:
+ if (regs->regs[rj] < regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case bgeu_op:
+ if (regs->regs[rj] >= regs->regs[rd])
+ *next_addr = pc + sign_extend64(si << 2, 17);
+ return 0;
+ case jirl_op:
+ regs->regs[rd] = pc + LOONGARCH_INSN_SIZE;
+ *next_addr = regs->regs[rj] + sign_extend64(si << 2, 17);
+ return 0;
+ }
+
+ return 0;
+}
+
+static int do_single_step(struct pt_regs *regs)
+{
+ int error = 0;
+	unsigned long addr = 0; /* Where the instruction being stepped will send us */
+
+ error = get_step_address(regs, &addr);
+ if (error)
+ return error;
+
+ /* Store the opcode in the stepped address */
+ error = get_kernel_nofault(stepped_opcode, (void *)addr);
+ if (error)
+ return error;
+
+ stepped_address = addr;
+
+ /* Replace the opcode with the break instruction */
+ error = copy_to_kernel_nofault((void *)stepped_address,
+ arch_kgdb_ops.gdb_bpt_instr, BREAK_INSTR_SIZE);
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+
+ if (error) {
+ stepped_opcode = 0;
+ stepped_address = 0;
+ } else {
+ kgdb_single_step = 1;
+ atomic_set(&kgdb_cpu_doing_single_step, raw_smp_processor_id());
+ }
+
+ return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (stepped_opcode) {
+ copy_to_kernel_nofault((void *)stepped_address,
+ (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+ flush_icache_range(stepped_address, stepped_address + BREAK_INSTR_SIZE);
+ }
+
+ stepped_opcode = 0;
+ stepped_address = 0;
+ kgdb_single_step = 0;
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ int ret = 0;
+
+ undo_single_step(regs);
+ regs->csr_prmd |= CSR_PRMD_PWE;
+
+ switch (remcom_in_buffer[0]) {
+ case 'D':
+ case 'k':
+ regs->csr_prmd &= ~CSR_PRMD_PWE;
+ fallthrough;
+ case 'c':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ break;
+ case 's':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ ret = do_single_step(regs);
+ break;
+ default:
+ ret = -1;
+ }
+
+ return ret;
+}
+
+static struct hw_breakpoint {
+ unsigned int enabled;
+ unsigned long addr;
+ int len;
+ int type;
+ struct perf_event * __percpu *pev;
+} breakinfo[LOONGARCH_MAX_BRP];
+
+static int hw_break_reserve_slot(int breakno)
+{
+ int cpu, cnt = 0;
+ struct perf_event **pevent;
+
+ for_each_online_cpu(cpu) {
+ cnt++;
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (dbg_reserve_bp_slot(*pevent))
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ for_each_online_cpu(cpu) {
+ cnt--;
+ if (!cnt)
+ break;
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ dbg_release_bp_slot(*pevent);
+ }
+
+ return -1;
+}
+
+static int hw_break_release_slot(int breakno)
+{
+ int cpu;
+ struct perf_event **pevent;
+
+ if (dbg_is_early)
+ return 0;
+
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[breakno].pev, cpu);
+ if (dbg_release_bp_slot(*pevent))
+ /*
+			 * The debugger is responsible for handling the retry on
+ * remove failure.
+ */
+ return -1;
+ }
+
+ return 0;
+}
+
+static int kgdb_set_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++)
+ if (!breakinfo[i].enabled)
+ break;
+
+ if (i == LOONGARCH_MAX_BRP)
+ return -1;
+
+ switch (bptype) {
+ case BP_HARDWARE_BREAKPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_X;
+ break;
+ case BP_READ_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_R;
+ break;
+ case BP_WRITE_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_W;
+ break;
+ case BP_ACCESS_WATCHPOINT:
+ breakinfo[i].type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return -1;
+ }
+
+ switch (len) {
+ case 1:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_1;
+ break;
+ case 2:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_2;
+ break;
+ case 4:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_4;
+ break;
+ case 8:
+ breakinfo[i].len = HW_BREAKPOINT_LEN_8;
+ break;
+ default:
+ return -1;
+ }
+
+ breakinfo[i].addr = addr;
+ if (hw_break_reserve_slot(i)) {
+ breakinfo[i].addr = 0;
+ return -1;
+ }
+ breakinfo[i].enabled = 1;
+
+ return 0;
+}
+
+static int kgdb_remove_hw_break(unsigned long addr, int len, enum kgdb_bptype bptype)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++)
+ if (breakinfo[i].addr == addr && breakinfo[i].enabled)
+ break;
+
+ if (i == LOONGARCH_MAX_BRP)
+ return -1;
+
+ if (hw_break_release_slot(i)) {
+ pr_err("Cannot remove hw breakpoint at %lx\n", addr);
+ return -1;
+ }
+ breakinfo[i].enabled = 0;
+
+ return 0;
+}
+
+static void kgdb_disable_hw_break(struct pt_regs *regs)
+{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled == 1)
+ continue;
+
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ }
+
+ /* Disable hardware debugging while we are in kgdb */
+ csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+}
+
+static void kgdb_remove_all_hw_break(void)
+{
+ int i;
+ int cpu = raw_smp_processor_id();
+ struct perf_event *bp;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (!bp->attr.disabled) {
+ arch_uninstall_hw_breakpoint(bp);
+ bp->attr.disabled = 1;
+ continue;
+ }
+
+ if (hw_break_release_slot(i))
+ pr_err("KGDB: hw bpt remove failed %lx\n", breakinfo[i].addr);
+ breakinfo[i].enabled = 0;
+ }
+
+ csr_xchg32(0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+ kgdb_watch_activated = 0;
+}
+
+static void kgdb_correct_hw_break(void)
+{
+ int i, activated = 0;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ struct perf_event *bp;
+ int val;
+ int cpu = raw_smp_processor_id();
+
+ if (!breakinfo[i].enabled)
+ continue;
+
+ bp = *per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (bp->attr.disabled != 1)
+ continue;
+
+ bp->attr.bp_addr = breakinfo[i].addr;
+ bp->attr.bp_len = breakinfo[i].len;
+ bp->attr.bp_type = breakinfo[i].type;
+
+ val = hw_breakpoint_arch_parse(bp, &bp->attr, counter_arch_bp(bp));
+ if (val)
+ return;
+
+ val = arch_install_hw_breakpoint(bp);
+ if (!val)
+ bp->attr.disabled = 0;
+ activated = 1;
+ }
+
+ csr_xchg32(activated ? CSR_CRMD_WE : 0, CSR_CRMD_WE, LOONGARCH_CSR_CRMD);
+ kgdb_watch_activated = activated;
+}
+
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x02, 0x00, break_op >> 1, 0x00}, /* BRK_KDB = 2 */
+ .flags = KGDB_HW_BREAKPOINT,
+ .set_hw_breakpoint = kgdb_set_hw_break,
+ .remove_hw_breakpoint = kgdb_remove_hw_break,
+ .disable_hw_break = kgdb_disable_hw_break,
+ .remove_all_hw_break = kgdb_remove_all_hw_break,
+ .correct_hw_break = kgdb_correct_hw_break,
+};
+
+int kgdb_arch_init(void)
+{
+ return register_die_notifier(&kgdb_notifier);
+}
+
+void kgdb_arch_late(void)
+{
+ int i, cpu;
+ struct perf_event_attr attr;
+ struct perf_event **pevent;
+
+ hw_breakpoint_init(&attr);
+
+ attr.bp_addr = (unsigned long)kgdb_arch_init;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = HW_BREAKPOINT_W;
+ attr.disabled = 1;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (breakinfo[i].pev)
+ continue;
+
+ breakinfo[i].pev = register_wide_hw_breakpoint(&attr, NULL, NULL);
+ if (IS_ERR((void * __force)breakinfo[i].pev)) {
+ pr_err("kgdb: Could not allocate hw breakpoints.\n");
+ breakinfo[i].pev = NULL;
+ return;
+ }
+
+ for_each_online_cpu(cpu) {
+ pevent = per_cpu_ptr(breakinfo[i].pev, cpu);
+ if (pevent[0]->destroy) {
+ pevent[0]->destroy = NULL;
+ release_bp_slot(*pevent);
+ }
+ }
+ }
+}
+
+void kgdb_arch_exit(void)
+{
+ int i;
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; i++) {
+ if (breakinfo[i].pev) {
+ unregister_wide_hw_breakpoint(breakinfo[i].pev);
+ breakinfo[i].pev = NULL;
+ }
+ }
+
+ unregister_die_notifier(&kgdb_notifier);
+}
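
get_step_address() above computes the software single-step successor by decoding the branch at the PC; every branch immediate is a word offset, so it is shifted left by 2 and then sign-extended from the stated bit. A small self-check of that arithmetic for the reg2i16 (beq/bne/blt/...) format, illustrative only:

#include <linux/types.h>
#include <linux/bitops.h>	/* sign_extend64() */

/*
 * Mirror of the beq/bne offset math in get_step_address(): the 16-bit
 * immediate is a word offset, shifted left by 2 and sign-extended from
 * bit 17 of the resulting byte offset.
 */
static long decode_offs16(unsigned int si)
{
	return sign_extend64((u64)si << 2, 17);
}

/* e.g. si == 0xffff (-1 word) gives -4 bytes; si == 0x0001 gives +4 bytes. */
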
diff --git a/arch/loongarch/kernel/kprobes.c b/arch/loongarch/kernel/kprobes.c
new file mode 100644
index 0000000000..17b040bd60
--- /dev/null
+++ b/arch/loongarch/kernel/kprobes.c
@@ -0,0 +1,338 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <asm/break.h>
+
+#define KPROBE_BP_INSN larch_insn_gen_break(BRK_KPROBE_BP)
+#define KPROBE_SSTEPBP_INSN larch_insn_gen_break(BRK_KPROBE_SSTEPBP)
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static void arch_prepare_ss_slot(struct kprobe *p)
+{
+ p->ainsn.insn[0] = *p->addr;
+ p->ainsn.insn[1] = KPROBE_SSTEPBP_INSN;
+ p->ainsn.restore = (unsigned long)p->addr + LOONGARCH_INSN_SIZE;
+}
+NOKPROBE_SYMBOL(arch_prepare_ss_slot);
+
+static void arch_prepare_simulate(struct kprobe *p)
+{
+ p->ainsn.restore = 0;
+}
+NOKPROBE_SYMBOL(arch_prepare_simulate);
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ union loongarch_instruction insn;
+
+ if ((unsigned long)p->addr & 0x3)
+ return -EILSEQ;
+
+ /* copy instruction */
+ p->opcode = *p->addr;
+ insn.word = p->opcode;
+
+ /* decode instruction */
+ if (insns_not_supported(insn))
+ return -EINVAL;
+
+ if (insns_need_simulation(insn)) {
+ p->ainsn.insn = NULL;
+ } else {
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn)
+ return -ENOMEM;
+ }
+
+ /* prepare the instruction */
+ if (p->ainsn.insn)
+ arch_prepare_ss_slot(p);
+ else
+ arch_prepare_simulate(p);
+
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
+
+/* Install breakpoint in text */
+void arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = KPROBE_BP_INSN;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
+
+/* Remove breakpoint from text */
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+}
+NOKPROBE_SYMBOL(save_previous_kprobe);
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+}
+NOKPROBE_SYMBOL(restore_previous_kprobe);
+
+static void set_current_kprobe(struct kprobe *p)
+{
+ __this_cpu_write(current_kprobe, p);
+}
+NOKPROBE_SYMBOL(set_current_kprobe);
+
+/*
+ * Interrupts need to be disabled before single-step mode is set,
+ * and not reenabled until after single-step mode ends.
+ * Without disabling interrupts on the local CPU, an interrupt may
+ * occur between the exception return and the start of the
+ * out-of-line single step, which would result in wrongly
+ * single-stepping into the interrupt handler.
+ */
+static void save_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ kcb->saved_status = regs->csr_prmd;
+ regs->csr_prmd &= ~CSR_PRMD_PIE;
+}
+NOKPROBE_SYMBOL(save_local_irqflag);
+
+static void restore_local_irqflag(struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ regs->csr_prmd = kcb->saved_status;
+}
+NOKPROBE_SYMBOL(restore_local_irqflag);
+
+static void post_kprobe_handler(struct kprobe *cur, struct kprobe_ctlblk *kcb,
+ struct pt_regs *regs)
+{
+ /* return addr restore if non-branching insn */
+ if (cur->ainsn.restore != 0)
+ instruction_pointer_set(regs, cur->ainsn.restore);
+
+ /* restore back original saved kprobe variables and continue */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ return;
+ }
+
+ /*
+	 * Update the kcb status even if cur->post_handler is not set,
+	 * because reset_current_kprobe() doesn't update the kcb.
+ */
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (cur->post_handler)
+ cur->post_handler(cur, regs, 0);
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+}
+NOKPROBE_SYMBOL(post_kprobe_handler);
+
+static void setup_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb, int reenter)
+{
+ union loongarch_instruction insn;
+
+ if (reenter) {
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_REENTER;
+ } else {
+ kcb->kprobe_status = KPROBE_HIT_SS;
+ }
+
+ if (p->ainsn.insn) {
+ /* IRQs and single stepping do not mix well */
+ save_local_irqflag(kcb, regs);
+ /* set ip register to prepare for single stepping */
+ regs->csr_era = (unsigned long)p->ainsn.insn;
+ } else {
+		/* simulate single stepping */
+ insn.word = p->opcode;
+ arch_simulate_insn(insn, regs);
+ /* now go for post processing */
+ post_kprobe_handler(p, kcb, regs);
+ }
+}
+NOKPROBE_SYMBOL(setup_singlestep);
+
+static bool reenter_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_HIT_SSDONE:
+ case KPROBE_HIT_ACTIVE:
+ kprobes_inc_nmissed_count(p);
+ setup_singlestep(p, regs, kcb, 1);
+ break;
+ case KPROBE_REENTER:
+ pr_warn("Failed to recover from reentered kprobes.\n");
+ dump_kprobe(p);
+ WARN_ON_ONCE(1);
+ break;
+ default:
+ WARN_ON(1);
+ return false;
+ }
+
+ return true;
+}
+NOKPROBE_SYMBOL(reenter_kprobe);
+
+bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ struct kprobe_ctlblk *kcb;
+ struct kprobe *p, *cur_kprobe;
+ kprobe_opcode_t *addr = (kprobe_opcode_t *)regs->csr_era;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing.
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+ cur_kprobe = kprobe_running();
+
+ p = get_kprobe(addr);
+ if (p) {
+ if (cur_kprobe) {
+ if (reenter_kprobe(p, regs, kcb))
+ return true;
+ } else {
+ /* Probe hit */
+ set_current_kprobe(p);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ /*
+			 * If we have no pre-handler or it returned 0, we
+			 * continue with normal processing. If we have a
+			 * pre-handler and it returned non-zero, it has
+			 * modified the execution path and there is no need
+			 * for single stepping. Just reset the current kprobe and exit.
+			 *
+			 * The pre_handler can hit a breakpoint and can step
+			 * through before returning.
+ */
+ if (!p->pre_handler || !p->pre_handler(p, regs)) {
+ setup_singlestep(p, regs, kcb, 0);
+ } else {
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return true;
+ }
+ }
+
+ if (*addr != KPROBE_BP_INSN) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+		 * Return to the original instruction and continue.
+ */
+ regs->csr_era = (unsigned long)addr;
+ preempt_enable_no_resched();
+ return true;
+ }
+
+ preempt_enable_no_resched();
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_breakpoint_handler);
+
+bool kprobe_singlestep_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+ unsigned long addr = instruction_pointer(regs);
+
+ if (cur && (kcb->kprobe_status & (KPROBE_HIT_SS | KPROBE_REENTER)) &&
+ ((unsigned long)&cur->ainsn.insn[1] == addr)) {
+ restore_local_irqflag(kcb, regs);
+ post_kprobe_handler(cur, kcb, regs);
+ return true;
+ }
+
+ preempt_enable_no_resched();
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_singlestep_handler);
+
+bool kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ switch (kcb->kprobe_status) {
+ case KPROBE_HIT_SS:
+ case KPROBE_REENTER:
+ /*
+ * We are here because the instruction being single
+		 * stepped caused a page fault. We reset the current
+		 * kprobe, point the ip back to the probe address,
+		 * and allow the page fault handler to continue as a
+		 * normal page fault.
+ */
+ regs->csr_era = (unsigned long)cur->addr;
+ WARN_ON_ONCE(!instruction_pointer(regs));
+
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ } else {
+ restore_local_irqflag(kcb, regs);
+ reset_current_kprobe();
+ }
+ preempt_enable_no_resched();
+ break;
+ }
+ return false;
+}
+NOKPROBE_SYMBOL(kprobe_fault_handler);
+
+/*
+ * Provide a blacklist of symbols identifying ranges which cannot be kprobed.
+ * This blacklist is exposed to userspace via debugfs (kprobes/blacklist).
+ */
+int __init arch_populate_kprobe_blacklist(void)
+{
+ return kprobe_add_area_blacklist((unsigned long)__irqentry_text_start,
+ (unsigned long)__irqentry_text_end);
+}
+
+int __init arch_init_kprobes(void)
+{
+ return 0;
+}
+
+int arch_trampoline_kprobe(struct kprobe *p)
+{
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
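
For completeness, the consumer side: a kprobe registered on a kernel symbol causes the break instruction planted by arch_arm_kprobe() above to be hit, which lands in kprobe_breakpoint_handler(). A minimal module-style sketch using the generic kprobes API (the probed symbol is illustrative):

#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/printk.h>

static int example_pre(struct kprobe *p, struct pt_regs *regs)
{
	pr_info("hit %s at era=%lx\n", p->symbol_name, regs->csr_era);
	return 0;	/* 0: continue with single-stepping the original insn */
}

static struct kprobe example_kp = {
	.symbol_name = "kernel_clone",	/* illustrative target */
	.pre_handler = example_pre,
};

static int __init example_init(void)
{
	return register_kprobe(&example_kp);
}

static void __exit example_exit(void)
{
	unregister_kprobe(&example_kp);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
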
diff --git a/arch/loongarch/kernel/lbt.S b/arch/loongarch/kernel/lbt.S
new file mode 100644
index 0000000000..9c75120a26
--- /dev/null
+++ b/arch/loongarch/kernel/lbt.S
@@ -0,0 +1,155 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Author: Qi Hu <huqi@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2023 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-extable.h>
+#include <asm/asm-offsets.h>
+#include <asm/errno.h>
+#include <asm/regdef.h>
+
+#define SCR_REG_WIDTH 8
+
+ .macro EX insn, reg, src, offs
+.ex\@: \insn \reg, \src, \offs
+ _asm_extable .ex\@, .L_lbt_fault
+ .endm
+
+/*
+ * Save a thread's lbt context.
+ */
+SYM_FUNC_START(_save_lbt)
+ movscr2gr t1, $scr0 # save scr
+ stptr.d t1, a0, THREAD_SCR0
+ movscr2gr t1, $scr1
+ stptr.d t1, a0, THREAD_SCR1
+ movscr2gr t1, $scr2
+ stptr.d t1, a0, THREAD_SCR2
+ movscr2gr t1, $scr3
+ stptr.d t1, a0, THREAD_SCR3
+
+ x86mfflag t1, 0x3f # save eflags
+ stptr.d t1, a0, THREAD_EFLAGS
+ jr ra
+SYM_FUNC_END(_save_lbt)
+EXPORT_SYMBOL(_save_lbt)
+
+/*
+ * Restore a thread's lbt context.
+ */
+SYM_FUNC_START(_restore_lbt)
+ ldptr.d t1, a0, THREAD_SCR0 # restore scr
+ movgr2scr $scr0, t1
+ ldptr.d t1, a0, THREAD_SCR1
+ movgr2scr $scr1, t1
+ ldptr.d t1, a0, THREAD_SCR2
+ movgr2scr $scr2, t1
+ ldptr.d t1, a0, THREAD_SCR3
+ movgr2scr $scr3, t1
+
+ ldptr.d t1, a0, THREAD_EFLAGS # restore eflags
+ x86mtflag t1, 0x3f
+ jr ra
+SYM_FUNC_END(_restore_lbt)
+EXPORT_SYMBOL(_restore_lbt)
+
+/*
+ * Load scr/eflag with zero.
+ */
+SYM_FUNC_START(_init_lbt)
+ movgr2scr $scr0, zero
+ movgr2scr $scr1, zero
+ movgr2scr $scr2, zero
+ movgr2scr $scr3, zero
+
+ x86mtflag zero, 0x3f
+ jr ra
+SYM_FUNC_END(_init_lbt)
+
+/*
+ * a0: scr
+ * a1: eflag
+ */
+SYM_FUNC_START(_save_lbt_context)
+ movscr2gr t1, $scr0 # save scr
+ EX st.d t1, a0, (0 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr1
+ EX st.d t1, a0, (1 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr2
+ EX st.d t1, a0, (2 * SCR_REG_WIDTH)
+ movscr2gr t1, $scr3
+ EX st.d t1, a0, (3 * SCR_REG_WIDTH)
+
+ x86mfflag t1, 0x3f # save eflags
+ EX st.w t1, a1, 0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_lbt_context)
+
+/*
+ * a0: scr
+ * a1: eflag
+ */
+SYM_FUNC_START(_restore_lbt_context)
+ EX ld.d t1, a0, (0 * SCR_REG_WIDTH) # restore scr
+ movgr2scr $scr0, t1
+ EX ld.d t1, a0, (1 * SCR_REG_WIDTH)
+ movgr2scr $scr1, t1
+ EX ld.d t1, a0, (2 * SCR_REG_WIDTH)
+ movgr2scr $scr2, t1
+ EX ld.d t1, a0, (3 * SCR_REG_WIDTH)
+ movgr2scr $scr3, t1
+
+ EX ld.w t1, a1, 0 # restore eflags
+ x86mtflag t1, 0x3f
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_lbt_context)
+
+/*
+ * a0: ftop
+ */
+SYM_FUNC_START(_save_ftop_context)
+ x86mftop t1
+ st.w t1, a0, 0
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_save_ftop_context)
+
+/*
+ * a0: ftop
+ */
+SYM_FUNC_START(_restore_ftop_context)
+ ld.w t1, a0, 0
+ andi t1, t1, 0x7
+ la.pcrel a0, 1f
+ alsl.d a0, t1, a0, 3
+ jr a0
+1:
+ x86mttop 0
+ b 2f
+ x86mttop 1
+ b 2f
+ x86mttop 2
+ b 2f
+ x86mttop 3
+ b 2f
+ x86mttop 4
+ b 2f
+ x86mttop 5
+ b 2f
+ x86mttop 6
+ b 2f
+ x86mttop 7
+2:
+ li.w a0, 0 # success
+ jr ra
+SYM_FUNC_END(_restore_ftop_context)
+
+.L_lbt_fault:
+ li.w a0, -EFAULT # failure
+ jr ra
diff --git a/arch/loongarch/kernel/machine_kexec.c b/arch/loongarch/kernel/machine_kexec.c
new file mode 100644
index 0000000000..2dcb9e0036
--- /dev/null
+++ b/arch/loongarch/kernel/machine_kexec.c
@@ -0,0 +1,304 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * machine_kexec.c for kexec
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/compiler.h>
+#include <linux/cpu.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/libfdt.h>
+#include <linux/mm.h>
+#include <linux/of_fdt.h>
+#include <linux/reboot.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+/* 0x100000 ~ 0x200000 is safe */
+#define KEXEC_CONTROL_CODE TO_CACHE(0x100000UL)
+#define KEXEC_CMDLINE_ADDR TO_CACHE(0x108000UL)
+
+static unsigned long reboot_code_buffer;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void (*relocated_kexec_smp_wait)(void *);
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+#endif
+
+static unsigned long efi_boot;
+static unsigned long cmdline_ptr;
+static unsigned long systable_ptr;
+static unsigned long start_addr;
+static unsigned long first_ind_entry;
+
+static void kexec_image_info(const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("kexec kimage info:\n");
+ pr_debug("\ttype: %d\n", kimage->type);
+ pr_debug("\tstart: %lx\n", kimage->start);
+ pr_debug("\thead: %lx\n", kimage->head);
+ pr_debug("\tnr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug("\t segment[%lu]: %016lx - %016lx", i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz);
+ pr_debug("\t\t0x%lx bytes, %lu pages\n",
+ (unsigned long)kimage->segment[i].memsz,
+ (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+int machine_kexec_prepare(struct kimage *kimage)
+{
+ int i;
+ char *bootloader = "kexec";
+ void *cmdline_ptr = (void *)KEXEC_CMDLINE_ADDR;
+
+ kexec_image_info(kimage);
+
+ kimage->arch.efi_boot = fw_arg0;
+ kimage->arch.systable_ptr = fw_arg2;
+
+ /* Find the command line */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ if (!strncmp(bootloader, (char __user *)kimage->segment[i].buf, strlen(bootloader))) {
+ if (!copy_from_user(cmdline_ptr, kimage->segment[i].buf, COMMAND_LINE_SIZE))
+ kimage->arch.cmdline_ptr = (unsigned long)cmdline_ptr;
+ break;
+ }
+ }
+
+ if (!kimage->arch.cmdline_ptr) {
+ pr_err("Command line not included in the provided image\n");
+ return -EINVAL;
+ }
+
+ /* kexec/kdump need a safe page to save reboot_code_buffer */
+ kimage->control_code_page = virt_to_page((void *)KEXEC_CONTROL_CODE);
+
+ reboot_code_buffer = (unsigned long)page_address(kimage->control_code_page);
+ memcpy((void *)reboot_code_buffer, relocate_new_kernel, relocate_new_kernel_size);
+
+#ifdef CONFIG_SMP
+	/* All secondary CPUs may now jump to the kexec_smp_wait loop */
+ relocated_kexec_smp_wait = reboot_code_buffer + (void *)(kexec_smp_wait - relocate_new_kernel);
+#endif
+
+ return 0;
+}
+
+void machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+void kexec_reboot(void)
+{
+ do_kexec_t do_kexec = NULL;
+
+ /*
+ * We know we were online, and there will be no incoming IPIs at
+ * this point. Mark online again before rebooting so that the crash
+ * analysis tool will see us correctly.
+ */
+ set_cpu_online(smp_processor_id(), true);
+
+ /* Ensure remote CPUs observe that we're online before rebooting. */
+ smp_mb__after_atomic();
+
+ /*
+ * Make sure we get correct instructions written by the
+ * machine_kexec_prepare() CPU.
+ */
+ __asm__ __volatile__ ("\tibar 0\n"::);
+
+#ifdef CONFIG_SMP
+ /* All secondary cpus go to kexec_smp_wait */
+ if (smp_processor_id() > 0) {
+ relocated_kexec_smp_wait(NULL);
+ unreachable();
+ }
+#endif
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec(efi_boot, cmdline_ptr, systable_ptr, start_addr, first_ind_entry);
+
+ unreachable();
+}
+
+
+#ifdef CONFIG_SMP
+static void kexec_shutdown_secondary(void *regs)
+{
+ int cpu = smp_processor_id();
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+}
+
+static void crash_shutdown_secondary(void *passed_regs)
+{
+ int cpu = smp_processor_id();
+ struct pt_regs *regs = passed_regs;
+
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+}
+
+void crash_smp_send_stop(void)
+{
+ unsigned int ncpus;
+ unsigned long timeout;
+ static int cpus_stopped;
+
+ /*
+ * This function can be called twice in panic path, but obviously
+ * we should execute this only once.
+ */
+ if (cpus_stopped)
+ return;
+
+ cpus_stopped = 1;
+
+ /* Excluding the panic cpu */
+ ncpus = num_online_cpus() - 1;
+
+ smp_call_function(crash_shutdown_secondary, NULL, 0);
+ smp_wmb();
+
+ /*
+	 * The crash CPU sends an IPI and waits for the other CPUs to
+	 * respond, allowing them up to 10 seconds to enter the crash handler.
+ */
+ timeout = MSEC_PER_SEC * 10;
+ pr_emerg("Sending IPI to other cpus...\n");
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && timeout--) {
+ mdelay(1);
+ cpu_relax();
+ }
+}
+#endif /* defined(CONFIG_SMP) */
+
+void machine_shutdown(void)
+{
+ int cpu;
+
+ /* All CPUs go to reboot_code_buffer */
+ for_each_possible_cpu(cpu)
+ if (!cpu_online(cpu))
+ cpu_device_up(get_cpu_device(cpu));
+
+#ifdef CONFIG_SMP
+ smp_call_function(kexec_shutdown_secondary, NULL, 0);
+#endif
+}
+
+void machine_crash_shutdown(struct pt_regs *regs)
+{
+ int crashing_cpu;
+
+ local_irq_disable();
+
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+
+#ifdef CONFIG_SMP
+ crash_smp_send_stop();
+#endif
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
+
+ pr_info("Starting crashdump kernel...\n");
+}
+
+void machine_kexec(struct kimage *image)
+{
+ unsigned long entry, *ptr;
+ struct kimage_arch *internal = &image->arch;
+
+ efi_boot = internal->efi_boot;
+ cmdline_ptr = internal->cmdline_ptr;
+ systable_ptr = internal->systable_ptr;
+
+ start_addr = (unsigned long)phys_to_virt(image->start);
+
+ first_ind_entry = (image->type == KEXEC_TYPE_DEFAULT) ?
+ (unsigned long)phys_to_virt(image->head & PAGE_MASK) : 0;
+
+ /*
+ * The generic kexec code builds a page list with physical
+	 * addresses. They are directly accessible through XKPRANGE,
+	 * hence the phys_to_virt() call.
+ */
+ for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+ ptr = (entry & IND_INDIRECTION) ?
+ phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+ if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+ *ptr & IND_DESTINATION)
+ *ptr = (unsigned long) phys_to_virt(*ptr);
+ }
+
+ /* Mark offline before disabling local irq. */
+ set_cpu_online(smp_processor_id(), false);
+
+ /* We do not want to be bothered. */
+ local_irq_disable();
+
+ pr_notice("EFI boot flag 0x%lx\n", efi_boot);
+ pr_notice("Command line at 0x%lx\n", cmdline_ptr);
+ pr_notice("System table at 0x%lx\n", systable_ptr);
+ pr_notice("We will call new kernel at 0x%lx\n", start_addr);
+ pr_notice("Bye ...\n");
+
+ /* Make reboot code buffer available to the boot CPU. */
+ flush_cache_all();
+
+#ifdef CONFIG_SMP
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
+
+ kexec_reboot();
+}
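
The entry-list walk at the end of machine_kexec() is the subtle part: kimage->head is a packed list in which each word is a physical address tagged with IND_* flags, and indirection entries chain to further pages of entries. A hedged sketch that walks the same structure read-only, just to make the flag handling explicit (count_source_pages() is illustrative, not part of the patch):

#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/io.h>

/* Illustrative walk of a kimage entry list, mirroring the loop in machine_kexec(). */
static unsigned long count_source_pages(struct kimage *image)
{
	unsigned long entry, *ptr, n = 0;

	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
	     ptr = (entry & IND_INDIRECTION) ?
			phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
		if (entry & IND_SOURCE)
			n++;	/* one page of the new kernel to be copied */
	}

	return n;
}
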
diff --git a/arch/loongarch/kernel/mcount.S b/arch/loongarch/kernel/mcount.S
new file mode 100644
index 0000000000..3015896016
--- /dev/null
+++ b/arch/loongarch/kernel/mcount.S
@@ -0,0 +1,98 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * LoongArch specific _mcount support
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/export.h>
+#include <asm/ftrace.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .text
+
+#define MCOUNT_S0_OFFSET (0)
+#define MCOUNT_RA_OFFSET (SZREG)
+#define MCOUNT_STACK_SIZE (2 * SZREG)
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_ADDI sp, sp, -MCOUNT_STACK_SIZE
+ PTR_S s0, sp, MCOUNT_S0_OFFSET
+ PTR_S ra, sp, MCOUNT_RA_OFFSET
+ move s0, a0
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ move a0, s0
+ PTR_L ra, sp, MCOUNT_RA_OFFSET
+ PTR_L s0, sp, MCOUNT_S0_OFFSET
+ PTR_ADDI sp, sp, MCOUNT_STACK_SIZE
+ .endm
+
+SYM_FUNC_START(_mcount)
+ la.pcrel t1, ftrace_stub
+ la.pcrel t2, ftrace_trace_function /* Prepare t2 for (1) */
+ PTR_L t2, t2, 0
+ beq t1, t2, fgraph_trace
+
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg0: self return address */
+ move a1, s0 /* arg1: parent's return address */
+ jirl ra, t2, 0 /* (1) call *ftrace_trace_function */
+
+ MCOUNT_RESTORE_REGS
+
+fgraph_trace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ la.pcrel t1, ftrace_stub
+ la.pcrel t3, ftrace_graph_return
+ PTR_L t3, t3, 0
+ bne t1, t3, ftrace_graph_caller
+ la.pcrel t1, ftrace_graph_entry_stub
+ la.pcrel t3, ftrace_graph_entry
+ PTR_L t3, t3, 0
+ bne t1, t3, ftrace_graph_caller
+#endif
+
+SYM_INNER_LABEL(ftrace_stub, SYM_L_GLOBAL)
+ jr ra
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_func, SYM_L_GLOBAL)
+ bl ftrace_stub
+#endif
+SYM_FUNC_END(_mcount)
+EXPORT_SYMBOL(_mcount)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_FUNC_START(ftrace_graph_caller)
+ MCOUNT_SAVE_REGS
+
+ PTR_ADDI a0, ra, -4 /* arg0: Callsite self return addr */
+ PTR_ADDI a1, sp, MCOUNT_STACK_SIZE /* arg1: Callsite sp */
+ move a2, s0 /* arg2: Callsite parent ra */
+ bl prepare_ftrace_return
+
+ MCOUNT_RESTORE_REGS
+ jr ra
+SYM_FUNC_END(ftrace_graph_caller)
+
+SYM_FUNC_START(return_to_handler)
+ PTR_ADDI sp, sp, -FGRET_REGS_SIZE
+ PTR_S a0, sp, FGRET_REGS_A0
+ PTR_S a1, sp, FGRET_REGS_A1
+ PTR_S zero, sp, FGRET_REGS_FP
+
+ move a0, sp
+ bl ftrace_return_to_handler
+
+ /* Restore the real parent address: a0 -> ra */
+ move ra, a0
+
+ PTR_L a0, sp, FGRET_REGS_A0
+ PTR_L a1, sp, FGRET_REGS_A1
+ PTR_ADDI sp, sp, FGRET_REGS_SIZE
+ jr ra
+SYM_FUNC_END(return_to_handler)
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/loongarch/kernel/mcount_dyn.S b/arch/loongarch/kernel/mcount_dyn.S
new file mode 100644
index 0000000000..482aa553aa
--- /dev/null
+++ b/arch/loongarch/kernel/mcount_dyn.S
@@ -0,0 +1,160 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <asm/ftrace.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+ .text
+/*
+ * Due to -fpatchable-function-entry=2, the compiler inserts 2 NOPs before the
+ * regular C function prologue. When the PC arrives here, the last 2 executed
+ * instructions are as follows:
+ *     move t0, ra
+ *     bl callsite (for modules, callsite is a trampoline)
+ *
+ * The module trampoline is as follows:
+ *     lu12i.w t1, callsite[31:12]
+ *     lu32i.d t1, callsite[51:32]
+ *     lu52i.d t1, t1, callsite[63:52]
+ *     jirl zero, t1, callsite[11:0] >> 2
+ *
+ * See arch/loongarch/kernel/ftrace_dyn.c for details. Note that the T series
+ * regs are available and safe here because every C function follows the
+ * LoongArch psABI.
+ */
+
+ .macro ftrace_regs_entry allregs=0
+ PTR_ADDI sp, sp, -PT_SIZE
+ PTR_S t0, sp, PT_R1 /* Save parent ra at PT_R1(RA) */
+ PTR_S a0, sp, PT_R4
+ PTR_S a1, sp, PT_R5
+ PTR_S a2, sp, PT_R6
+ PTR_S a3, sp, PT_R7
+ PTR_S a4, sp, PT_R8
+ PTR_S a5, sp, PT_R9
+ PTR_S a6, sp, PT_R10
+ PTR_S a7, sp, PT_R11
+ PTR_S fp, sp, PT_R22
+ .if \allregs
+ PTR_S tp, sp, PT_R2
+ PTR_S t0, sp, PT_R12
+ PTR_S t2, sp, PT_R14
+ PTR_S t3, sp, PT_R15
+ PTR_S t4, sp, PT_R16
+ PTR_S t5, sp, PT_R17
+ PTR_S t6, sp, PT_R18
+ PTR_S t7, sp, PT_R19
+ PTR_S t8, sp, PT_R20
+ PTR_S u0, sp, PT_R21
+ PTR_S s0, sp, PT_R23
+ PTR_S s1, sp, PT_R24
+ PTR_S s2, sp, PT_R25
+ PTR_S s3, sp, PT_R26
+ PTR_S s4, sp, PT_R27
+ PTR_S s5, sp, PT_R28
+ PTR_S s6, sp, PT_R29
+ PTR_S s7, sp, PT_R30
+ PTR_S s8, sp, PT_R31
+	/* Clear it so it can be used as a flag later. */
+ PTR_S zero, sp, PT_R0
+ .endif
+ PTR_S ra, sp, PT_ERA /* Save trace function ra at PT_ERA */
+ move t1, zero
+ PTR_S t1, sp, PT_R13
+ PTR_ADDI t8, sp, PT_SIZE
+ PTR_S t8, sp, PT_R3
+ .endm
+
+SYM_FUNC_START(ftrace_stub)
+ jr ra
+SYM_FUNC_END(ftrace_stub)
+
+SYM_CODE_START(ftrace_common)
+ PTR_ADDI a0, ra, -8 /* arg0: ip */
+ move a1, t0 /* arg1: parent_ip */
+ la.pcrel t1, function_trace_op
+ PTR_L a2, t1, 0 /* arg2: op */
+ move a3, sp /* arg3: regs */
+
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
+ bl ftrace_stub
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
+ nop /* b ftrace_graph_caller */
+#endif
+
+/*
+ * As we didn't use S series regs in this assembly code, and all calls
+ * are to C functions which save the S series regs themselves, there is
+ * no need to restore S series regs. The T series is available and safe
+ * at the callsite, so there is no need to restore the T series regs.
+ */
+ftrace_common_return:
+ PTR_L ra, sp, PT_R1
+ PTR_L a0, sp, PT_R4
+ PTR_L a1, sp, PT_R5
+ PTR_L a2, sp, PT_R6
+ PTR_L a3, sp, PT_R7
+ PTR_L a4, sp, PT_R8
+ PTR_L a5, sp, PT_R9
+ PTR_L a6, sp, PT_R10
+ PTR_L a7, sp, PT_R11
+ PTR_L fp, sp, PT_R22
+ PTR_L t0, sp, PT_ERA
+ PTR_L t1, sp, PT_R13
+ PTR_ADDI sp, sp, PT_SIZE
+ bnez t1, .Ldirect
+ jr t0
+.Ldirect:
+ jr t1
+SYM_CODE_END(ftrace_common)
+
+SYM_CODE_START(ftrace_caller)
+ ftrace_regs_entry allregs=0
+ b ftrace_common
+SYM_CODE_END(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+SYM_CODE_START(ftrace_regs_caller)
+ ftrace_regs_entry allregs=1
+ b ftrace_common
+SYM_CODE_END(ftrace_regs_caller)
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+SYM_CODE_START(ftrace_graph_caller)
+ PTR_L a0, sp, PT_ERA
+ PTR_ADDI a0, a0, -8 /* arg0: self_addr */
+ PTR_ADDI a1, sp, PT_R1 /* arg1: parent */
+ bl prepare_ftrace_return
+ b ftrace_common_return
+SYM_CODE_END(ftrace_graph_caller)
+
+SYM_CODE_START(return_to_handler)
+ /* Save return value regs */
+ PTR_ADDI sp, sp, -FGRET_REGS_SIZE
+ PTR_S a0, sp, FGRET_REGS_A0
+ PTR_S a1, sp, FGRET_REGS_A1
+ PTR_S zero, sp, FGRET_REGS_FP
+
+ move a0, sp
+ bl ftrace_return_to_handler
+ move ra, a0
+
+ /* Restore return value regs */
+ PTR_L a0, sp, FGRET_REGS_A0
+ PTR_L a1, sp, FGRET_REGS_A1
+ PTR_ADDI sp, sp, FGRET_REGS_SIZE
+
+ jr ra
+SYM_CODE_END(return_to_handler)
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS
+SYM_CODE_START(ftrace_stub_direct_tramp)
+ jr t0
+SYM_CODE_END(ftrace_stub_direct_tramp)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */
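
Tying the header comment back to the generators added in inst.c: with dynamic ftrace, the two patchable NOPs are rewritten at runtime into 'move t0, ra' and 'bl <tracer>' (the real logic lives in ftrace_dyn.c, referenced above). A hedged sketch of that patching step; make_ftrace_call() and the LOONGARCH_GPR_* spellings are assumptions for illustration, not the actual ftrace_dyn.c code:

#include <linux/errno.h>
#include <asm/inst.h>

/*
 * Illustrative: build and patch in the two instructions that replace the
 * patchable NOPs, as described in the comment at the top of mcount_dyn.S.
 */
static int make_ftrace_call(unsigned long callsite, unsigned long tracer)
{
	u32 move_insn = larch_insn_gen_move(LOONGARCH_GPR_T0, LOONGARCH_GPR_RA);
	u32 bl_insn   = larch_insn_gen_bl(callsite + LOONGARCH_INSN_SIZE, tracer);

	if (bl_insn == INSN_BREAK)	/* tracer outside the ±128MB bl range */
		return -ERANGE;

	if (larch_insn_patch_text((void *)callsite, move_insn))
		return -EPERM;

	return larch_insn_patch_text((void *)(callsite + LOONGARCH_INSN_SIZE), bl_insn);
}
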
diff --git a/arch/loongarch/kernel/mem.c b/arch/loongarch/kernel/mem.c
new file mode 100644
index 0000000000..aed901c57f
--- /dev/null
+++ b/arch/loongarch/kernel/mem.c
@@ -0,0 +1,63 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/efi.h>
+#include <linux/initrd.h>
+#include <linux/memblock.h>
+
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/sections.h>
+
+void __init memblock_init(void)
+{
+ u32 mem_type;
+ u64 mem_start, mem_end, mem_size;
+ efi_memory_desc_t *md;
+
+ /* Parse memory information */
+ for_each_efi_memory_desc(md) {
+ mem_type = md->type;
+ mem_start = md->phys_addr;
+ mem_size = md->num_pages << EFI_PAGE_SHIFT;
+ mem_end = mem_start + mem_size;
+
+ switch (mem_type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_PERSISTENT_MEMORY:
+ case EFI_CONVENTIONAL_MEMORY:
+ memblock_add(mem_start, mem_size);
+ if (max_low_pfn < (mem_end >> PAGE_SHIFT))
+ max_low_pfn = mem_end >> PAGE_SHIFT;
+ break;
+ case EFI_PAL_CODE:
+ case EFI_UNUSABLE_MEMORY:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ memblock_add(mem_start, mem_size);
+ fallthrough;
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ memblock_reserve(mem_start, mem_size);
+ break;
+ }
+ }
+
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
+ /* Reserve the first 2MB */
+ memblock_reserve(PHYS_OFFSET, 0x200000);
+
+ /* Reserve the kernel text/data/bss */
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
+
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.reserved, 0);
+}
diff --git a/arch/loongarch/kernel/module-sections.c b/arch/loongarch/kernel/module-sections.c
new file mode 100644
index 0000000000..e2f30ff9af
--- /dev/null
+++ b/arch/loongarch/kernel/module-sections.c
@@ -0,0 +1,185 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+#include <linux/ftrace.h>
+
+Elf_Addr module_emit_got_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
+{
+ struct mod_section *got_sec = &mod->arch.got;
+ int i = got_sec->num_entries;
+ struct got_entry *got = get_got_entry(val, sechdrs, got_sec);
+
+ if (got)
+ return (Elf_Addr)got;
+
+ /* There is no GOT entry for val yet, create a new one. */
+ got = (struct got_entry *)sechdrs[got_sec->shndx].sh_addr;
+ got[i] = emit_got_entry(val);
+
+ got_sec->num_entries++;
+ if (got_sec->num_entries > got_sec->max_entries) {
+ /*
+ * This may happen when the module contains a GOT_HI20 without
+ * a paired GOT_LO12. Such a module is broken, reject it.
+ */
+ pr_err("%s: module contains bad GOT relocation\n", mod->name);
+ return 0;
+ }
+
+ return (Elf_Addr)&got[i];
+}
+
+Elf_Addr module_emit_plt_entry(struct module *mod, Elf_Shdr *sechdrs, Elf_Addr val)
+{
+ int nr;
+ struct mod_section *plt_sec = &mod->arch.plt;
+ struct mod_section *plt_idx_sec = &mod->arch.plt_idx;
+ struct plt_entry *plt = get_plt_entry(val, sechdrs, plt_sec, plt_idx_sec);
+ struct plt_idx_entry *plt_idx;
+
+ if (plt)
+ return (Elf_Addr)plt;
+
+ nr = plt_sec->num_entries;
+
+ /* There is no duplicate entry, create a new one */
+ plt = (struct plt_entry *)sechdrs[plt_sec->shndx].sh_addr;
+ plt[nr] = emit_plt_entry(val);
+ plt_idx = (struct plt_idx_entry *)sechdrs[plt_idx_sec->shndx].sh_addr;
+ plt_idx[nr] = emit_plt_idx_entry(val);
+
+ plt_sec->num_entries++;
+ plt_idx_sec->num_entries++;
+ BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
+
+ return (Elf_Addr)&plt[nr];
+}
+
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+{
+ return x->r_info == y->r_info && x->r_addend == y->r_addend;
+}
+
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
+{
+ int i;
+
+ for (i = 0; i < idx; i++) {
+ if (is_rela_equal(&rela[i], &rela[idx]))
+ return true;
+ }
+
+ return false;
+}
+
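+/*
+ * Relocations with identical r_info and addend can share a single PLT or
+ * GOT entry, so duplicates are only counted once when sizing the sections.
+ */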
+static void count_max_entries(Elf_Rela *relas, int num,
+ unsigned int *plts, unsigned int *gots)
+{
+ unsigned int i, type;
+
+ for (i = 0; i < num; i++) {
+ type = ELF_R_TYPE(relas[i].r_info);
+ switch (type) {
+ case R_LARCH_SOP_PUSH_PLT_PCREL:
+ case R_LARCH_B26:
+ if (!duplicate_rela(relas, i))
+ (*plts)++;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+ if (!duplicate_rela(relas, i))
+ (*gots)++;
+ break;
+ default:
+ break; /* Do nothing. */
+ }
+ }
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int i, num_plts = 0, num_gots = 0;
+ Elf_Shdr *got_sec, *plt_sec, *plt_idx_sec, *tramp = NULL;
+
+	/*
+	 * Find the (empty) .got, .plt, .plt.idx and .ftrace_trampoline sections.
+	 */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
+ mod->arch.got.shndx = i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+ mod->arch.plt.shndx = i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt.idx"))
+ mod->arch.plt_idx.shndx = i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".ftrace_trampoline"))
+ tramp = sechdrs + i;
+ }
+
+ if (!mod->arch.got.shndx) {
+ pr_err("%s: module GOT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.plt.shndx) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.plt_idx.shndx) {
+ pr_err("%s: module PLT.IDX section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+	/* Calculate the maximum number of entries */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+ Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+ Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dst_sec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ count_max_entries(relas, num_rela, &num_plts, &num_gots);
+ }
+
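+	/*
+	 * Size each section with one spare entry, so a broken module (e.g.
+	 * a GOT_HI20 without its paired GOT_LO12) is caught when the entries
+	 * are emitted instead of silently overflowing the section.
+	 */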
+ got_sec = sechdrs + mod->arch.got.shndx;
+ got_sec->sh_type = SHT_NOBITS;
+ got_sec->sh_flags = SHF_ALLOC;
+ got_sec->sh_addralign = L1_CACHE_BYTES;
+ got_sec->sh_size = (num_gots + 1) * sizeof(struct got_entry);
+ mod->arch.got.num_entries = 0;
+ mod->arch.got.max_entries = num_gots;
+
+ plt_sec = sechdrs + mod->arch.plt.shndx;
+ plt_sec->sh_type = SHT_NOBITS;
+ plt_sec->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ plt_sec->sh_addralign = L1_CACHE_BYTES;
+ plt_sec->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.plt.num_entries = 0;
+ mod->arch.plt.max_entries = num_plts;
+
+ plt_idx_sec = sechdrs + mod->arch.plt_idx.shndx;
+ plt_idx_sec->sh_type = SHT_NOBITS;
+ plt_idx_sec->sh_flags = SHF_ALLOC;
+ plt_idx_sec->sh_addralign = L1_CACHE_BYTES;
+ plt_idx_sec->sh_size = (num_plts + 1) * sizeof(struct plt_idx_entry);
+ mod->arch.plt_idx.num_entries = 0;
+ mod->arch.plt_idx.max_entries = num_plts;
+
+ if (tramp) {
+ tramp->sh_type = SHT_NOBITS;
+ tramp->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ tramp->sh_addralign = __alignof__(struct plt_entry);
+ tramp->sh_size = NR_FTRACE_PLTS * sizeof(struct plt_entry);
+ }
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/module.c b/arch/loongarch/kernel/module.c
new file mode 100644
index 0000000000..b13b2858fe
--- /dev/null
+++ b/arch/loongarch/kernel/module.c
@@ -0,0 +1,529 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#define pr_fmt(fmt) "kmod: " fmt
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/ftrace.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <asm/alternative.h>
+#include <asm/inst.h>
+
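+/*
+ * The stack-based (R_LARCH_SOP_*) relocations of the old ABI are evaluated
+ * with a small expression stack: operands are pushed, combined, and finally
+ * popped into an instruction's immediate field. The stack itself lives in
+ * apply_relocate_add() and is local to one call of that function.
+ */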
+static int rela_stack_push(s64 stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+ if (*rela_stack_top >= RELA_STACK_DEPTH)
+ return -ENOEXEC;
+
+ rela_stack[(*rela_stack_top)++] = stack_value;
+ pr_debug("%s stack_value = 0x%llx\n", __func__, stack_value);
+
+ return 0;
+}
+
+static int rela_stack_pop(s64 *stack_value, s64 *rela_stack, size_t *rela_stack_top)
+{
+ if (*rela_stack_top == 0)
+ return -ENOEXEC;
+
+ *stack_value = rela_stack[--(*rela_stack_top)];
+ pr_debug("%s stack_value = 0x%llx\n", __func__, *stack_value);
+
+ return 0;
+}
+
+static int apply_r_larch_none(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return 0;
+}
+
+static int apply_r_larch_error(struct module *me, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+	pr_err("%s: Unsupported relocation type %u, please add support for it.\n", me->name, type);
+ return -EINVAL;
+}
+
+static int apply_r_larch_32(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ *location = v;
+ return 0;
+}
+
+static int apply_r_larch_64(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ *(Elf_Addr *)location = v;
+ return 0;
+}
+
+static int apply_r_larch_sop_push_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return rela_stack_push(v - (u64)location, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_absolute(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ return rela_stack_push(v, rela_stack, rela_stack_top);
+}
+
+static int apply_r_larch_sop_push_dup(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1;
+
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_push(opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int apply_r_larch_sop_push_plt_pcrel(struct module *mod,
+ Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ if (offset >= SZ_128M)
+ v = module_emit_plt_entry(mod, sechdrs, v);
+
+ if (offset < -SZ_128M)
+ v = module_emit_plt_entry(mod, sechdrs, v);
+
+ return apply_r_larch_sop_push_pcrel(mod, location, v, rela_stack, rela_stack_top, type);
+}
+
+static int apply_r_larch_sop(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1, opr2, opr3;
+
+ if (type == R_LARCH_SOP_IF_ELSE) {
+ err = rela_stack_pop(&opr3, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ }
+
+ err = rela_stack_pop(&opr2, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ switch (type) {
+ case R_LARCH_SOP_AND:
+ err = rela_stack_push(opr1 & opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_ADD:
+ err = rela_stack_push(opr1 + opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SUB:
+ err = rela_stack_push(opr1 - opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SL:
+ err = rela_stack_push(opr1 << opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_SR:
+ err = rela_stack_push(opr1 >> opr2, rela_stack, rela_stack_top);
+ break;
+ case R_LARCH_SOP_IF_ELSE:
+ err = rela_stack_push(opr1 ? opr2 : opr3, rela_stack, rela_stack_top);
+ break;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static int apply_r_larch_sop_imm_field(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ int err = 0;
+ s64 opr1;
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+
+ err = rela_stack_pop(&opr1, rela_stack, rela_stack_top);
+ if (err)
+ return err;
+
+ switch (type) {
+ case R_LARCH_SOP_POP_32_U_10_12:
+ if (!unsigned_imm_check(opr1, 12))
+ goto overflow;
+
+ /* (*(uint32_t *) PC) [21 ... 10] = opr [11 ... 0] */
+ insn->reg2i12_format.immediate = opr1 & 0xfff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_12:
+ if (!signed_imm_check(opr1, 12))
+ goto overflow;
+
+ insn->reg2i12_format.immediate = opr1 & 0xfff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_16:
+ if (!signed_imm_check(opr1, 16))
+ goto overflow;
+
+ insn->reg2i16_format.immediate = opr1 & 0xffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 18))
+ goto overflow;
+
+ insn->reg2i16_format.immediate = (opr1 >> 2) & 0xffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_5_20:
+ if (!signed_imm_check(opr1, 20))
+ goto overflow;
+
+ insn->reg1i20_format.immediate = (opr1) & 0xfffff;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_0_5_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 23))
+ goto overflow;
+
+ opr1 >>= 2;
+ insn->reg1i21_format.immediate_l = opr1 & 0xffff;
+ insn->reg1i21_format.immediate_h = (opr1 >> 16) & 0x1f;
+ return 0;
+ case R_LARCH_SOP_POP_32_S_0_10_10_16_S2:
+ if (opr1 % 4)
+ goto unaligned;
+
+ if (!signed_imm_check(opr1, 28))
+ goto overflow;
+
+ opr1 >>= 2;
+ insn->reg0i26_format.immediate_l = opr1 & 0xffff;
+ insn->reg0i26_format.immediate_h = (opr1 >> 16) & 0x3ff;
+ return 0;
+ case R_LARCH_SOP_POP_32_U:
+ if (!unsigned_imm_check(opr1, 32))
+ goto overflow;
+
+ /* (*(uint32_t *) PC) = opr */
+ *location = (u32)opr1;
+ return 0;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+overflow:
+ pr_err("module %s: opr1 = 0x%llx overflow! dangerous %s (%u) relocation\n",
+ mod->name, opr1, __func__, type);
+ return -ENOEXEC;
+
+unaligned:
+ pr_err("module %s: opr1 = 0x%llx unaligned! dangerous %s (%u) relocation\n",
+ mod->name, opr1, __func__, type);
+ return -ENOEXEC;
+}
+
+static int apply_r_larch_add_sub(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ switch (type) {
+ case R_LARCH_ADD32:
+ *(s32 *)location += v;
+ return 0;
+ case R_LARCH_ADD64:
+ *(s64 *)location += v;
+ return 0;
+ case R_LARCH_SUB32:
+ *(s32 *)location -= v;
+ return 0;
+ case R_LARCH_SUB64:
+ *(s64 *)location -= v;
+ return 0;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+}
+
+static int apply_r_larch_b26(struct module *mod,
+ Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+
+ if (offset >= SZ_128M)
+ v = module_emit_plt_entry(mod, sechdrs, v);
+
+ if (offset < -SZ_128M)
+ v = module_emit_plt_entry(mod, sechdrs, v);
+
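+	/* Recompute the offset: the branch may now target a PLT entry. */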
+ offset = (void *)v - (void *)location;
+
+ if (offset & 3) {
+ pr_err("module %s: jump offset = 0x%llx unaligned! dangerous R_LARCH_B26 (%u) relocation\n",
+ mod->name, (long long)offset, type);
+ return -ENOEXEC;
+ }
+
+ if (!signed_imm_check(offset, 28)) {
+ pr_err("module %s: jump offset = 0x%llx overflow! dangerous R_LARCH_B26 (%u) relocation\n",
+ mod->name, (long long)offset, type);
+ return -ENOEXEC;
+ }
+
+ offset >>= 2;
+ insn->reg0i26_format.immediate_l = offset & 0xffff;
+ insn->reg0i26_format.immediate_h = (offset >> 16) & 0x3ff;
+
+ return 0;
+}
+
+static int apply_r_larch_pcala(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ union loongarch_instruction *insn = (union loongarch_instruction *)location;
+ /* Use s32 for a sign-extension deliberately. */
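+	/*
+	 * Adding 0x800 rounds the PC-relative offset to the nearest 4 KiB
+	 * page, compensating for the sign-extended LO12 part that follows.
+	 */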
+ s32 offset_hi20 = (void *)((v + 0x800) & ~0xfff) -
+ (void *)((Elf_Addr)location & ~0xfff);
+ Elf_Addr anchor = (((Elf_Addr)location) & ~0xfff) + offset_hi20;
+ ptrdiff_t offset_rem = (void *)v - (void *)anchor;
+
+ switch (type) {
+ case R_LARCH_PCALA_LO12:
+ insn->reg2i12_format.immediate = v & 0xfff;
+ break;
+ case R_LARCH_PCALA_HI20:
+ v = offset_hi20 >> 12;
+ insn->reg1i20_format.immediate = v & 0xfffff;
+ break;
+ case R_LARCH_PCALA64_LO20:
+ v = offset_rem >> 32;
+ insn->reg1i20_format.immediate = v & 0xfffff;
+ break;
+ case R_LARCH_PCALA64_HI12:
+ v = offset_rem >> 52;
+ insn->reg2i12_format.immediate = v & 0xfff;
+ break;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int apply_r_larch_got_pc(struct module *mod,
+ Elf_Shdr *sechdrs, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ Elf_Addr got = module_emit_got_entry(mod, sechdrs, v);
+
+ if (!got)
+ return -EINVAL;
+
+ switch (type) {
+ case R_LARCH_GOT_PC_LO12:
+ type = R_LARCH_PCALA_LO12;
+ break;
+ case R_LARCH_GOT_PC_HI20:
+ type = R_LARCH_PCALA_HI20;
+ break;
+ default:
+		pr_err("%s: Unsupported relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ return apply_r_larch_pcala(mod, location, got, rela_stack, rela_stack_top, type);
+}
+
+static int apply_r_larch_32_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ *(u32 *)location = offset;
+ return 0;
+}
+
+static int apply_r_larch_64_pcrel(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+
+ *(u64 *)location = offset;
+ return 0;
+}
+
+/*
+ * reloc_handlers_rela() - Apply a particular relocation to a module
+ * @mod: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela_stack: the stack used to store relocation info, LOCAL to THIS module
+ * @rela_stack_top: where the stack operations (push/pop) apply
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+typedef int (*reloc_rela_handler)(struct module *mod, u32 *location, Elf_Addr v,
+ s64 *rela_stack, size_t *rela_stack_top, unsigned int type);
+
+/* The handlers for known reloc types */
+static reloc_rela_handler reloc_rela_handlers[] = {
+ [R_LARCH_NONE ... R_LARCH_64_PCREL] = apply_r_larch_error,
+
+ [R_LARCH_NONE] = apply_r_larch_none,
+ [R_LARCH_32] = apply_r_larch_32,
+ [R_LARCH_64] = apply_r_larch_64,
+ [R_LARCH_MARK_LA] = apply_r_larch_none,
+ [R_LARCH_MARK_PCREL] = apply_r_larch_none,
+ [R_LARCH_SOP_PUSH_PCREL] = apply_r_larch_sop_push_pcrel,
+ [R_LARCH_SOP_PUSH_ABSOLUTE] = apply_r_larch_sop_push_absolute,
+ [R_LARCH_SOP_PUSH_DUP] = apply_r_larch_sop_push_dup,
+ [R_LARCH_SOP_SUB ... R_LARCH_SOP_IF_ELSE] = apply_r_larch_sop,
+ [R_LARCH_SOP_POP_32_S_10_5 ... R_LARCH_SOP_POP_32_U] = apply_r_larch_sop_imm_field,
+ [R_LARCH_ADD32 ... R_LARCH_SUB64] = apply_r_larch_add_sub,
+	[R_LARCH_PCALA_HI20 ... R_LARCH_PCALA64_HI12] = apply_r_larch_pcala,
+ [R_LARCH_32_PCREL] = apply_r_larch_32_pcrel,
+ [R_LARCH_64_PCREL] = apply_r_larch_64_pcrel,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *mod)
+{
+ int i, err;
+ unsigned int type;
+ s64 rela_stack[RELA_STACK_DEPTH];
+ size_t rela_stack_top = 0;
+ reloc_rela_handler handler;
+ void *location;
+ Elf_Addr v;
+ Elf_Sym *sym;
+ Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+
+ pr_debug("%s: Applying relocate section %u to %u\n", __func__, relsec,
+ sechdrs[relsec].sh_info);
+
+ rela_stack_top = 0;
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr + ELF_R_SYM(rel[i].r_info);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warn("%s: Unknown symbol %s\n", mod->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ type = ELF_R_TYPE(rel[i].r_info);
+
+ if (type < ARRAY_SIZE(reloc_rela_handlers))
+ handler = reloc_rela_handlers[type];
+ else
+ handler = NULL;
+
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n", mod->name, type);
+ return -EINVAL;
+ }
+
+ pr_debug("type %d st_value %llx r_addend %llx loc %llx\n",
+ (int)ELF_R_TYPE(rel[i].r_info),
+ sym->st_value, rel[i].r_addend, (u64)location);
+
+ v = sym->st_value + rel[i].r_addend;
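+		/*
+		 * Relocation types that may emit PLT or GOT entries are
+		 * dispatched separately because they also need the section
+		 * headers.
+		 */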
+ switch (type) {
+ case R_LARCH_B26:
+ err = apply_r_larch_b26(mod, sechdrs, location,
+ v, rela_stack, &rela_stack_top, type);
+ break;
+		case R_LARCH_GOT_PC_HI20 ... R_LARCH_GOT_PC_LO12:
+ err = apply_r_larch_got_pc(mod, sechdrs, location,
+ v, rela_stack, &rela_stack_top, type);
+ break;
+ case R_LARCH_SOP_PUSH_PLT_PCREL:
+ err = apply_r_larch_sop_push_plt_pcrel(mod, sechdrs, location,
+ v, rela_stack, &rela_stack_top, type);
+ break;
+ default:
+ err = handler(mod, location, v, rela_stack, &rela_stack_top, type);
+ }
+ if (err)
+ return err;
+ }
+
+ return 0;
+}
+
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, MODULES_VADDR, MODULES_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE, __builtin_return_address(0));
+}
+
+static void module_init_ftrace_plt(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, struct module *mod)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+ struct plt_entry *ftrace_plts;
+
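+	/*
+	 * Fill the .ftrace_trampoline section with PLT entries that jump
+	 * to the ftrace entry points.
+	 */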
+ ftrace_plts = (void *)sechdrs->sh_addr;
+
+ ftrace_plts[FTRACE_PLT_IDX] = emit_plt_entry(FTRACE_ADDR);
+
+ if (IS_ENABLED(CONFIG_DYNAMIC_FTRACE_WITH_REGS))
+ ftrace_plts[FTRACE_REGS_PLT_IDX] = emit_plt_entry(FTRACE_REGS_ADDR);
+
+ mod->arch.ftrace_trampolines = ftrace_plts;
+#endif
+}
+
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs, struct module *mod)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (!strcmp(".altinstructions", secstrs + s->sh_name))
+ apply_alternatives((void *)s->sh_addr, (void *)s->sh_addr + s->sh_size);
+ if (!strcmp(".ftrace_trampoline", secstrs + s->sh_name))
+ module_init_ftrace_plt(hdr, s, mod);
+ }
+
+ return 0;
+}
diff --git a/arch/loongarch/kernel/numa.c b/arch/loongarch/kernel/numa.c
new file mode 100644
index 0000000000..6e65ff12d5
--- /dev/null
+++ b/arch/loongarch/kernel/numa.c
@@ -0,0 +1,447 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Xiang Gao <gaoxiang@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/mmzone.h>
+#include <linux/export.h>
+#include <linux/nodemask.h>
+#include <linux/swap.h>
+#include <linux/memblock.h>
+#include <linux/pfn.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/irq.h>
+#include <linux/pci.h>
+#include <asm/bootinfo.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/page.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/time.h>
+
+int numa_off;
+struct pglist_data *node_data[MAX_NUMNODES];
+unsigned char node_distances[MAX_NUMNODES][MAX_NUMNODES];
+
+EXPORT_SYMBOL(node_data);
+EXPORT_SYMBOL(node_distances);
+
+static struct numa_meminfo numa_meminfo;
+cpumask_t cpus_on_node[MAX_NUMNODES];
+cpumask_t phys_cpus_on_node[MAX_NUMNODES];
+EXPORT_SYMBOL(cpus_on_node);
+
+/*
+ * apicid, cpu, node mappings
+ */
+s16 __cpuid_to_node[CONFIG_NR_CPUS] = {
+ [0 ... CONFIG_NR_CPUS - 1] = NUMA_NO_NODE
+};
+EXPORT_SYMBOL(__cpuid_to_node);
+
+nodemask_t numa_nodes_parsed __initdata;
+
+#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
+unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(__per_cpu_offset);
+
+static int __init pcpu_cpu_to_node(int cpu)
+{
+ return early_cpu_to_node(cpu);
+}
+
+static int __init pcpu_cpu_distance(unsigned int from, unsigned int to)
+{
+ if (early_cpu_to_node(from) == early_cpu_to_node(to))
+ return LOCAL_DISTANCE;
+ else
+ return REMOTE_DISTANCE;
+}
+
+void __init pcpu_populate_pte(unsigned long addr)
+{
+ populate_kernel_pte(addr);
+}
+
+void __init setup_per_cpu_areas(void)
+{
+ unsigned long delta;
+ unsigned int cpu;
+ int rc = -EINVAL;
+
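+	/*
+	 * Prefer the page-based first chunk on systems with many (>= 8)
+	 * NUMA nodes, otherwise embed the first chunk.
+	 */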
+ if (pcpu_chosen_fc == PCPU_FC_AUTO) {
+ if (nr_node_ids >= 8)
+ pcpu_chosen_fc = PCPU_FC_PAGE;
+ else
+ pcpu_chosen_fc = PCPU_FC_EMBED;
+ }
+
+ /*
+ * Always reserve area for module percpu variables. That's
+ * what the legacy allocator did.
+ */
+ if (pcpu_chosen_fc != PCPU_FC_PAGE) {
+ rc = pcpu_embed_first_chunk(PERCPU_MODULE_RESERVE,
+ PERCPU_DYNAMIC_RESERVE, PMD_SIZE,
+ pcpu_cpu_distance, pcpu_cpu_to_node);
+ if (rc < 0)
+ pr_warn("%s allocator failed (%d), falling back to page size\n",
+ pcpu_fc_names[pcpu_chosen_fc], rc);
+ }
+ if (rc < 0)
+ rc = pcpu_page_first_chunk(PERCPU_MODULE_RESERVE, pcpu_cpu_to_node);
+ if (rc < 0)
+ panic("cannot initialize percpu area (err=%d)", rc);
+
+ delta = (unsigned long)pcpu_base_addr - (unsigned long)__per_cpu_start;
+ for_each_possible_cpu(cpu)
+ __per_cpu_offset[cpu] = delta + pcpu_unit_offsets[cpu];
+}
+#endif
+
+/*
+ * Get the node id by logical cpu number.
+ * __cpuid_to_node maps physical cpu id to node, so we
+ * should use cpu_logical_map(cpu) to index it.
+ *
+ * This routine is only used in the early boot phase; after
+ * setup_per_cpu_areas() is called and numa_node is initialized,
+ * cpu_to_node() should be used instead.
+ */
+int early_cpu_to_node(int cpu)
+{
+ int physid = cpu_logical_map(cpu);
+
+ if (physid < 0)
+ return NUMA_NO_NODE;
+
+ return __cpuid_to_node[physid];
+}
+
+void __init early_numa_add_cpu(int cpuid, s16 node)
+{
+ int cpu = __cpu_number_map[cpuid];
+
+ if (cpu < 0)
+ return;
+
+ cpumask_set_cpu(cpu, &cpus_on_node[node]);
+ cpumask_set_cpu(cpuid, &phys_cpus_on_node[node]);
+}
+
+void numa_add_cpu(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ cpumask_set_cpu(cpu, &cpus_on_node[nid]);
+}
+
+void numa_remove_cpu(unsigned int cpu)
+{
+ int nid = cpu_to_node(cpu);
+ cpumask_clear_cpu(cpu, &cpus_on_node[nid]);
+}
+
+static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
+ struct numa_meminfo *mi)
+{
+ /* ignore zero length blks */
+ if (start == end)
+ return 0;
+
+ /* whine about and ignore invalid blks */
+ if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
+ pr_warn("NUMA: Warning: invalid memblk node %d [mem %#010Lx-%#010Lx]\n",
+ nid, start, end - 1);
+ return 0;
+ }
+
+ if (mi->nr_blks >= NR_NODE_MEMBLKS) {
+ pr_err("NUMA: too many memblk ranges\n");
+ return -EINVAL;
+ }
+
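+	/* Round the start up and the end down to page boundaries. */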
+ mi->blk[mi->nr_blks].start = PFN_ALIGN(start);
+ mi->blk[mi->nr_blks].end = PFN_ALIGN(end - PAGE_SIZE + 1);
+ mi->blk[mi->nr_blks].nid = nid;
+ mi->nr_blks++;
+ return 0;
+}
+
+/**
+ * numa_add_memblk - Add one numa_memblk to numa_meminfo
+ * @nid: NUMA node ID of the new memblk
+ * @start: Start address of the new memblk
+ * @end: End address of the new memblk
+ *
+ * Add a new memblk to the default numa_meminfo.
+ *
+ * RETURNS:
+ * 0 on success, -errno on failure.
+ */
+int __init numa_add_memblk(int nid, u64 start, u64 end)
+{
+ return numa_add_memblk_to(nid, start, end, &numa_meminfo);
+}
+
+static void __init alloc_node_data(int nid)
+{
+ void *nd;
+ unsigned long nd_pa;
+ size_t nd_sz = roundup(sizeof(pg_data_t), PAGE_SIZE);
+
+ nd_pa = memblock_phys_alloc_try_nid(nd_sz, SMP_CACHE_BYTES, nid);
+ if (!nd_pa) {
+ pr_err("Cannot find %zu Byte for node_data (initial node: %d)\n", nd_sz, nid);
+ return;
+ }
+
+ nd = __va(nd_pa);
+
+ node_data[nid] = nd;
+ memset(nd, 0, sizeof(pg_data_t));
+}
+
+static void __init node_mem_init(unsigned int node)
+{
+ unsigned long start_pfn, end_pfn;
+ unsigned long node_addrspace_offset;
+
+ node_addrspace_offset = nid_to_addrbase(node);
+ pr_info("Node%d's addrspace_offset is 0x%lx\n",
+ node, node_addrspace_offset);
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+ pr_info("Node%d: start_pfn=0x%lx, end_pfn=0x%lx\n",
+ node, start_pfn, end_pfn);
+
+ alloc_node_data(node);
+}
+
+#ifdef CONFIG_ACPI_NUMA
+
+/*
+ * Sanity check to catch more bad NUMA configurations (they are amazingly
+ * common). Make sure the nodes cover all memory.
+ */
+static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
+{
+ int i;
+ u64 numaram, biosram;
+
+ numaram = 0;
+ for (i = 0; i < mi->nr_blks; i++) {
+ u64 s = mi->blk[i].start >> PAGE_SHIFT;
+ u64 e = mi->blk[i].end >> PAGE_SHIFT;
+
+ numaram += e - s;
+ numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
+ if ((s64)numaram < 0)
+ numaram = 0;
+ }
+ max_pfn = max_low_pfn;
+ biosram = max_pfn - absent_pages_in_range(0, max_pfn);
+
+ BUG_ON((s64)(biosram - numaram) >= (1 << (20 - PAGE_SHIFT)));
+ return true;
+}
+
+static void __init add_node_intersection(u32 node, u64 start, u64 size, u32 type)
+{
+ static unsigned long num_physpages;
+
+ num_physpages += (size >> PAGE_SHIFT);
+ pr_info("Node%d: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ node, type, start, size);
+ pr_info(" start_pfn:0x%llx, end_pfn:0x%llx, num_physpages:0x%lx\n",
+ start >> PAGE_SHIFT, (start + size) >> PAGE_SHIFT, num_physpages);
+ memblock_set_node(start, size, &memblock.memory, node);
+}
+
+/*
+ * add_numamem_region
+ *
+ * Add a usable memory region described by the BIOS. The routine
+ * computes each intersection between the BIOS region and the node
+ * regions, and adds them to the nodes' memblock pool.
+ */
+static void __init add_numamem_region(u64 start, u64 end, u32 type)
+{
+ u32 i;
+ u64 ofs = start;
+
+ if (start >= end) {
+ pr_debug("Invalid region: %016llx-%016llx\n", start, end);
+ return;
+ }
+
+ for (i = 0; i < numa_meminfo.nr_blks; i++) {
+ struct numa_memblk *mb = &numa_meminfo.blk[i];
+
+ if (ofs > mb->end)
+ continue;
+
+ if (end > mb->end) {
+ add_node_intersection(mb->nid, ofs, mb->end - ofs, type);
+ ofs = mb->end;
+ } else {
+ add_node_intersection(mb->nid, ofs, end - ofs, type);
+ break;
+ }
+ }
+}
+
+static void __init init_node_memblock(void)
+{
+ u32 mem_type;
+ u64 mem_end, mem_start, mem_size;
+ efi_memory_desc_t *md;
+
+ /* Parse memory information and activate */
+ for_each_efi_memory_desc(md) {
+ mem_type = md->type;
+ mem_start = md->phys_addr;
+ mem_size = md->num_pages << EFI_PAGE_SHIFT;
+ mem_end = mem_start + mem_size;
+
+ switch (mem_type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_PERSISTENT_MEMORY:
+ case EFI_CONVENTIONAL_MEMORY:
+ add_numamem_region(mem_start, mem_end, mem_type);
+ break;
+ case EFI_PAL_CODE:
+ case EFI_UNUSABLE_MEMORY:
+ case EFI_ACPI_RECLAIM_MEMORY:
+ add_numamem_region(mem_start, mem_end, mem_type);
+ fallthrough;
+ case EFI_RESERVED_TYPE:
+ case EFI_RUNTIME_SERVICES_CODE:
+ case EFI_RUNTIME_SERVICES_DATA:
+ case EFI_MEMORY_MAPPED_IO:
+ case EFI_MEMORY_MAPPED_IO_PORT_SPACE:
+ pr_info("Resvd: mem_type:%d, mem_start:0x%llx, mem_size:0x%llx Bytes\n",
+ mem_type, mem_start, mem_size);
+ break;
+ }
+ }
+}
+
+static void __init numa_default_distance(void)
+{
+ int row, col;
+
+ for (row = 0; row < MAX_NUMNODES; row++)
+ for (col = 0; col < MAX_NUMNODES; col++) {
+ if (col == row)
+ node_distances[row][col] = LOCAL_DISTANCE;
+ else
+				/*
+				 * We assume one node per package here.
+				 *
+				 * A SLIT should be used for multiple nodes
+				 * per package to override this default.
+				 */
+ node_distances[row][col] = REMOTE_DISTANCE;
+ }
+}
+
+/*
+ * fake_numa_init() - For Non-ACPI systems
+ * Return: 0 on success, -errno on failure.
+ */
+static int __init fake_numa_init(void)
+{
+ phys_addr_t start = memblock_start_of_DRAM();
+ phys_addr_t end = memblock_end_of_DRAM() - 1;
+
+ node_set(0, numa_nodes_parsed);
+ pr_info("Faking a node at [mem %pap-%pap]\n", &start, &end);
+
+ return numa_add_memblk(0, start, end + 1);
+}
+
+int __init init_numa_memory(void)
+{
+ int i;
+ int ret;
+ int node;
+
+ for (i = 0; i < NR_CPUS; i++)
+ set_cpuid_to_node(i, NUMA_NO_NODE);
+
+ numa_default_distance();
+ nodes_clear(numa_nodes_parsed);
+ nodes_clear(node_possible_map);
+ nodes_clear(node_online_map);
+ memset(&numa_meminfo, 0, sizeof(numa_meminfo));
+
+ /* Parse SRAT and SLIT if provided by firmware. */
+ ret = acpi_disabled ? fake_numa_init() : acpi_numa_init();
+ if (ret < 0)
+ return ret;
+
+ node_possible_map = numa_nodes_parsed;
+ if (WARN_ON(nodes_empty(node_possible_map)))
+ return -EINVAL;
+
+ init_node_memblock();
+ if (numa_meminfo_cover_memory(&numa_meminfo) == false)
+ return -EINVAL;
+
+ for_each_node_mask(node, node_possible_map) {
+ node_mem_init(node);
+ node_set_online(node);
+ }
+ max_low_pfn = PHYS_PFN(memblock_end_of_DRAM());
+
+ setup_nr_node_ids();
+ loongson_sysconf.nr_nodes = nr_node_ids;
+ loongson_sysconf.cores_per_node = cpumask_weight(&phys_cpus_on_node[0]);
+
+ return 0;
+}
+
+#endif
+
+void __init paging_init(void)
+{
+ unsigned int node;
+ unsigned long zones_size[MAX_NR_ZONES] = {0, };
+
+ for_each_online_node(node) {
+ unsigned long start_pfn, end_pfn;
+
+ get_pfn_range_for_nid(node, &start_pfn, &end_pfn);
+
+ if (end_pfn > max_low_pfn)
+ max_low_pfn = end_pfn;
+ }
+#ifdef CONFIG_ZONE_DMA32
+ zones_size[ZONE_DMA32] = MAX_DMA32_PFN;
+#endif
+ zones_size[ZONE_NORMAL] = max_low_pfn;
+ free_area_init(zones_size);
+}
+
+void __init mem_init(void)
+{
+ high_memory = (void *) __va(max_low_pfn << PAGE_SHIFT);
+ memblock_free_all();
+}
+
+int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+EXPORT_SYMBOL(pcibus_to_node);
diff --git a/arch/loongarch/kernel/perf_event.c b/arch/loongarch/kernel/perf_event.c
new file mode 100644
index 0000000000..0491bf453c
--- /dev/null
+++ b/arch/loongarch/kernel/perf_event.c
@@ -0,0 +1,887 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Linux performance counter support for LoongArch.
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Copyright (C) 2011 Cavium Networks, Inc.
+ * Author: Deng-Cheng Zhu
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long
+user_backtrace(struct perf_callchain_entry_ctx *entry, unsigned long fp)
+{
+ unsigned long err;
+ unsigned long __user *user_frame_tail;
+ struct stack_frame buftail;
+
+ user_frame_tail = (unsigned long __user *)(fp - sizeof(struct stack_frame));
+
+	/* Also check accessibility of one struct stack_frame beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+
+ pagefault_disable();
+ err = __copy_from_user_inatomic(&buftail, user_frame_tail, sizeof(buftail));
+ pagefault_enable();
+
+ if (err || (unsigned long)user_frame_tail >= buftail.fp)
+ return 0;
+
+ perf_callchain_store(entry, buftail.ra);
+
+ return buftail.fp;
+}
+
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long fp;
+
+ if (perf_guest_state()) {
+ /* We don't support guest os callchain now */
+ return;
+ }
+
+ perf_callchain_store(entry, regs->csr_era);
+
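+	/* Register $r22 is the frame pointer ($fp) in the LoongArch ABI. */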
+ fp = regs->regs[22];
+
+ while (entry->nr < entry->max_stack && fp && !((unsigned long)fp & 0xf))
+ fp = user_backtrace(entry, fp);
+}
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct unwind_state state;
+ unsigned long addr;
+
+ for (unwind_start(&state, current, regs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || perf_callchain_store(entry, addr))
+ return;
+ }
+}
+
+#define LOONGARCH_MAX_HWEVENTS 32
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[LOONGARCH_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(LOONGARCH_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+ */
+ unsigned int saved_ctrl[LOONGARCH_MAX_HWEVENTS];
+};
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of LoongArch performance events. */
+struct loongarch_perf_event {
+ unsigned int event_id;
+};
+
+static struct loongarch_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+#define HW_OP_UNSUPPORTED 0xffffffff
+#define CACHE_OP_UNSUPPORTED 0xffffffff
+
+#define PERF_MAP_ALL_UNSUPPORTED \
+ [0 ... PERF_COUNT_HW_MAX - 1] = {HW_OP_UNSUPPORTED}
+
+#define PERF_CACHE_MAP_ALL_UNSUPPORTED \
+[0 ... C(MAX) - 1] = { \
+ [0 ... C(OP_MAX) - 1] = { \
+ [0 ... C(RESULT_MAX) - 1] = {CACHE_OP_UNSUPPORTED}, \
+ }, \
+}
+
+struct loongarch_pmu {
+ u64 max_period;
+ u64 valid_count;
+ u64 overflow;
+ const char *name;
+ unsigned int num_counters;
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ const struct loongarch_perf_event *(*map_raw_event)(u64 config);
+ const struct loongarch_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct loongarch_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+};
+
+static struct loongarch_pmu loongarch_pmu;
+
+#define M_PERFCTL_EVENT(event) (event & CSR_PERFCTRL_EVENT)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (CSR_PERFCTRL_PLV0 | \
+ CSR_PERFCTRL_PLV1 | \
+ CSR_PERFCTRL_PLV2 | \
+ CSR_PERFCTRL_PLV3 | \
+ CSR_PERFCTRL_IE)
+
+#define M_PERFCTL_CONFIG_MASK 0x1f0000
+
+static void pause_local_counters(void);
+static void resume_local_counters(void);
+
+static u64 loongarch_pmu_read_counter(unsigned int idx)
+{
+ u64 val = -1;
+
+ switch (idx) {
+ case 0:
+ val = read_csr_perfcntr0();
+ break;
+ case 1:
+ val = read_csr_perfcntr1();
+ break;
+ case 2:
+ val = read_csr_perfcntr2();
+ break;
+ case 3:
+ val = read_csr_perfcntr3();
+ break;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+
+ return val;
+}
+
+static void loongarch_pmu_write_counter(unsigned int idx, u64 val)
+{
+ switch (idx) {
+ case 0:
+ write_csr_perfcntr0(val);
+ return;
+ case 1:
+ write_csr_perfcntr1(val);
+ return;
+ case 2:
+ write_csr_perfcntr2(val);
+ return;
+ case 3:
+ write_csr_perfcntr3(val);
+ return;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return;
+ }
+}
+
+static unsigned int loongarch_pmu_read_control(unsigned int idx)
+{
+ unsigned int val = -1;
+
+ switch (idx) {
+ case 0:
+ val = read_csr_perfctrl0();
+ break;
+ case 1:
+ val = read_csr_perfctrl1();
+ break;
+ case 2:
+ val = read_csr_perfctrl2();
+ break;
+ case 3:
+ val = read_csr_perfctrl3();
+ break;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+
+ return val;
+}
+
+static void loongarch_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ switch (idx) {
+ case 0:
+ write_csr_perfctrl0(val);
+ return;
+ case 1:
+ write_csr_perfctrl1(val);
+ return;
+ case 2:
+ write_csr_perfctrl2(val);
+ return;
+ case 3:
+ write_csr_perfctrl3(val);
+ return;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return;
+ }
+}
+
+static int loongarch_pmu_alloc_counter(struct cpu_hw_events *cpuc, struct hw_perf_event *hwc)
+{
+ int i;
+
+ for (i = 0; i < loongarch_pmu.num_counters; i++) {
+ if (!test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void loongarch_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ unsigned int cpu;
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+	/* Make sure the counter interrupt is enabled. */
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) | CSR_PERFCTRL_IE;
+
+ cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+}
+
+static void loongarch_pmu_disable_event(int idx)
+{
+ unsigned long flags;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = loongarch_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ loongarch_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
+static int loongarch_pmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ int ret = 0;
+ u64 left = local64_read(&hwc->period_left);
+ u64 period = hwc->sample_period;
+
+ if (unlikely((left + period) & (1ULL << 63))) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ } else if (unlikely((left + period) <= period)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > loongarch_pmu.max_period) {
+ left = loongarch_pmu.max_period;
+ local64_set(&hwc->period_left, left);
+ }
+
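+	/*
+	 * Program the counter so that it overflows (bit 63 set) after
+	 * 'left' more events.
+	 */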
+ local64_set(&hwc->prev_count, loongarch_pmu.overflow - left);
+
+ loongarch_pmu.write_counter(idx, loongarch_pmu.overflow - left);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static void loongarch_pmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 delta;
+ u64 prev_raw_count, new_raw_count;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = loongarch_pmu.read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void loongarch_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ loongarch_pmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ loongarch_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void loongarch_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ loongarch_pmu_disable_event(hwc->idx);
+ barrier();
+ loongarch_pmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int loongarch_pmu_add(struct perf_event *event, int flags)
+{
+ int idx, err = 0;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ perf_pmu_disable(event->pmu);
+
+	/* Look for a free counter for this event. */
+ idx = loongarch_pmu_alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ loongarch_pmu_disable_event(idx);
+ cpuc->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ loongarch_pmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void loongarch_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0 || idx >= loongarch_pmu.num_counters);
+
+ loongarch_pmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void loongarch_pmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ loongarch_pmu_event_update(event, hwc, hwc->idx);
+}
+
+static void loongarch_pmu_enable(struct pmu *pmu)
+{
+ resume_local_counters();
+}
+
+static void loongarch_pmu_disable(struct pmu *pmu)
+{
+ pause_local_counters();
+}
+
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static atomic_t active_events = ATOMIC_INIT(0);
+
+static int get_pmc_irq(void)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+ if (d)
+ return irq_create_mapping(d, INT_PCOV);
+
+ return -EINVAL;
+}
+
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
+ on_each_cpu(reset_counters, NULL, 1);
+ free_irq(get_pmc_irq(), &loongarch_pmu);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static void handle_associated_event(struct cpu_hw_events *cpuc, int idx,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ loongarch_pmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!loongarch_pmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, data, regs))
+ loongarch_pmu_disable_event(idx);
+}
+
+static irqreturn_t pmu_handle_irq(int irq, void *dev)
+{
+ int n;
+ int handled = IRQ_NONE;
+ uint64_t counter;
+ struct pt_regs *regs;
+ struct perf_sample_data data;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ /*
+ * First we pause the local counters, so that when we are locked
+ * here, the counters are all paused. When it gets locked due to
+ * perf_disable(), the timer interrupt handler will be delayed.
+ *
+ * See also loongarch_pmu_start().
+ */
+ pause_local_counters();
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, 0);
+
+ for (n = 0; n < loongarch_pmu.num_counters; n++) {
+ if (test_bit(n, cpuc->used_mask)) {
+ counter = loongarch_pmu.read_counter(n);
+ if (counter & loongarch_pmu.overflow) {
+ handle_associated_event(cpuc, n, &data, regs);
+ handled = IRQ_HANDLED;
+ }
+ }
+ }
+
+ resume_local_counters();
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ irq_work_run();
+
+ return handled;
+}
+
+static int loongarch_pmu_event_init(struct perf_event *event)
+{
+ int r, irq;
+ unsigned long flags;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ /* Init it to avoid false validate_group */
+ event->hw.event_base = 0xffffffff;
+ return -ENOENT;
+ }
+
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
+ return -ENODEV;
+
+ irq = get_pmc_irq();
+ flags = IRQF_PERCPU | IRQF_NOBALANCING | IRQF_NO_THREAD | IRQF_NO_SUSPEND | IRQF_SHARED;
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0) {
+ r = request_irq(irq, pmu_handle_irq, flags, "Perf_PMU", &loongarch_pmu);
+ if (r < 0) {
+ mutex_unlock(&pmu_reserve_mutex);
+ pr_warn("PMU IRQ request failed\n");
+ return -ENODEV;
+ }
+ }
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+ .pmu_enable = loongarch_pmu_enable,
+ .pmu_disable = loongarch_pmu_disable,
+ .event_init = loongarch_pmu_event_init,
+ .add = loongarch_pmu_add,
+ .del = loongarch_pmu_del,
+ .start = loongarch_pmu_start,
+ .stop = loongarch_pmu_stop,
+ .read = loongarch_pmu_read,
+};
+
+static unsigned int loongarch_pmu_perf_event_encode(const struct loongarch_perf_event *pev)
+{
+ return M_PERFCTL_EVENT(pev->event_id);
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_general_event(int idx)
+{
+ const struct loongarch_perf_event *pev;
+
+ pev = &(*loongarch_pmu.general_event_map)[idx];
+
+ if (pev->event_id == HW_OP_UNSUPPORTED)
+ return ERR_PTR(-ENOENT);
+
+ return pev;
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct loongarch_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*loongarch_pmu.cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->event_id == CACHE_OP_UNSUPPORTED)
+ return ERR_PTR(-ENOENT);
+
+ return pev;
+}
+
+static int validate_group(struct perf_event *event)
+{
+ struct cpu_hw_events fake_cpuc;
+ struct perf_event *sibling, *leader = event->group_leader;
+
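+	/*
+	 * Simulate counter allocation for the whole group on a scratch
+	 * cpu_hw_events to verify that every member can get a counter.
+	 */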
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (loongarch_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, leader) {
+ if (loongarch_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ return -EINVAL;
+ }
+
+ if (loongarch_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+static void reset_counters(void *arg)
+{
+ int n;
+ int counters = loongarch_pmu.num_counters;
+
+ for (n = 0; n < counters; n++) {
+ loongarch_pmu_write_control(n, 0);
+ loongarch_pmu.write_counter(n, 0);
+ }
+}
+
+static const struct loongarch_perf_event loongson_event_map[PERF_COUNT_HW_MAX] = {
+ PERF_MAP_ALL_UNSUPPORTED,
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00 },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01 },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x08 },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x09 },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02 },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x03 },
+};
+
+static const struct loongarch_perf_event loongson_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+PERF_CACHE_MAP_ALL_UNSUPPORTED,
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x8 },
+ [C(RESULT_MISS)] = { 0x9 },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x8 },
+ [C(RESULT_MISS)] = { 0x9 },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0xaa },
+ [C(RESULT_MISS)] = { 0xa9 },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x6 },
+ [C(RESULT_MISS)] = { 0x7 },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0xc },
+ [C(RESULT_MISS)] = { 0xd },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0xc },
+ [C(RESULT_MISS)] = { 0xd },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x3b },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x4 },
+ [C(RESULT_MISS)] = { 0x3c },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x4 },
+ [C(RESULT_MISS)] = { 0x3c },
+ },
+},
+[C(BPU)] = {
+	/* Use the same event codes as *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02 },
+ [C(RESULT_MISS)] = { 0x03 },
+ },
+},
+};
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ int err;
+ struct hw_perf_event *hwc = &event->hw;
+ struct perf_event_attr *attr = &event->attr;
+ const struct loongarch_perf_event *pev;
+
+	/* Return the LoongArch event descriptor for a generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = loongarch_pmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = loongarch_pmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = loongarch_pmu.map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+	 * We allow maximum flexibility in how each individual counter shared
+	 * by a single CPU operates (the mode exclusion and the range).
+ */
+ hwc->config_base = CSR_PERFCTRL_IE;
+
+ hwc->event_base = loongarch_pmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
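+	/*
+	 * Map the perf exclusion flags onto the LoongArch privilege levels:
+	 * PLV0 is kernel, PLV3 (and PLV2) is user, PLV1 is hypervisor.
+	 */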
+ if (!attr->exclude_user) {
+ hwc->config_base |= CSR_PERFCTRL_PLV3;
+ hwc->config_base |= CSR_PERFCTRL_PLV2;
+ }
+	if (!attr->exclude_kernel)
+		hwc->config_base |= CSR_PERFCTRL_PLV0;
+	if (!attr->exclude_hv)
+		hwc->config_base |= CSR_PERFCTRL_PLV1;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event can belong to another cpu. We do not assign a local
+ * counter for it for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = loongarch_pmu.max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event)
+ err = validate_group(event);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ unsigned long flags;
+ int ctr = loongarch_pmu.num_counters;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ local_irq_save(flags);
+ do {
+ ctr--;
+ cpuc->saved_ctrl[ctr] = loongarch_pmu_read_control(ctr);
+ loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ } while (ctr > 0);
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ int ctr = loongarch_pmu.num_counters;
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+
+ do {
+ ctr--;
+ loongarch_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+ } while (ctr > 0);
+}
+
+static const struct loongarch_perf_event *loongarch_pmu_map_raw_event(u64 config)
+{
+ raw_event.event_id = M_PERFCTL_EVENT(config);
+
+ return &raw_event;
+}
+
+static int __init init_hw_perf_events(void)
+{
+ int counters;
+
+ if (!cpu_has_pmp)
+ return -ENODEV;
+
+ pr_info("Performance counters: ");
+ counters = ((read_cpucfg(LOONGARCH_CPUCFG6) & CPUCFG6_PMNUM) >> 4) + 1;
+
+ loongarch_pmu.num_counters = counters;
+ loongarch_pmu.max_period = (1ULL << 63) - 1;
+ loongarch_pmu.valid_count = (1ULL << 63) - 1;
+ loongarch_pmu.overflow = 1ULL << 63;
+ loongarch_pmu.name = "loongarch/loongson64";
+ loongarch_pmu.read_counter = loongarch_pmu_read_counter;
+ loongarch_pmu.write_counter = loongarch_pmu_write_counter;
+ loongarch_pmu.map_raw_event = loongarch_pmu_map_raw_event;
+ loongarch_pmu.general_event_map = &loongson_event_map;
+ loongarch_pmu.cache_event_map = &loongson_cache_map;
+
+ on_each_cpu(reset_counters, NULL, 1);
+
+ pr_cont("%s PMU enabled, %d %d-bit counters available to each CPU.\n",
+ loongarch_pmu.name, counters, 64);
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/loongarch/kernel/perf_regs.c b/arch/loongarch/kernel/perf_regs.c
new file mode 100644
index 0000000000..263ac4ab5a
--- /dev/null
+++ b/arch/loongarch/kernel/perf_regs.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2013 Cavium, Inc.
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_32BIT
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+#else /* Must be CONFIG_64BIT */
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+#endif /* CONFIG_32BIT */
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask)
+ return -EINVAL;
+ if (mask & ~((1ull << PERF_REG_LOONGARCH_MAX) - 1))
+ return -EINVAL;
+ return 0;
+}
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_LOONGARCH_MAX))
+ return 0;
+
+ if ((u32)idx == PERF_REG_LOONGARCH_PC)
+ return regs->csr_era;
+
+ return regs->regs[idx];
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/loongarch/kernel/proc.c b/arch/loongarch/kernel/proc.c
new file mode 100644
index 0000000000..0d33cbc47e
--- /dev/null
+++ b/arch/loongarch/kernel/proc.c
@@ -0,0 +1,130 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/idle.h>
+#include <asm/processor.h>
+#include <asm/time.h>
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ unsigned long n = (unsigned long) v - 1;
+ unsigned int version = cpu_data[n].processor_id & 0xff;
+ unsigned int fp_version = cpu_data[n].fpu_vers;
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(n))
+ return 0;
+#endif
+
+ /*
+	 * For the first processor, also print the system type.
+ */
+ if (n == 0)
+ seq_printf(m, "system type\t\t: %s\n\n", get_system_type());
+
+ seq_printf(m, "processor\t\t: %ld\n", n);
+ seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
+ seq_printf(m, "core\t\t\t: %d\n", cpu_data[n].core);
+ seq_printf(m, "global_id\t\t: %d\n", cpu_data[n].global_id);
+ seq_printf(m, "CPU Family\t\t: %s\n", __cpu_family[n]);
+ seq_printf(m, "Model Name\t\t: %s\n", __cpu_full_name[n]);
+ seq_printf(m, "CPU Revision\t\t: 0x%02x\n", version);
+ seq_printf(m, "FPU Revision\t\t: 0x%02x\n", fp_version);
+ seq_printf(m, "CPU MHz\t\t\t: %llu.%02llu\n",
+ cpu_clock_freq / 1000000, (cpu_clock_freq / 10000) % 100);
+ seq_printf(m, "BogoMIPS\t\t: %llu.%02llu\n",
+ (lpj_fine * cpu_clock_freq / const_clock_freq) / (500000/HZ),
+ ((lpj_fine * cpu_clock_freq / const_clock_freq) / (5000/HZ)) % 100);
+ seq_printf(m, "TLB Entries\t\t: %d\n", cpu_data[n].tlbsize);
+ seq_printf(m, "Address Sizes\t\t: %d bits physical, %d bits virtual\n",
+ cpu_pabits + 1, cpu_vabits + 1);
+
+ seq_printf(m, "ISA\t\t\t:");
+ if (cpu_has_loongarch32)
+ seq_printf(m, " loongarch32");
+ if (cpu_has_loongarch64)
+ seq_printf(m, " loongarch64");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "Features\t\t:");
+ if (cpu_has_cpucfg) seq_printf(m, " cpucfg");
+ if (cpu_has_lam) seq_printf(m, " lam");
+ if (cpu_has_ual) seq_printf(m, " ual");
+ if (cpu_has_fpu) seq_printf(m, " fpu");
+ if (cpu_has_lsx) seq_printf(m, " lsx");
+ if (cpu_has_lasx) seq_printf(m, " lasx");
+ if (cpu_has_crc32) seq_printf(m, " crc32");
+ if (cpu_has_complex) seq_printf(m, " complex");
+ if (cpu_has_crypto) seq_printf(m, " crypto");
+ if (cpu_has_ptw) seq_printf(m, " ptw");
+ if (cpu_has_lvz) seq_printf(m, " lvz");
+ if (cpu_has_lbt_x86) seq_printf(m, " lbt_x86");
+ if (cpu_has_lbt_arm) seq_printf(m, " lbt_arm");
+ if (cpu_has_lbt_mips) seq_printf(m, " lbt_mips");
+ seq_printf(m, "\n");
+
+ seq_printf(m, "Hardware Watchpoint\t: %s",
+ cpu_has_watch ? "yes, " : "no\n");
+ if (cpu_has_watch) {
+ seq_printf(m, "iwatch count: %d, dwatch count: %d\n",
+ cpu_data[n].watch_ireg_count, cpu_data[n].watch_dreg_count);
+ }
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
+ seq_printf(m, "\n");
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < nr_cpu_ids ? (void *)(i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
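
show_cpuinfo() ends each per-CPU block by running the proc_cpuinfo notifier chain, so platform code can append its own lines to /proc/cpuinfo through register_proc_cpuinfo_notifier(). A minimal sketch of such a consumer follows; the demo_* names are illustrative, and the layout of struct proc_cpuinfo_notifier_args (declared in a header outside this hunk, assumed to be asm/bootinfo.h) is inferred from the .m/.n assignments above.

#include <linux/init.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <asm/bootinfo.h>	/* assumed home of the notifier declarations */

static int demo_cpuinfo_notify(struct notifier_block *nb,
			       unsigned long action, void *data)
{
	struct proc_cpuinfo_notifier_args *args = data;

	/* Appends one extra line to the per-CPU block of /proc/cpuinfo. */
	seq_printf(args->m, "demo feature\t\t: enabled (cpu %lu)\n", args->n);

	return NOTIFY_OK;
}

static struct notifier_block demo_cpuinfo_nb = {
	.notifier_call = demo_cpuinfo_notify,
};

static int __init demo_cpuinfo_init(void)
{
	return register_proc_cpuinfo_notifier(&demo_cpuinfo_nb);
}
arch_initcall(demo_cpuinfo_init);
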
diff --git a/arch/loongarch/kernel/process.c b/arch/loongarch/kernel/process.c
new file mode 100644
index 0000000000..f2ff8b5d59
--- /dev/null
+++ b/arch/loongarch/kernel/process.c
@@ -0,0 +1,375 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
+ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/cpu.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/export.h>
+#include <linux/ptrace.h>
+#include <linux/mman.h>
+#include <linux/personality.h>
+#include <linux/sys.h>
+#include <linux/completion.h>
+#include <linux/kallsyms.h>
+#include <linux/random.h>
+#include <linux/prctl.h>
+#include <linux/nmi.h>
+
+#include <asm/asm.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/elf.h>
+#include <asm/exec.h>
+#include <asm/fpu.h>
+#include <asm/lbt.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/loongarch.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/unwind.h>
+#include <asm/vdso.h>
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+/*
+ * Idle related variables and functions
+ */
+
+unsigned long boot_option_idle_override = IDLE_NO_OVERRIDE;
+EXPORT_SYMBOL(boot_option_idle_override);
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long crmd;
+ unsigned long prmd;
+ unsigned long euen;
+
+ /* New thread loses kernel privileges. */
+ crmd = regs->csr_crmd & ~(PLV_MASK);
+ crmd |= PLV_USER;
+ regs->csr_crmd = crmd;
+
+ prmd = regs->csr_prmd & ~(PLV_MASK);
+ prmd |= PLV_USER;
+ regs->csr_prmd = prmd;
+
+ euen = regs->csr_euen & ~(CSR_EUEN_FPEN);
+ regs->csr_euen = euen;
+ lose_fpu(0);
+ lose_lbt(0);
+ current->thread.fpu.fcsr = boot_cpu_data.fpu_csr0;
+
+ clear_thread_flag(TIF_LSX_CTX_LIVE);
+ clear_thread_flag(TIF_LASX_CTX_LIVE);
+ clear_thread_flag(TIF_LBT_CTX_LIVE);
+ clear_used_math();
+ regs->csr_era = pc;
+ regs->regs[3] = sp;
+}
+
+void flush_thread(void)
+{
+ flush_ptrace_hw_breakpoint(current);
+}
+
+void exit_thread(struct task_struct *tsk)
+{
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_fpu_owner()) {
+ if (is_lasx_enabled())
+ save_lasx(current);
+ else if (is_lsx_enabled())
+ save_lsx(current);
+ else
+ save_fp(current);
+ }
+
+ preempt_enable();
+
+ if (!used_math())
+ memcpy(dst, src, offsetof(struct task_struct, thread.fpu.fpr));
+ else
+ memcpy(dst, src, offsetof(struct task_struct, thread.lbt.scr0));
+
+#ifdef CONFIG_CPU_HAS_LBT
+ memcpy(&dst->thread.lbt, &src->thread.lbt, sizeof(struct loongarch_lbt));
+#endif
+
+ return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+ unsigned long childksp;
+ unsigned long tls = args->tls;
+ unsigned long usp = args->stack;
+ unsigned long clone_flags = args->flags;
+ struct pt_regs *childregs, *regs = current_pt_regs();
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE;
+
+ /* set up new TSS. */
+ childregs = (struct pt_regs *) childksp - 1;
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.sched_cfa = 0;
+ p->thread.csr_euen = 0;
+ p->thread.csr_crmd = csr_read32(LOONGARCH_CSR_CRMD);
+ p->thread.csr_prmd = csr_read32(LOONGARCH_CSR_PRMD);
+ p->thread.csr_ecfg = csr_read32(LOONGARCH_CSR_ECFG);
+ if (unlikely(args->fn)) {
+ /* kernel thread */
+ p->thread.reg03 = childksp;
+ p->thread.reg23 = (unsigned long)args->fn;
+ p->thread.reg24 = (unsigned long)args->fn_arg;
+ p->thread.reg01 = (unsigned long)ret_from_kernel_thread;
+ p->thread.sched_ra = (unsigned long)ret_from_kernel_thread;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->csr_euen = p->thread.csr_euen;
+ childregs->csr_crmd = p->thread.csr_crmd;
+ childregs->csr_prmd = p->thread.csr_prmd;
+ childregs->csr_ecfg = p->thread.csr_ecfg;
+ goto out;
+ }
+
+ /* user thread */
+ *childregs = *regs;
+ childregs->regs[4] = 0; /* Child gets zero as return value */
+ if (usp)
+ childregs->regs[3] = usp;
+
+ p->thread.reg03 = (unsigned long) childregs;
+ p->thread.reg01 = (unsigned long) ret_from_fork;
+ p->thread.sched_ra = (unsigned long) ret_from_fork;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ childregs->csr_euen = 0;
+
+ if (clone_flags & CLONE_SETTLS)
+ childregs->regs[2] = tls;
+
+out:
+ ptrace_hw_copy_thread(p);
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDSIMD);
+ clear_tsk_thread_flag(p, TIF_USEDLBT);
+ clear_tsk_thread_flag(p, TIF_LSX_CTX_LIVE);
+ clear_tsk_thread_flag(p, TIF_LASX_CTX_LIVE);
+ clear_tsk_thread_flag(p, TIF_LBT_CTX_LIVE);
+
+ return 0;
+}
+
+unsigned long __get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+ struct unwind_state state;
+
+ if (!try_get_task_stack(task))
+ return 0;
+
+ for (unwind_start(&state, task, NULL);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ pc = unwind_get_return_address(&state);
+ if (!pc)
+ break;
+ if (in_sched_functions(pc))
+ continue;
+ break;
+ }
+
+ put_task_stack(task);
+
+ return pc;
+}
+
+bool in_irq_stack(unsigned long stack, struct stack_info *info)
+{
+ unsigned long nextsp;
+ unsigned long begin = (unsigned long)this_cpu_read(irq_stack);
+ unsigned long end = begin + IRQ_STACK_START;
+
+ if (stack < begin || stack >= end)
+ return false;
+
+ nextsp = *(unsigned long *)end;
+ if (nextsp & (SZREG - 1))
+ return false;
+
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = nextsp;
+ info->type = STACK_TYPE_IRQ;
+
+ return true;
+}
+
+bool in_task_stack(unsigned long stack, struct task_struct *task,
+ struct stack_info *info)
+{
+ unsigned long begin = (unsigned long)task_stack_page(task);
+ unsigned long end = begin + THREAD_SIZE;
+
+ if (stack < begin || stack >= end)
+ return false;
+
+ info->begin = begin;
+ info->end = end;
+ info->next_sp = 0;
+ info->type = STACK_TYPE_TASK;
+
+ return true;
+}
+
+int get_stack_info(unsigned long stack, struct task_struct *task,
+ struct stack_info *info)
+{
+ task = task ? : current;
+
+ if (!stack || stack & (SZREG - 1))
+ goto unknown;
+
+ if (in_task_stack(stack, task, info))
+ return 0;
+
+ if (task != current)
+ goto unknown;
+
+ if (in_irq_stack(stack, info))
+ return 0;
+
+unknown:
+ info->type = STACK_TYPE_UNKNOWN;
+ return -EINVAL;
+}
+
+unsigned long stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ /* Space for the VDSO & data page */
+ top -= PAGE_ALIGN(current->thread.vdso->size);
+ top -= VVAR_SIZE;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABI and a 16-byte boundary for the 64-bit ABI.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= get_random_u32_below(PAGE_SIZE);
+
+ return sp & STACK_ALIGN;
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd);
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
+
+ csd = &per_cpu(backtrace_csd, cpu);
+ csd->func = handle_backtrace;
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, int exclude_cpu)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_cpu, raise_backtrace);
+}
+
+#ifdef CONFIG_64BIT
+void loongarch_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = LOONGARCH_EF_R1; i <= LOONGARCH_EF_R31; i++) {
+ uregs[i] = regs->regs[i - LOONGARCH_EF_R0];
+ }
+
+ uregs[LOONGARCH_EF_ORIG_A0] = regs->orig_a0;
+ uregs[LOONGARCH_EF_CSR_ERA] = regs->csr_era;
+ uregs[LOONGARCH_EF_CSR_BADV] = regs->csr_badvaddr;
+ uregs[LOONGARCH_EF_CSR_CRMD] = regs->csr_crmd;
+ uregs[LOONGARCH_EF_CSR_PRMD] = regs->csr_prmd;
+ uregs[LOONGARCH_EF_CSR_EUEN] = regs->csr_euen;
+ uregs[LOONGARCH_EF_CSR_ECFG] = regs->csr_ecfg;
+ uregs[LOONGARCH_EF_CSR_ESTAT] = regs->csr_estat;
+}
+#endif /* CONFIG_64BIT */
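
in_task_stack(), in_irq_stack() and get_stack_info() above are what the unwinder uses to decide whether a candidate stack pointer may safely be dereferenced. A minimal sketch of a caller, assuming struct stack_info and the STACK_TYPE_* constants come from asm/stacktrace.h (outside this hunk); demo_sp_on_known_stack() is an illustrative name.

#include <linux/sched.h>
#include <asm/stacktrace.h>	/* assumed home of struct stack_info and helpers */

/*
 * Returns true only when sp falls inside a stack region the kernel knows
 * about (the task stack, or the per-CPU IRQ stack for the current task).
 */
static bool demo_sp_on_known_stack(unsigned long sp, struct task_struct *task)
{
	struct stack_info info;

	if (get_stack_info(sp, task, &info))
		return false;			/* STACK_TYPE_UNKNOWN */

	/* sp lies in [info.begin, info.end); info.next_sp links onward. */
	return info.type == STACK_TYPE_TASK || info.type == STACK_TYPE_IRQ;
}
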
diff --git a/arch/loongarch/kernel/ptrace.c b/arch/loongarch/kernel/ptrace.c
new file mode 100644
index 0000000000..c114c5ef13
--- /dev/null
+++ b/arch/loongarch/kernel/ptrace.c
@@ -0,0 +1,1080 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ */
+#include <linux/kernel.h>
+#include <linux/audit.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/hw_breakpoint.h>
+#include <linux/mm.h>
+#include <linux/nospec.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/security.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/seccomp.h>
+#include <linux/thread_info.h>
+#include <linux/uaccess.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/fpu.h>
+#include <asm/lbt.h>
+#include <asm/loongarch.h>
+#include <asm/page.h>
+#include <asm/pgtable.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/reg.h>
+#include <asm/syscall.h>
+
+static void init_fp_ctx(struct task_struct *target)
+{
+ /* The target already has context */
+ if (tsk_used_math(target))
+ return;
+
+ /* Begin with data registers set to all 1s... */
+ memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
+ set_stopped_child_used_math(target);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ clear_tsk_thread_flag(child, TIF_SINGLESTEP);
+}
+
+/* regset get/set implementations */
+
+static int gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+ struct pt_regs *regs = task_pt_regs(target);
+
+ r = membuf_write(&to, &regs->regs, sizeof(u64) * GPR_NUM);
+ r = membuf_write(&to, &regs->orig_a0, sizeof(u64));
+ r = membuf_write(&to, &regs->csr_era, sizeof(u64));
+ r = membuf_write(&to, &regs->csr_badvaddr, sizeof(u64));
+
+ return r;
+}
+
+static int gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err;
+ int a0_start = sizeof(u64) * GPR_NUM;
+ int era_start = a0_start + sizeof(u64);
+ int badvaddr_start = era_start + sizeof(u64);
+ struct pt_regs *regs = task_pt_regs(target);
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->regs,
+ 0, a0_start);
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->orig_a0,
+ a0_start, a0_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->csr_era,
+ era_start, era_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &regs->csr_badvaddr,
+ badvaddr_start, badvaddr_start + sizeof(u64));
+
+ return err;
+}
+
+
+/*
+ * Get the general floating-point registers.
+ */
+static int gfpr_get(struct task_struct *target, struct membuf *to)
+{
+ return membuf_write(to, &target->thread.fpu.fpr,
+ sizeof(elf_fpreg_t) * NUM_FPU_REGS);
+}
+
+static int gfpr_get_simd(struct task_struct *target, struct membuf *to)
+{
+ int i, r;
+ u64 fpr_val;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ fpr_val = get_fpr64(&target->thread.fpu.fpr[i], 0);
+ r = membuf_write(to, &fpr_val, sizeof(elf_fpreg_t));
+ }
+
+ return r;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC and FCSR registers separately.
+ */
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+
+ save_fpu_regs(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ r = gfpr_get(target, &to);
+ else
+ r = gfpr_get_simd(target, &to);
+
+ r = membuf_write(&to, &target->thread.fpu.fcc, sizeof(target->thread.fpu.fcc));
+ r = membuf_write(&to, &target->thread.fpu.fcsr, sizeof(target->thread.fpu.fcsr));
+
+ return r;
+}
+
+static int gfpr_set(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ return user_regset_copyin(pos, count, kbuf, ubuf,
+ &target->thread.fpu.fpr,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+static int gfpr_set_simd(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ int i, err;
+ u64 fpr_val;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+ err = user_regset_copyin(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+/*
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCC and FCSR registers separately.
+ */
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const int fcc_start = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ const int fcsr_start = fcc_start + sizeof(u64);
+ int err;
+
+ BUG_ON(count % sizeof(elf_fpreg_t));
+ if (pos + count > sizeof(elf_fpregset_t))
+ return -EIO;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = gfpr_set(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = gfpr_set_simd(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
+
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcc, fcc_start,
+ fcc_start + sizeof(u64));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fcsr, fcsr_start,
+ fcsr_start + sizeof(u32));
+
+ return err;
+}
+
+static int cfg_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int i, r;
+ u32 cfg_val;
+
+ i = 0;
+ while (to.left > 0) {
+ cfg_val = read_cpucfg(i++);
+ r = membuf_write(&to, &cfg_val, sizeof(u32));
+ }
+
+ return r;
+}
+
+/*
+ * CFG registers are read-only.
+ */
+static int cfg_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ return 0;
+}
+
+#ifdef CONFIG_CPU_HAS_LSX
+
+static void copy_pad_fprs(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf *to, unsigned int live_sz)
+{
+ int i, j;
+ unsigned long long fill = ~0ull;
+ unsigned int cp_sz, pad_sz;
+
+ cp_sz = min(regset->size, live_sz);
+ pad_sz = regset->size - cp_sz;
+ WARN_ON(pad_sz % sizeof(fill));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
+ for (j = 0; j < (pad_sz / sizeof(fill)); j++) {
+ membuf_store(to, fill);
+ }
+ }
+}
+
+static int simd_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+
+ save_fpu_regs(target);
+
+ if (!tsk_used_math(target)) {
+ /* The task hasn't used FP or LSX, fill with 0xff */
+ copy_pad_fprs(target, regset, &to, 0);
+ } else if (!test_tsk_thread_flag(target, TIF_LSX_CTX_LIVE)) {
+ /* Copy scalar FP context, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to, 8);
+#ifdef CONFIG_CPU_HAS_LASX
+ } else if (!test_tsk_thread_flag(target, TIF_LASX_CTX_LIVE)) {
+ /* Copy LSX 128 Bit context, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to, 16);
+#endif
+ } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ membuf_write(&to, &target->thread.fpu.fpr, wr_size);
+ } else {
+ /* Copy as much context as possible, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to, sizeof(target->thread.fpu.fpr[0]));
+ }
+
+ return 0;
+}
+
+static int simd_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ unsigned int cp_sz;
+ int i, err, start;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr,
+ 0, wr_size);
+ } else {
+ /* Copy as much context as possible */
+ cp_sz = min_t(unsigned int, regset->size,
+ sizeof(target->thread.fpu.fpr[0]));
+
+ i = start = err = 0;
+ for (; i < NUM_FPU_REGS; i++, start += regset->size) {
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr[i],
+ start, start + cp_sz);
+ }
+ }
+
+ return err;
+}
+
+#endif /* CONFIG_CPU_HAS_LSX */
+
+#ifdef CONFIG_CPU_HAS_LBT
+static int lbt_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ int r;
+
+ r = membuf_write(&to, &target->thread.lbt.scr0, sizeof(target->thread.lbt.scr0));
+ r = membuf_write(&to, &target->thread.lbt.scr1, sizeof(target->thread.lbt.scr1));
+ r = membuf_write(&to, &target->thread.lbt.scr2, sizeof(target->thread.lbt.scr2));
+ r = membuf_write(&to, &target->thread.lbt.scr3, sizeof(target->thread.lbt.scr3));
+ r = membuf_write(&to, &target->thread.lbt.eflags, sizeof(u32));
+ r = membuf_write(&to, &target->thread.fpu.ftop, sizeof(u32));
+
+ return r;
+}
+
+static int lbt_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int err = 0;
+ const int eflags_start = 4 * sizeof(target->thread.lbt.scr0);
+ const int ftop_start = eflags_start + sizeof(u32);
+
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.lbt.scr0,
+ 0, 4 * sizeof(target->thread.lbt.scr0));
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.lbt.eflags,
+ eflags_start, ftop_start);
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.ftop,
+ ftop_start, ftop_start + sizeof(u32));
+
+ return err;
+}
+#endif /* CONFIG_CPU_HAS_LBT */
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+
+/*
+ * Handle hitting a HW-breakpoint.
+ */
+static void ptrace_hbptriggered(struct perf_event *bp,
+ struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ int i;
+ struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);
+
+ for (i = 0; i < LOONGARCH_MAX_BRP; ++i)
+ if (current->thread.hbp_break[i] == bp)
+ break;
+
+ for (i = 0; i < LOONGARCH_MAX_WRP; ++i)
+ if (current->thread.hbp_watch[i] == bp)
+ break;
+
+ force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
+}
+
+static struct perf_event *ptrace_hbp_get_event(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ if (idx >= LOONGARCH_MAX_BRP)
+ return ERR_PTR(-EINVAL);
+ idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
+ bp = tsk->thread.hbp_break[idx];
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ if (idx >= LOONGARCH_MAX_WRP)
+ return ERR_PTR(-EINVAL);
+ idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
+ bp = tsk->thread.hbp_watch[idx];
+ break;
+ }
+
+ return bp;
+}
+
+static int ptrace_hbp_set_event(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx,
+ struct perf_event *bp)
+{
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ if (idx >= LOONGARCH_MAX_BRP)
+ return -EINVAL;
+ idx = array_index_nospec(idx, LOONGARCH_MAX_BRP);
+ tsk->thread.hbp_break[idx] = bp;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ if (idx >= LOONGARCH_MAX_WRP)
+ return -EINVAL;
+ idx = array_index_nospec(idx, LOONGARCH_MAX_WRP);
+ tsk->thread.hbp_watch[idx] = bp;
+ break;
+ }
+
+ return 0;
+}
+
+static struct perf_event *ptrace_hbp_create(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ int err, type;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ type = HW_BREAKPOINT_X;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ type = HW_BREAKPOINT_RW;
+ break;
+ default:
+ return ERR_PTR(-EINVAL);
+ }
+
+ ptrace_breakpoint_init(&attr);
+
+ /*
+ * Initialise fields to sane defaults
+ * (i.e. values that will pass validation).
+ */
+ attr.bp_addr = 0;
+ attr.bp_len = HW_BREAKPOINT_LEN_4;
+ attr.bp_type = type;
+ attr.disabled = 1;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL, tsk);
+ if (IS_ERR(bp))
+ return bp;
+
+ err = ptrace_hbp_set_event(note_type, tsk, idx, bp);
+ if (err)
+ return ERR_PTR(err);
+
+ return bp;
+}
+
+static int ptrace_hbp_fill_attr_ctrl(unsigned int note_type,
+ struct arch_hw_breakpoint_ctrl ctrl,
+ struct perf_event_attr *attr)
+{
+ int err, len, type, offset;
+
+ err = arch_bp_generic_fields(ctrl, &len, &type, &offset);
+ if (err)
+ return err;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ if ((type & HW_BREAKPOINT_X) != type)
+ return -EINVAL;
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ if ((type & HW_BREAKPOINT_RW) != type)
+ return -EINVAL;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ attr->bp_len = len;
+ attr->bp_type = type;
+ attr->bp_addr += offset;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_resource_info(unsigned int note_type, u64 *info)
+{
+ u8 num;
+ u64 reg = 0;
+
+ switch (note_type) {
+ case NT_LOONGARCH_HW_BREAK:
+ num = hw_breakpoint_slots(TYPE_INST);
+ break;
+ case NT_LOONGARCH_HW_WATCH:
+ num = hw_breakpoint_slots(TYPE_DATA);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ *info = reg | num;
+
+ return 0;
+}
+
+static struct perf_event *ptrace_hbp_get_initialised_bp(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (!bp)
+ bp = ptrace_hbp_create(note_type, tsk, idx);
+
+ return bp;
+}
+
+static int ptrace_hbp_get_ctrl(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u32 *ctrl)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *ctrl = bp ? encode_ctrl_reg(counter_arch_bp(bp)->ctrl) : 0;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_mask(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 *mask)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *mask = bp ? counter_arch_bp(bp)->mask : 0;
+
+ return 0;
+}
+
+static int ptrace_hbp_get_addr(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 *addr)
+{
+ struct perf_event *bp = ptrace_hbp_get_event(note_type, tsk, idx);
+
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ *addr = bp ? counter_arch_bp(bp)->address : 0;
+
+ return 0;
+}
+
+static int ptrace_hbp_set_ctrl(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u32 uctrl)
+{
+ int err;
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint_ctrl ctrl;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+ decode_ctrl_reg(uctrl, &ctrl);
+ err = ptrace_hbp_fill_attr_ctrl(note_type, ctrl, &attr);
+ if (err)
+ return err;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int ptrace_hbp_set_mask(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 mask)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint *info;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+ info = counter_arch_bp(bp);
+ info->mask = mask;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int ptrace_hbp_set_addr(unsigned int note_type,
+ struct task_struct *tsk,
+ unsigned long idx, u64 addr)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+
+ bp = ptrace_hbp_get_initialised_bp(note_type, tsk, idx);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+
+ return modify_user_hw_breakpoint(bp, &attr);
+}
+
+#define PTRACE_HBP_ADDR_SZ sizeof(u64)
+#define PTRACE_HBP_MASK_SZ sizeof(u64)
+#define PTRACE_HBP_CTRL_SZ sizeof(u32)
+#define PTRACE_HBP_PAD_SZ sizeof(u32)
+
+static int hw_break_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 info;
+ u32 ctrl;
+ u64 addr, mask;
+ int ret, idx = 0;
+ unsigned int note_type = regset->core_note_type;
+
+ /* Resource info */
+ ret = ptrace_hbp_get_resource_info(note_type, &info);
+ if (ret)
+ return ret;
+
+ membuf_write(&to, &info, sizeof(info));
+
+ /* (address, mask, ctrl) registers */
+ while (to.left) {
+ ret = ptrace_hbp_get_addr(note_type, target, idx, &addr);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_get_mask(note_type, target, idx, &mask);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_get_ctrl(note_type, target, idx, &ctrl);
+ if (ret)
+ return ret;
+
+ membuf_store(&to, addr);
+ membuf_store(&to, mask);
+ membuf_store(&to, ctrl);
+ membuf_zero(&to, sizeof(u32));
+ idx++;
+ }
+
+ return 0;
+}
+
+static int hw_break_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ u32 ctrl;
+ u64 addr, mask;
+ int ret, idx = 0, offset, limit;
+ unsigned int note_type = regset->core_note_type;
+
+ /* Resource info */
+ offset = offsetof(struct user_watch_state, dbg_regs);
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, 0, offset);
+
+ /* (address, mask, ctrl) registers */
+ limit = regset->n * regset->size;
+ while (count && offset < limit) {
+ if (count < PTRACE_HBP_ADDR_SZ)
+ return -EINVAL;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &addr,
+ offset, offset + PTRACE_HBP_ADDR_SZ);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_set_addr(note_type, target, idx, addr);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_ADDR_SZ;
+
+ if (!count)
+ break;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &mask,
+ offset, offset + PTRACE_HBP_MASK_SZ);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_set_mask(note_type, target, idx, mask);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_MASK_SZ;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl,
+ offset, offset + PTRACE_HBP_CTRL_SZ);
+ if (ret)
+ return ret;
+
+ ret = ptrace_hbp_set_ctrl(note_type, target, idx, ctrl);
+ if (ret)
+ return ret;
+ offset += PTRACE_HBP_CTRL_SZ;
+
+ user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ offset, offset + PTRACE_HBP_PAD_SZ);
+ offset += PTRACE_HBP_PAD_SZ;
+
+ idx++;
+ }
+
+ return 0;
+}
+
+#endif
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(n, r) {.name = #n, .offset = offsetof(struct pt_regs, r)}
+#define REG_OFFSET_END {.name = NULL, .offset = 0}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(orig_a0, orig_a0),
+ REG_OFFSET_NAME(csr_era, csr_era),
+ REG_OFFSET_NAME(csr_badvaddr, csr_badvaddr),
+ REG_OFFSET_NAME(csr_crmd, csr_crmd),
+ REG_OFFSET_NAME(csr_prmd, csr_prmd),
+ REG_OFFSET_NAME(csr_euen, csr_euen),
+ REG_OFFSET_NAME(csr_ecfg, csr_ecfg),
+ REG_OFFSET_NAME(csr_estat, csr_estat),
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, this returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
+
+enum loongarch_regset {
+ REGSET_GPR,
+ REGSET_FPR,
+ REGSET_CPUCFG,
+#ifdef CONFIG_CPU_HAS_LSX
+ REGSET_LSX,
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
+ REGSET_LASX,
+#endif
+#ifdef CONFIG_CPU_HAS_LBT
+ REGSET_LBT,
+#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ REGSET_HW_BREAK,
+ REGSET_HW_WATCH,
+#endif
+};
+
+static const struct user_regset loongarch64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .regset_get = gpr_get,
+ .set = gpr_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+ [REGSET_CPUCFG] = {
+ .core_note_type = NT_LOONGARCH_CPUCFG,
+ .n = 64,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = cfg_get,
+ .set = cfg_set,
+ },
+#ifdef CONFIG_CPU_HAS_LSX
+ [REGSET_LSX] = {
+ .core_note_type = NT_LOONGARCH_LSX,
+ .n = NUM_FPU_REGS,
+ .size = 16,
+ .align = 16,
+ .regset_get = simd_get,
+ .set = simd_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_LASX
+ [REGSET_LASX] = {
+ .core_note_type = NT_LOONGARCH_LASX,
+ .n = NUM_FPU_REGS,
+ .size = 32,
+ .align = 32,
+ .regset_get = simd_get,
+ .set = simd_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_LBT
+ [REGSET_LBT] = {
+ .core_note_type = NT_LOONGARCH_LBT,
+ .n = 5,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = lbt_get,
+ .set = lbt_set,
+ },
+#endif
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+ [REGSET_HW_BREAK] = {
+ .core_note_type = NT_LOONGARCH_HW_BREAK,
+ .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+ [REGSET_HW_WATCH] = {
+ .core_note_type = NT_LOONGARCH_HW_WATCH,
+ .n = sizeof(struct user_watch_state) / sizeof(u32),
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = hw_break_get,
+ .set = hw_break_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_loongarch64_view = {
+ .name = "loongarch64",
+ .e_machine = ELF_ARCH,
+ .regsets = loongarch64_regsets,
+ .n = ARRAY_SIZE(loongarch64_regsets),
+};
+
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &user_loongarch64_view;
+}
+
+static inline int read_user(struct task_struct *target, unsigned long addr,
+ unsigned long __user *data)
+{
+ unsigned long tmp = 0;
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = task_pt_regs(target)->regs[addr];
+ break;
+ case ARG0:
+ tmp = task_pt_regs(target)->orig_a0;
+ break;
+ case PC:
+ tmp = task_pt_regs(target)->csr_era;
+ break;
+ case BADVADDR:
+ tmp = task_pt_regs(target)->csr_badvaddr;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return put_user(tmp, data);
+}
+
+static inline int write_user(struct task_struct *target, unsigned long addr,
+ unsigned long data)
+{
+ switch (addr) {
+ case 0 ... 31:
+ task_pt_regs(target)->regs[addr] = data;
+ break;
+ case ARG0:
+ task_pt_regs(target)->orig_a0 = data;
+ break;
+ case PC:
+ task_pt_regs(target)->csr_era = data;
+ break;
+ case BADVADDR:
+ task_pt_regs(target)->csr_badvaddr = data;
+ break;
+ default:
+ return -EIO;
+ }
+
+ return 0;
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ unsigned long __user *datap = (void __user *) data;
+
+ switch (request) {
+ case PTRACE_PEEKUSR:
+ ret = read_user(child, addr, datap);
+ break;
+
+ case PTRACE_POKEUSR:
+ ret = write_user(child, addr, data);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+#ifdef CONFIG_HAVE_HW_BREAKPOINT
+static void ptrace_triggered(struct perf_event *bp,
+ struct perf_sample_data *data, struct pt_regs *regs)
+{
+ struct perf_event_attr attr;
+
+ attr = bp->attr;
+ attr.disabled = true;
+ modify_user_hw_breakpoint(bp, &attr);
+}
+
+static int set_single_step(struct task_struct *tsk, unsigned long addr)
+{
+ struct perf_event *bp;
+ struct perf_event_attr attr;
+ struct arch_hw_breakpoint *info;
+ struct thread_struct *thread = &tsk->thread;
+
+ bp = thread->hbp_break[0];
+ if (!bp) {
+ ptrace_breakpoint_init(&attr);
+
+ attr.bp_addr = addr;
+ attr.bp_len = HW_BREAKPOINT_LEN_8;
+ attr.bp_type = HW_BREAKPOINT_X;
+
+ bp = register_user_hw_breakpoint(&attr, ptrace_triggered,
+ NULL, tsk);
+ if (IS_ERR(bp))
+ return PTR_ERR(bp);
+
+ thread->hbp_break[0] = bp;
+ } else {
+ int err;
+
+ attr = bp->attr;
+ attr.bp_addr = addr;
+
+ /* Reenable breakpoint */
+ attr.disabled = false;
+ err = modify_user_hw_breakpoint(bp, &attr);
+ if (unlikely(err))
+ return err;
+
+ csr_write64(attr.bp_addr, LOONGARCH_CSR_IB0ADDR);
+ }
+ info = counter_arch_bp(bp);
+ info->mask = TASK_SIZE - 1;
+
+ return 0;
+}
+
+/* ptrace API */
+void user_enable_single_step(struct task_struct *task)
+{
+ struct thread_info *ti = task_thread_info(task);
+
+ set_single_step(task, task_pt_regs(task)->csr_era);
+ task->thread.single_step = task_pt_regs(task)->csr_era;
+ set_ti_thread_flag(ti, TIF_SINGLESTEP);
+}
+
+void user_disable_single_step(struct task_struct *task)
+{
+ clear_tsk_thread_flag(task, TIF_SINGLESTEP);
+}
+#endif
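
gpr_get() above lays the NT_PRSTATUS regset out as 32 u64 GPRs followed by orig_a0, csr_era and csr_badvaddr. A userspace sketch of reading it from a stopped tracee via PTRACE_GETREGSET; struct demo_user_regs simply mirrors that membuf order, and the demo_* names are illustrative.

#include <elf.h>		/* NT_PRSTATUS */
#include <stdint.h>
#include <stdio.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

struct demo_user_regs {		/* mirrors the membuf layout in gpr_get() */
	uint64_t regs[32];
	uint64_t orig_a0;
	uint64_t csr_era;
	uint64_t csr_badvaddr;
};

static int demo_dump_pc(pid_t pid)
{
	struct demo_user_regs gpr;
	struct iovec iov = { .iov_base = &gpr, .iov_len = sizeof(gpr) };

	if (ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov) == -1)
		return -1;

	printf("pid %d: era=%#llx sp=%#llx\n", (int)pid,
	       (unsigned long long)gpr.csr_era,
	       (unsigned long long)gpr.regs[3]);
	return 0;
}
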
diff --git a/arch/loongarch/kernel/relocate.c b/arch/loongarch/kernel/relocate.c
new file mode 100644
index 0000000000..1acfa704c8
--- /dev/null
+++ b/arch/loongarch/kernel/relocate.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2023 Loongson Technology Corporation Limited
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/printk.h>
+#include <linux/panic_notifier.h>
+#include <linux/start_kernel.h>
+#include <asm/bootinfo.h>
+#include <asm/early_ioremap.h>
+#include <asm/inst.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+#define RELOCATED(x) ((void *)((long)x + reloc_offset))
+#define RELOCATED_KASLR(x) ((void *)((long)x + random_offset))
+
+static unsigned long reloc_offset;
+
+static inline void __init relocate_relative(void)
+{
+ Elf64_Rela *rela, *rela_end;
+ rela = (Elf64_Rela *)&__rela_dyn_begin;
+ rela_end = (Elf64_Rela *)&__rela_dyn_end;
+
+ for ( ; rela < rela_end; rela++) {
+ Elf64_Addr addr = rela->r_offset;
+ Elf64_Addr relocated_addr = rela->r_addend;
+
+ if (rela->r_info != R_LARCH_RELATIVE)
+ continue;
+
+ if (relocated_addr >= VMLINUX_LOAD_ADDRESS)
+ relocated_addr = (Elf64_Addr)RELOCATED(relocated_addr);
+
+ *(Elf64_Addr *)RELOCATED(addr) = relocated_addr;
+ }
+}
+
+static inline void __init relocate_absolute(long random_offset)
+{
+ void *begin, *end;
+ struct rela_la_abs *p;
+
+ begin = RELOCATED_KASLR(&__la_abs_begin);
+ end = RELOCATED_KASLR(&__la_abs_end);
+
+ for (p = begin; (void *)p < end; p++) {
+ long v = p->symvalue;
+ uint32_t lu12iw, ori, lu32id, lu52id;
+ union loongarch_instruction *insn = (void *)p->pc;
+
+ lu12iw = (v >> 12) & 0xfffff;
+ ori = v & 0xfff;
+ lu32id = (v >> 32) & 0xfffff;
+ lu52id = v >> 52;
+
+ insn[0].reg1i20_format.immediate = lu12iw;
+ insn[1].reg2i12_format.immediate = ori;
+ insn[2].reg1i20_format.immediate = lu32id;
+ insn[3].reg2i12_format.immediate = lu52id;
+ }
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
+{
+ size_t i, diff;
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+
+ diff = (void *)ptr - area;
+ if (size < diff + sizeof(hash))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long hash = 0;
+ unsigned long entropy = random_get_entropy();
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+ return hash;
+}
+
+static int __init nokaslr(char *p)
+{
+ pr_info("KASLR is disabled.\n");
+
+ return 0; /* Print a notice and silence the boot warning */
+}
+early_param("nokaslr", nokaslr);
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+ const char *builtin_cmdline = CONFIG_CMDLINE;
+
+ str = strstr(builtin_cmdline, "nokaslr");
+ if (str == builtin_cmdline || (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+
+ str = strstr(boot_command_line, "nokaslr");
+ if (str == boot_command_line || (str > boot_command_line && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
+/* Choose a new address for the kernel */
+static inline void __init *determine_relocation_address(void)
+{
+ unsigned long kernel_length;
+ unsigned long random_offset;
+ void *destination = _text;
+
+ if (kaslr_disabled())
+ return destination;
+
+ kernel_length = (long)_end - (long)_text;
+
+ random_offset = get_random_boot() << 16;
+ random_offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (random_offset < kernel_length)
+ random_offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED_KASLR(destination);
+}
+
+static inline int __init relocation_addr_valid(void *location_new)
+{
+ if ((unsigned long)location_new & 0x00000ffff)
+ return 0; /* Inappropriately aligned new location */
+
+ if ((unsigned long)location_new < (unsigned long)_end)
+ return 0; /* New location overlaps original kernel */
+
+ return 1;
+}
+#endif
+
+static inline void __init update_reloc_offset(unsigned long *addr, long random_offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED_KASLR(addr);
+
+ *new_addr = (unsigned long)reloc_offset;
+}
+
+unsigned long __init relocate_kernel(void)
+{
+ unsigned long kernel_length;
+ unsigned long random_offset = 0;
+ void *location_new = _text; /* Default to original kernel start */
+ char *cmdline = early_ioremap(fw_arg1, COMMAND_LINE_SIZE); /* Boot command line is passed in fw_arg1 */
+
+ strscpy(boot_command_line, cmdline, COMMAND_LINE_SIZE);
+
+#ifdef CONFIG_RANDOMIZE_BASE
+ location_new = determine_relocation_address();
+
+ /* Sanity check relocation address */
+ if (relocation_addr_valid(location_new))
+ random_offset = (unsigned long)location_new - (unsigned long)(_text);
+#endif
+ reloc_offset = (unsigned long)_text - VMLINUX_LOAD_ADDRESS;
+
+ if (random_offset) {
+ kernel_length = (long)(_end) - (long)(_text);
+
+ /* Copy the kernel to its new location */
+ memcpy(location_new, _text, kernel_length);
+
+ /* Sync the caches ready for execution of new kernel */
+ __asm__ __volatile__ (
+ "ibar 0 \t\n"
+ "dbar 0 \t\n"
+ ::: "memory");
+
+ reloc_offset += random_offset;
+
+ /* The current thread is now within the relocated kernel */
+ __current_thread_info = RELOCATED_KASLR(__current_thread_info);
+
+ update_reloc_offset(&reloc_offset, random_offset);
+ }
+
+ if (reloc_offset)
+ relocate_relative();
+
+ relocate_absolute(random_offset);
+
+ return random_offset;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+static void show_kernel_relocation(const char *level)
+{
+ if (reloc_offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated by 0x%lx\n", reloc_offset);
+ pr_cont(" .text @ 0x%px\n", _text);
+ pr_cont(" .data @ 0x%px\n", _sdata);
+ pr_cont(" .bss @ 0x%px\n", __bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+
+arch_initcall(register_kernel_offset_dumper);
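
relocate_absolute() above patches each la.abs-style sequence by splitting the relocated symbol value into the lu12i.w, ori, lu32i.d and lu52i.d immediates. The field arithmetic, and its inverse, shown in isolation (a standalone sketch; the demo_* names are illustrative, and the sign extension performed by the real instructions is ignored here):

#include <stdint.h>

struct demo_la_abs_imms {
	uint32_t lu12iw;	/* v[31:12] */
	uint32_t ori;		/* v[11:0]  */
	uint32_t lu32id;	/* v[51:32] */
	uint32_t lu52id;	/* v[63:52] */
};

static struct demo_la_abs_imms demo_split(uint64_t v)
{
	return (struct demo_la_abs_imms){
		.lu12iw = (v >> 12) & 0xfffff,
		.ori    = v & 0xfff,
		.lu32id = (v >> 32) & 0xfffff,
		.lu52id = v >> 52,
	};
}

static uint64_t demo_join(struct demo_la_abs_imms i)
{
	/* Exact inverse of demo_split(): the four fields tile all 64 bits. */
	return ((uint64_t)i.lu52id << 52) | ((uint64_t)i.lu32id << 32) |
	       ((uint64_t)i.lu12iw << 12) | i.ori;
}
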
diff --git a/arch/loongarch/kernel/relocate_kernel.S b/arch/loongarch/kernel/relocate_kernel.S
new file mode 100644
index 0000000000..f49f6b0537
--- /dev/null
+++ b/arch/loongarch/kernel/relocate_kernel.S
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * relocate_kernel.S for kexec
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/kexec.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/loongarch.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+
+SYM_CODE_START(relocate_new_kernel)
+ /*
+ * a0: EFI boot flag for the new kernel
+ * a1: Command line pointer for the new kernel
+ * a2: System table pointer for the new kernel
+ * a3: Start address to jump to after relocation
+ * a4: Pointer to the current indirection page entry
+ */
+ move s0, a4
+
+ /*
+ * In case of a kdump/crash kernel, the indirection page is not
+ * populated as the kernel is directly copied to a reserved location
+ */
+ beqz s0, done
+
+process_entry:
+ PTR_L s1, s0, 0
+ PTR_ADDI s0, s0, SZREG
+
+ /* destination page */
+ andi s2, s1, IND_DESTINATION
+ beqz s2, 1f
+ li.w t0, ~0x1
+ and s3, s1, t0 /* store destination addr in s3 */
+ b process_entry
+
+1:
+ /* indirection page, update s0 */
+ andi s2, s1, IND_INDIRECTION
+ beqz s2, 1f
+ li.w t0, ~0x2
+ and s0, s1, t0
+ b process_entry
+
+1:
+ /* done page */
+ andi s2, s1, IND_DONE
+ beqz s2, 1f
+ b done
+
+1:
+ /* source page */
+ andi s2, s1, IND_SOURCE
+ beqz s2, process_entry
+ li.w t0, ~0x8
+ and s1, s1, t0
+ li.w s5, (1 << _PAGE_SHIFT) / SZREG
+
+copy_word:
+ /* copy page word by word */
+ REG_L s4, s1, 0
+ REG_S s4, s3, 0
+ PTR_ADDI s3, s3, SZREG
+ PTR_ADDI s1, s1, SZREG
+ LONG_ADDI s5, s5, -1
+ beqz s5, process_entry
+ b copy_word
+
+done:
+ ibar 0
+ dbar 0
+
+ /*
+ * Jump to the new kernel;
+ * make sure the values of a0, a1, a2 and a3 are not changed.
+ */
+ jr a3
+SYM_CODE_END(relocate_new_kernel)
+
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until code is relocated and
+ * then start at the entry point from LOONGARCH_IOCSR_MBUF0.
+ */
+SYM_CODE_START(kexec_smp_wait)
+1: li.w t0, 0x100 /* wait for init loop */
+2: addi.w t0, t0, -1 /* limit mailbox access */
+ bnez t0, 2b
+ li.w t1, LOONGARCH_IOCSR_MBUF0
+ iocsrrd.w s0, t1 /* check PC as an indicator */
+ beqz s0, 1b
+ iocsrrd.d s0, t1 /* get PC via mailbox */
+
+ li.d t0, CACHE_BASE
+ or s0, s0, t0 /* s0 = TO_CACHE(s0) */
+ jr s0 /* jump to initial PC */
+SYM_CODE_END(kexec_smp_wait)
+#endif
+
+relocate_new_kernel_end:
+
+SYM_DATA_START(relocate_new_kernel_size)
+ PTR relocate_new_kernel_end - relocate_new_kernel
+SYM_DATA_END(relocate_new_kernel_size)
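
relocate_new_kernel above walks the kexec indirection list one word at a time: an IND_DESTINATION entry sets the copy target, IND_INDIRECTION chains to the next list page, IND_SOURCE copies one page and advances the target, and IND_DONE ends the walk. The same control flow in C, as a readability sketch only (demo_process_indirection() is an illustrative name, and the ibar/dbar cache synchronisation done by the assembly is omitted):

#include <linux/kexec.h>	/* IND_DESTINATION, IND_INDIRECTION, ... */
#include <linux/mm.h>		/* PAGE_SIZE */
#include <linux/string.h>	/* memcpy */

static void demo_process_indirection(unsigned long *entry)
{
	unsigned long dest = 0;

	if (!entry)	/* kdump case: image already copied to its reserved area */
		return;

	for (;;) {
		unsigned long e = *entry++;

		if (e & IND_DESTINATION)
			dest = e & ~0x1UL;	/* set the next copy target */
		else if (e & IND_INDIRECTION)
			entry = (unsigned long *)(e & ~0x2UL);	/* follow chain */
		else if (e & IND_DONE)
			break;
		else if (e & IND_SOURCE) {
			memcpy((void *)dest, (void *)(e & ~0x8UL), PAGE_SIZE);
			dest += PAGE_SIZE;	/* destination auto-advances */
		}
	}
}
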
diff --git a/arch/loongarch/kernel/reset.c b/arch/loongarch/kernel/reset.c
new file mode 100644
index 0000000000..1ef8c63835
--- /dev/null
+++ b/arch/loongarch/kernel/reset.c
@@ -0,0 +1,79 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/acpi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+#include <linux/console.h>
+
+#include <acpi/reboot.h>
+#include <asm/idle.h>
+#include <asm/loongarch.h>
+#include <asm/loongson.h>
+
+void (*pm_power_off)(void);
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_halt(void)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ local_irq_disable();
+ clear_csr_ecfg(ECFG0_IM);
+
+ pr_notice("\n\n** You can safely turn off the power now **\n\n");
+ console_flush_on_panic(CONSOLE_FLUSH_PENDING);
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+void machine_power_off(void)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+#ifdef CONFIG_PM
+ if (!acpi_disabled)
+ enable_pci_wakeup();
+#endif
+ do_kernel_power_off();
+#ifdef CONFIG_EFI
+ efi.reset_system(EFI_RESET_SHUTDOWN, EFI_SUCCESS, 0, NULL);
+#endif
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
+
+void machine_restart(char *command)
+{
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
+#ifdef CONFIG_EFI
+ if (efi_capsule_pending(NULL))
+ efi_reboot(REBOOT_WARM, NULL);
+ else
+ efi_reboot(REBOOT_COLD, NULL);
+#endif
+ if (!acpi_disabled)
+ acpi_reboot();
+
+ while (true) {
+ __arch_cpu_idle();
+ }
+}
diff --git a/arch/loongarch/kernel/rethook.c b/arch/loongarch/kernel/rethook.c
new file mode 100644
index 0000000000..db1c5f5024
--- /dev/null
+++ b/arch/loongarch/kernel/rethook.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Generic return hook for LoongArch.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/rethook.h>
+#include "rethook.h"
+
+/* This is called from arch_rethook_trampoline() */
+unsigned long __used arch_rethook_trampoline_callback(struct pt_regs *regs)
+{
+ return rethook_trampoline_handler(regs, 0);
+}
+NOKPROBE_SYMBOL(arch_rethook_trampoline_callback);
+
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount)
+{
+ rhn->frame = 0;
+ rhn->ret_addr = regs->regs[1];
+
+ /* replace return addr with trampoline */
+ regs->regs[1] = (unsigned long)arch_rethook_trampoline;
+}
+NOKPROBE_SYMBOL(arch_rethook_prepare);
+
+/* ASM function that handles the rethook must not be probed itself */
+NOKPROBE_SYMBOL(arch_rethook_trampoline);
diff --git a/arch/loongarch/kernel/rethook.h b/arch/loongarch/kernel/rethook.h
new file mode 100644
index 0000000000..3f1c1edf0d
--- /dev/null
+++ b/arch/loongarch/kernel/rethook.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LOONGARCH_RETHOOK_H
+#define __LOONGARCH_RETHOOK_H
+
+unsigned long arch_rethook_trampoline_callback(struct pt_regs *regs);
+void arch_rethook_prepare(struct rethook_node *rhn, struct pt_regs *regs, bool mcount);
+
+#endif
diff --git a/arch/loongarch/kernel/rethook_trampoline.S b/arch/loongarch/kernel/rethook_trampoline.S
new file mode 100644
index 0000000000..bd5772c963
--- /dev/null
+++ b/arch/loongarch/kernel/rethook_trampoline.S
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+#include <linux/linkage.h>
+#include <asm/stackframe.h>
+
+ .text
+
+ .macro save_all_base_regs
+ cfi_st ra, PT_R1
+ cfi_st tp, PT_R2
+ cfi_st a0, PT_R4
+ cfi_st a1, PT_R5
+ cfi_st a2, PT_R6
+ cfi_st a3, PT_R7
+ cfi_st a4, PT_R8
+ cfi_st a5, PT_R9
+ cfi_st a6, PT_R10
+ cfi_st a7, PT_R11
+ cfi_st t0, PT_R12
+ cfi_st t1, PT_R13
+ cfi_st t2, PT_R14
+ cfi_st t3, PT_R15
+ cfi_st t4, PT_R16
+ cfi_st t5, PT_R17
+ cfi_st t6, PT_R18
+ cfi_st t7, PT_R19
+ cfi_st t8, PT_R20
+ cfi_st u0, PT_R21
+ cfi_st fp, PT_R22
+ cfi_st s0, PT_R23
+ cfi_st s1, PT_R24
+ cfi_st s2, PT_R25
+ cfi_st s3, PT_R26
+ cfi_st s4, PT_R27
+ cfi_st s5, PT_R28
+ cfi_st s6, PT_R29
+ cfi_st s7, PT_R30
+ cfi_st s8, PT_R31
+ csrrd t0, LOONGARCH_CSR_CRMD
+ andi t0, t0, 0x7 /* extract bit[1:0] PLV, bit[2] IE */
+ LONG_S t0, sp, PT_CRMD
+ .endm
+
+ .macro restore_all_base_regs
+ cfi_ld tp, PT_R2
+ cfi_ld a0, PT_R4
+ cfi_ld a1, PT_R5
+ cfi_ld a2, PT_R6
+ cfi_ld a3, PT_R7
+ cfi_ld a4, PT_R8
+ cfi_ld a5, PT_R9
+ cfi_ld a6, PT_R10
+ cfi_ld a7, PT_R11
+ cfi_ld t0, PT_R12
+ cfi_ld t1, PT_R13
+ cfi_ld t2, PT_R14
+ cfi_ld t3, PT_R15
+ cfi_ld t4, PT_R16
+ cfi_ld t5, PT_R17
+ cfi_ld t6, PT_R18
+ cfi_ld t7, PT_R19
+ cfi_ld t8, PT_R20
+ cfi_ld u0, PT_R21
+ cfi_ld fp, PT_R22
+ cfi_ld s0, PT_R23
+ cfi_ld s1, PT_R24
+ cfi_ld s2, PT_R25
+ cfi_ld s3, PT_R26
+ cfi_ld s4, PT_R27
+ cfi_ld s5, PT_R28
+ cfi_ld s6, PT_R29
+ cfi_ld s7, PT_R30
+ cfi_ld s8, PT_R31
+ LONG_L t0, sp, PT_CRMD
+ li.d t1, 0x7 /* mask bit[1:0] PLV, bit[2] IE */
+ csrxchg t0, t1, LOONGARCH_CSR_CRMD
+ .endm
+
+SYM_CODE_START(arch_rethook_trampoline)
+ addi.d sp, sp, -PT_SIZE
+ save_all_base_regs
+
+ addi.d t0, sp, PT_SIZE
+ LONG_S t0, sp, PT_R3
+
+ move a0, sp /* pt_regs */
+
+ bl arch_rethook_trampoline_callback
+
+ /* use the result as the return-address */
+ move ra, a0
+
+ restore_all_base_regs
+ addi.d sp, sp, PT_SIZE
+
+ jr ra
+SYM_CODE_END(arch_rethook_trampoline)
diff --git a/arch/loongarch/kernel/setup.c b/arch/loongarch/kernel/setup.c
new file mode 100644
index 0000000000..aed65915e9
--- /dev/null
+++ b/arch/loongarch/kernel/setup.c
@@ -0,0 +1,633 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 Waldorf Electronics
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
+ * Copyright (C) 1996 Stoned Elipot
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/dmi.h>
+#include <linux/efi.h>
+#include <linux/export.h>
+#include <linux/screen_info.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/ioport.h>
+#include <linux/kexec.h>
+#include <linux/crash_dump.h>
+#include <linux/root_dev.h>
+#include <linux/console.h>
+#include <linux/pfn.h>
+#include <linux/platform_device.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+#include <linux/of_address.h>
+#include <linux/suspend.h>
+#include <linux/swiotlb.h>
+
+#include <asm/addrspace.h>
+#include <asm/alternative.h>
+#include <asm/bootinfo.h>
+#include <asm/cache.h>
+#include <asm/cpu.h>
+#include <asm/dma.h>
+#include <asm/efi.h>
+#include <asm/loongson.h>
+#include <asm/numa.h>
+#include <asm/pgalloc.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+
+#define SMBIOS_BIOSSIZE_OFFSET 0x09
+#define SMBIOS_BIOSEXTERN_OFFSET 0x13
+#define SMBIOS_FREQLOW_OFFSET 0x16
+#define SMBIOS_FREQHIGH_OFFSET 0x17
+#define SMBIOS_FREQLOW_MASK 0xFF
+#define SMBIOS_CORE_PACKAGE_OFFSET 0x23
+#define LOONGSON_EFI_ENABLE (1 << 3)
+
+struct screen_info screen_info __section(".data");
+
+unsigned long fw_arg0, fw_arg1, fw_arg2;
+DEFINE_PER_CPU(unsigned long, kernelsp);
+struct cpuinfo_loongarch cpu_data[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(cpu_data);
+
+struct loongson_board_info b_info;
+static const char dmi_empty_string[] = " ";
+
+/*
+ * Setup information
+ *
+ * These are initialized so they are in the .data section
+ */
+char init_command_line[COMMAND_LINE_SIZE] __initdata;
+
+static int num_standard_resources;
+static struct resource *standard_resources;
+
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource = { .name = "Kernel bss", };
+
+const char *get_system_type(void)
+{
+ return "generic-loongson-machine";
+}
+
+void __init arch_cpu_finalize_init(void)
+{
+ alternative_instructions();
+}
+
+static const char *dmi_string_parse(const struct dmi_header *dm, u8 s)
+{
+ const u8 *bp = ((u8 *) dm) + dm->length;
+
+ if (s) {
+ s--;
+ while (s > 0 && *bp) {
+ bp += strlen(bp) + 1;
+ s--;
+ }
+
+ if (*bp != 0) {
+ size_t len = strlen(bp)+1;
+ size_t cmp_len = len > 8 ? 8 : len;
+
+ if (!memcmp(bp, dmi_empty_string, cmp_len))
+ return dmi_empty_string;
+
+ return bp;
+ }
+ }
+
+ return "";
+}
+
+static void __init parse_cpu_table(const struct dmi_header *dm)
+{
+ long freq_temp = 0;
+ char *dmi_data = (char *)dm;
+
+ freq_temp = ((*(dmi_data + SMBIOS_FREQHIGH_OFFSET) << 8) +
+ ((*(dmi_data + SMBIOS_FREQLOW_OFFSET)) & SMBIOS_FREQLOW_MASK));
+ cpu_clock_freq = freq_temp * 1000000;
+
+ loongson_sysconf.cpuname = (void *)dmi_string_parse(dm, dmi_data[16]);
+ loongson_sysconf.cores_per_package = *(dmi_data + SMBIOS_CORE_PACKAGE_OFFSET);
+
+ pr_info("CpuClock = %llu\n", cpu_clock_freq);
+}
+
+static void __init parse_bios_table(const struct dmi_header *dm)
+{
+ char *dmi_data = (char *)dm;
+
+ b_info.bios_size = (*(dmi_data + SMBIOS_BIOSSIZE_OFFSET) + 1) << 6;
+}
+
+static void __init find_tokens(const struct dmi_header *dm, void *dummy)
+{
+ switch (dm->type) {
+ case 0x0: /* Extern BIOS */
+ parse_bios_table(dm);
+ break;
+ case 0x4: /* Calling interface */
+ parse_cpu_table(dm);
+ break;
+ }
+}
+static void __init smbios_parse(void)
+{
+ b_info.bios_vendor = (void *)dmi_get_system_info(DMI_BIOS_VENDOR);
+ b_info.bios_version = (void *)dmi_get_system_info(DMI_BIOS_VERSION);
+ b_info.bios_release_date = (void *)dmi_get_system_info(DMI_BIOS_DATE);
+ b_info.board_vendor = (void *)dmi_get_system_info(DMI_BOARD_VENDOR);
+ b_info.board_name = (void *)dmi_get_system_info(DMI_BOARD_NAME);
+ dmi_walk(find_tokens, NULL);
+}
+
+#ifdef CONFIG_ARCH_WRITECOMBINE
+bool wc_enabled = true;
+#else
+bool wc_enabled = false;
+#endif
+
+EXPORT_SYMBOL(wc_enabled);
+
+static int __init setup_writecombine(char *p)
+{
+ if (!strcmp(p, "on"))
+ wc_enabled = true;
+ else if (!strcmp(p, "off"))
+ wc_enabled = false;
+ else
+ pr_warn("Unknown writecombine setting \"%s\".\n", p);
+
+ return 0;
+}
+early_param("writecombine", setup_writecombine);
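+/*
+ * For example, booting with "writecombine=off" on the kernel command line
+ * overrides the CONFIG_ARCH_WRITECOMBINE default selected above.
+ */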
+
+static int usermem __initdata;
+
+static int __init early_parse_mem(char *p)
+{
+ phys_addr_t start, size;
+
+ if (!p) {
+ pr_err("mem parameter is empty, do nothing\n");
+ return -EINVAL;
+ }
+
+ /*
+	/*
+	 * If a user specifies a memory size, we blow away any
+	 * automatically generated size.
+	 */
+ if (usermem == 0) {
+ usermem = 1;
+ memblock_remove(memblock_start_of_DRAM(),
+ memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+ else {
+ pr_err("Invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (!IS_ENABLED(CONFIG_NUMA))
+ memblock_add(start, size);
+ else
+ memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
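+/*
+ * Example usage (illustrative values): "mem=1G@0x200000" discards the
+ * firmware-provided memory map and adds a single 1 GiB region starting at
+ * 2 MiB. Note that this parser requires the "@start" part; a bare "mem=1G"
+ * is rejected as an invalid format.
+ */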
+
+static void __init arch_reserve_vmcore(void)
+{
+#ifdef CONFIG_PROC_VMCORE
+ u64 i;
+ phys_addr_t start, end;
+
+ if (!is_kdump_kernel())
+ return;
+
+ if (!elfcorehdr_size) {
+ for_each_mem_range(i, &start, &end) {
+ if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
+ /*
+			 * Reserve from the elf core header to the end of
+			 * the memory segment; that should all be kdump
+			 * reserved memory.
+ */
+ elfcorehdr_size = end - elfcorehdr_addr;
+ break;
+ }
+ }
+ }
+
+ if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
+ pr_warn("elfcorehdr is overlapped\n");
+ return;
+ }
+
+ memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+
+ pr_info("Reserving %llu KiB of memory at 0x%llx for elfcorehdr\n",
+ elfcorehdr_size >> 10, elfcorehdr_addr);
+#endif
+}
+
+/* 2MB alignment for crash kernel regions */
+#define CRASH_ALIGN SZ_2M
+#define CRASH_ADDR_MAX SZ_4G
+
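+/*
+ * For example (illustrative values), "crashkernel=512M" requests an
+ * automatically placed, 2 MiB aligned 512 MiB region below 4 GiB, while
+ * "crashkernel=512M@0x10000000" requests a fixed base address.
+ */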
+static void __init arch_parse_crashkernel(void)
+{
+#ifdef CONFIG_KEXEC
+ int ret;
+ unsigned long long total_mem;
+ unsigned long long crash_base, crash_size;
+
+ total_mem = memblock_phys_mem_size();
+ ret = parse_crashkernel(boot_command_line, total_mem, &crash_size, &crash_base);
+ if (ret < 0 || crash_size <= 0)
+ return;
+
+ if (crash_base <= 0) {
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN, CRASH_ALIGN, CRASH_ADDR_MAX);
+ if (!crash_base) {
+ pr_warn("crashkernel reservation failed - No suitable area found.\n");
+ return;
+ }
+ } else if (!memblock_phys_alloc_range(crash_size, CRASH_ALIGN, crash_base, crash_base + crash_size)) {
+ pr_warn("Invalid memory region reserved for crash kernel\n");
+ return;
+ }
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+#endif
+}
+
+static void __init fdt_setup(void)
+{
+#ifdef CONFIG_OF_EARLY_FLATTREE
+ void *fdt_pointer;
+
+ /* ACPI-based systems do not require parsing fdt */
+ if (acpi_os_get_root_pointer())
+ return;
+
+ /* Look for a device tree configuration table entry */
+ fdt_pointer = efi_fdt_pointer();
+ if (!fdt_pointer || fdt_check_header(fdt_pointer))
+ return;
+
+ early_init_dt_scan(fdt_pointer);
+ early_init_fdt_reserve_self();
+
+ max_low_pfn = PFN_PHYS(memblock_end_of_DRAM());
+#endif
+}
+
+static void __init bootcmdline_init(char **cmdline_p)
+{
+ /*
+ * If CONFIG_CMDLINE_FORCE is enabled then initializing the command line
+ * is trivial - we simply use the built-in command line unconditionally &
+ * unmodified.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_FORCE)) {
+ strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ goto out;
+ }
+
+#ifdef CONFIG_OF_FLATTREE
+ /*
+	 * If CONFIG_CMDLINE_BOOTLOADER is enabled and we are on an FDT-based system,
+ * the boot_command_line will be overwritten by early_init_dt_scan_chosen().
+ * So we need to append init_command_line (the original copy of boot_command_line)
+ * to boot_command_line.
+ */
+ if (initial_boot_params) {
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ strlcat(boot_command_line, init_command_line, COMMAND_LINE_SIZE);
+ goto out;
+ }
+#endif
+
+ /*
+ * Append built-in command line to the bootloader command line if
+ * CONFIG_CMDLINE_EXTEND is enabled.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_EXTEND) && CONFIG_CMDLINE[0]) {
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ }
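+	/*
+	 * For instance (illustrative values), with CONFIG_CMDLINE="console=ttyS0"
+	 * and CONFIG_CMDLINE_EXTEND=y, a bootloader-provided "root=/dev/sda1"
+	 * ends up as "root=/dev/sda1 console=ttyS0".
+	 */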
+
+ /*
+ * Use built-in command line if the bootloader command line is empty.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOTLOADER) && !boot_command_line[0])
+ strscpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+
+out:
+ *cmdline_p = boot_command_line;
+}
+
+void __init platform_init(void)
+{
+ arch_reserve_vmcore();
+ arch_parse_crashkernel();
+
+#ifdef CONFIG_ACPI_TABLE_UPGRADE
+ acpi_table_upgrade();
+#endif
+#ifdef CONFIG_ACPI
+ acpi_gbl_use_default_register_widths = false;
+ acpi_boot_table_init();
+#endif
+ unflatten_and_copy_device_tree();
+
+#ifdef CONFIG_NUMA
+ init_numa_memory();
+#endif
+ dmi_setup();
+ smbios_parse();
+ pr_info("The BIOS Version: %s\n", b_info.bios_version);
+
+ efi_runtime_init();
+}
+
+static void __init check_kernel_sections_mem(void)
+{
+ phys_addr_t start = __pa_symbol(&_text);
+ phys_addr_t size = __pa_symbol(&_end) - start;
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_info("Kernel sections are not in the memory maps\n");
+ memblock_add(start, size);
+ }
+}
+
+/*
+ * arch_mem_init - initialize memory management subsystem
+ */
+static void __init arch_mem_init(char **cmdline_p)
+{
+ if (usermem)
+ pr_info("User-defined physical RAM map overwrite\n");
+
+ check_kernel_sections_mem();
+
+ early_init_fdt_scan_reserved_mem();
+
+ /*
+	 * In order to reduce the possibility of a kernel panic when we fail
+	 * to get IO TLB memory under CONFIG_SWIOTLB, it is better to allocate
+	 * as little low memory as possible before swiotlb_init(), so make
+	 * sparse_init() use top-down allocation.
+ */
+ memblock_set_bottom_up(false);
+ sparse_init();
+ memblock_set_bottom_up(true);
+
+ swiotlb_init(true, SWIOTLB_VERBOSE);
+
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ /* Reserve for hibernation. */
+ register_nosave_region(PFN_DOWN(__pa_symbol(&__nosave_begin)),
+ PFN_UP(__pa_symbol(&__nosave_end)));
+
+ memblock_dump_all();
+
+ early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
+}
+
+static void __init resource_init(void)
+{
+ long i = 0;
+ size_t res_size;
+ struct resource *res;
+ struct memblock_region *region;
+
+ code_resource.start = __pa_symbol(&_text);
+ code_resource.end = __pa_symbol(&_etext) - 1;
+ data_resource.start = __pa_symbol(&_etext);
+ data_resource.end = __pa_symbol(&_edata) - 1;
+ bss_resource.start = __pa_symbol(&__bss_start);
+ bss_resource.end = __pa_symbol(&__bss_stop) - 1;
+
+ num_standard_resources = memblock.memory.cnt;
+ res_size = num_standard_resources * sizeof(*standard_resources);
+ standard_resources = memblock_alloc(res_size, SMP_CACHE_BYTES);
+
+ for_each_mem_region(region) {
+ res = &standard_resources[i++];
+ if (!memblock_is_nomap(region)) {
+ res->name = "System RAM";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+ } else {
+ res->name = "Reserved";
+ res->flags = IORESOURCE_MEM;
+ res->start = __pfn_to_phys(memblock_region_reserved_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_reserved_end_pfn(region)) - 1;
+ }
+
+ request_resource(&iomem_resource, res);
+
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+ }
+
+#ifdef CONFIG_KEXEC
+ if (crashk_res.start < crashk_res.end) {
+ insert_resource(&iomem_resource, &crashk_res);
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)((crashk_res.end - crashk_res.start + 1) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+ }
+#endif
+}
+
+static int __init add_legacy_isa_io(struct fwnode_handle *fwnode,
+ resource_size_t hw_start, resource_size_t size)
+{
+ int ret = 0;
+ unsigned long vaddr;
+ struct logic_pio_hwaddr *range;
+
+ range = kzalloc(sizeof(*range), GFP_ATOMIC);
+ if (!range)
+ return -ENOMEM;
+
+ range->fwnode = fwnode;
+ range->size = size = round_up(size, PAGE_SIZE);
+ range->hw_start = hw_start;
+ range->flags = LOGIC_PIO_CPU_MMIO;
+
+ ret = logic_pio_register_range(range);
+ if (ret) {
+ kfree(range);
+ return ret;
+ }
+
+	/* Legacy ISA must be placed at the start of PCI_IOBASE */
+ if (range->io_start != 0) {
+ logic_pio_unregister_range(range);
+ kfree(range);
+ return -EINVAL;
+ }
+
+ vaddr = (unsigned long)(PCI_IOBASE + range->io_start);
+ ioremap_page_range(vaddr, vaddr + size, hw_start, pgprot_device(PAGE_KERNEL));
+
+ return 0;
+}
+
+static __init int arch_reserve_pio_range(void)
+{
+ struct device_node *np;
+
+ for_each_node_by_name(np, "isa") {
+ struct of_range range;
+ struct of_range_parser parser;
+
+ pr_info("ISA Bridge: %pOF\n", np);
+
+ if (of_range_parser_init(&parser, np)) {
+ pr_info("Failed to parse resources.\n");
+ of_node_put(np);
+ break;
+ }
+
+ for_each_of_range(&parser, &range) {
+ switch (range.flags & IORESOURCE_TYPE_BITS) {
+ case IORESOURCE_IO:
+ pr_info(" IO 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1,
+ range.bus_addr);
+ if (add_legacy_isa_io(&np->fwnode, range.cpu_addr, range.size))
+ pr_warn("Failed to reserve legacy IO in Logic PIO\n");
+ break;
+ case IORESOURCE_MEM:
+ pr_info(" MEM 0x%016llx..0x%016llx -> 0x%016llx\n",
+ range.cpu_addr,
+ range.cpu_addr + range.size - 1,
+ range.bus_addr);
+ break;
+ }
+ }
+ }
+
+ return 0;
+}
+arch_initcall(arch_reserve_pio_range);
+
+static int __init reserve_memblock_reserved_regions(void)
+{
+ u64 i, j;
+
+ for (i = 0; i < num_standard_resources; ++i) {
+ struct resource *mem = &standard_resources[i];
+ phys_addr_t r_start, r_end, mem_size = resource_size(mem);
+
+ if (!memblock_is_region_reserved(mem->start, mem_size))
+ continue;
+
+ for_each_reserved_mem_range(j, &r_start, &r_end) {
+ resource_size_t start, end;
+
+ start = max(PFN_PHYS(PFN_DOWN(r_start)), mem->start);
+ end = min(PFN_PHYS(PFN_UP(r_end)) - 1, mem->end);
+
+ if (start > mem->end || end < mem->start)
+ continue;
+
+ reserve_region_with_split(mem, start, end, "Reserved");
+ }
+ }
+
+ return 0;
+}
+arch_initcall(reserve_memblock_reserved_regions);
+
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+ int i, possible;
+
+ possible = num_processors + disabled_cpus;
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
+
+ pr_info("SMP: Allowing %d CPUs, %d hotplug CPUs\n",
+ possible, max((possible - num_processors), 0));
+
+ for (i = 0; i < possible; i++)
+ set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+
+ set_nr_cpu_ids(possible);
+}
+#endif
+
+void __init setup_arch(char **cmdline_p)
+{
+ cpu_probe();
+
+ init_environ();
+ efi_init();
+ fdt_setup();
+ memblock_init();
+ pagetable_init();
+ bootcmdline_init(cmdline_p);
+ parse_early_param();
+ reserve_initrd_mem();
+
+ platform_init();
+ arch_mem_init(cmdline_p);
+
+ resource_init();
+#ifdef CONFIG_SMP
+ plat_smp_setup();
+ prefill_possible_map();
+#endif
+
+ paging_init();
+
+#ifdef CONFIG_KASAN
+ kasan_init();
+#endif
+}
diff --git a/arch/loongarch/kernel/signal.c b/arch/loongarch/kernel/signal.c
new file mode 100644
index 0000000000..4a3686d133
--- /dev/null
+++ b/arch/loongarch/kernel/signal.c
@@ -0,0 +1,1075 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/audit.h>
+#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+
+#include <asm/asm.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+#include <asm/lbt.h>
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+
+#ifdef DEBUG_SIG
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#else
+# define DEBUGP(fmt, args...)
+#endif
+
+/* Make sure we will not lose FPU ownership */
+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+/* Make sure we will not lose LBT ownership */
+#define lock_lbt_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_lbt_owner() ({ pagefault_enable(); preempt_enable(); })
+
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *fcc, void __user *csr);
+extern asmlinkage int
+_save_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+extern asmlinkage int
+_restore_lsx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+extern asmlinkage int
+_save_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+extern asmlinkage int
+_restore_lasx_context(void __user *fpregs, void __user *fcc, void __user *fcsr);
+
+#ifdef CONFIG_CPU_HAS_LBT
+extern asmlinkage int _save_lbt_context(void __user *regs, void __user *eflags);
+extern asmlinkage int _restore_lbt_context(void __user *regs, void __user *eflags);
+extern asmlinkage int _save_ftop_context(void __user *ftop);
+extern asmlinkage int _restore_ftop_context(void __user *ftop);
+#endif
+
+struct rt_sigframe {
+ struct siginfo rs_info;
+ struct ucontext rs_uctx;
+};
+
+struct _ctx_layout {
+ struct sctx_info *addr;
+ unsigned int size;
+};
+
+struct extctx_layout {
+ unsigned long size;
+ unsigned int flags;
+ struct _ctx_layout fpu;
+ struct _ctx_layout lsx;
+ struct _ctx_layout lasx;
+ struct _ctx_layout lbt;
+ struct _ctx_layout end;
+};
+
+static void __user *get_ctx_through_ctxinfo(struct sctx_info *info)
+{
+ return (void __user *)((char *)info + sizeof(struct sctx_info));
+}
+
+/*
+ * Copy the thread's saved context to/from a signal context presumed to be on
+ * the user stack, and therefore accessed with the appropriate macros from
+ * uaccess.h.
+ */
+static int copy_fpu_to_sigcontext(struct fpu_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &regs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcc, fcc);
+ err |= __put_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_fpu_from_sigcontext(struct fpu_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &regs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcc, fcc);
+ err |= __get_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_lsx_to_sigcontext(struct lsx_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &regs[2*i]);
+ err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
+ &regs[2*i+1]);
+ }
+ err |= __put_user(current->thread.fpu.fcc, fcc);
+ err |= __put_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_lsx_from_sigcontext(struct lsx_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &regs[2*i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ err |= __get_user(fpr_val, &regs[2*i+1]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcc, fcc);
+ err |= __get_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_lasx_to_sigcontext(struct lasx_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &regs[4*i]);
+ err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 1),
+ &regs[4*i+1]);
+ err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 2),
+ &regs[4*i+2]);
+ err |= __put_user(get_fpr64(&current->thread.fpu.fpr[i], 3),
+ &regs[4*i+3]);
+ }
+ err |= __put_user(current->thread.fpu.fcc, fcc);
+ err |= __put_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+static int copy_lasx_from_sigcontext(struct lasx_context __user *ctx)
+{
+ int i;
+ int err = 0;
+ u64 fpr_val;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(fpr_val, &regs[4*i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ err |= __get_user(fpr_val, &regs[4*i+1]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, fpr_val);
+ err |= __get_user(fpr_val, &regs[4*i+2]);
+ set_fpr64(&current->thread.fpu.fpr[i], 2, fpr_val);
+ err |= __get_user(fpr_val, &regs[4*i+3]);
+ set_fpr64(&current->thread.fpu.fpr[i], 3, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcc, fcc);
+ err |= __get_user(current->thread.fpu.fcsr, fcsr);
+
+ return err;
+}
+
+#ifdef CONFIG_CPU_HAS_LBT
+static int copy_lbt_to_sigcontext(struct lbt_context __user *ctx)
+{
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ err |= __put_user(current->thread.lbt.scr0, &regs[0]);
+ err |= __put_user(current->thread.lbt.scr1, &regs[1]);
+ err |= __put_user(current->thread.lbt.scr2, &regs[2]);
+ err |= __put_user(current->thread.lbt.scr3, &regs[3]);
+ err |= __put_user(current->thread.lbt.eflags, eflags);
+
+ return err;
+}
+
+static int copy_lbt_from_sigcontext(struct lbt_context __user *ctx)
+{
+ int err = 0;
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ err |= __get_user(current->thread.lbt.scr0, &regs[0]);
+ err |= __get_user(current->thread.lbt.scr1, &regs[1]);
+ err |= __get_user(current->thread.lbt.scr2, &regs[2]);
+ err |= __get_user(current->thread.lbt.scr3, &regs[3]);
+ err |= __get_user(current->thread.lbt.eflags, eflags);
+
+ return err;
+}
+
+static int copy_ftop_to_sigcontext(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return __put_user(current->thread.fpu.ftop, ftop);
+}
+
+static int copy_ftop_from_sigcontext(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return __get_user(current->thread.fpu.ftop, ftop);
+}
+#endif
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fpu_context(struct fpu_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _save_fp_context(regs, fcc, fcsr);
+}
+
+static int restore_hw_fpu_context(struct fpu_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _restore_fp_context(regs, fcc, fcsr);
+}
+
+static int save_hw_lsx_context(struct lsx_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _save_lsx_context(regs, fcc, fcsr);
+}
+
+static int restore_hw_lsx_context(struct lsx_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _restore_lsx_context(regs, fcc, fcsr);
+}
+
+static int save_hw_lasx_context(struct lasx_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _save_lasx_context(regs, fcc, fcsr);
+}
+
+static int restore_hw_lasx_context(struct lasx_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint64_t __user *fcc = &ctx->fcc;
+ uint32_t __user *fcsr = &ctx->fcsr;
+
+ return _restore_lasx_context(regs, fcc, fcsr);
+}
+
+/*
+ * Wrappers for the assembly _{save,restore}_lbt_context functions.
+ */
+#ifdef CONFIG_CPU_HAS_LBT
+static int save_hw_lbt_context(struct lbt_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ return _save_lbt_context(regs, eflags);
+}
+
+static int restore_hw_lbt_context(struct lbt_context __user *ctx)
+{
+ uint64_t __user *regs = (uint64_t *)&ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&ctx->eflags;
+
+ return _restore_lbt_context(regs, eflags);
+}
+
+static int save_hw_ftop_context(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return _save_ftop_context(ftop);
+}
+
+static int restore_hw_ftop_context(struct lbt_context __user *ctx)
+{
+ uint32_t __user *ftop = &ctx->ftop;
+
+ return _restore_ftop_context(ftop);
+}
+#endif
+
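+/*
+ * A note on the shift below: assuming the usual LoongArch FCSR layout
+ * (exception Enable bits in bits 0-4, the matching Cause bits in bits
+ * 24-28), shifting the enabled mask left by 24 lines it up with the
+ * cause bits that the signal handler may have set.
+ */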
+static int fcsr_pending(unsigned int __user *fcsr)
+{
+ int err, sig = 0;
+ unsigned int csr, enabled;
+
+ err = __get_user(csr, fcsr);
+ enabled = ((csr & FPU_CSR_ALL_E) << 24);
+ /*
+	 * If the signal handler set some FPU exceptions, clear them and
+ * send SIGFPE.
+ */
+ if (csr & enabled) {
+ csr &= ~enabled;
+ err |= __put_user(csr, fcsr);
+ sig = SIGFPE;
+ }
+ return err ?: sig;
+}
+
+/*
+ * Helper routines
+ */
+static int protected_save_fpu_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->fpu.addr;
+ struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
+ uint64_t __user *fcc = &fpu_ctx->fcc;
+ uint32_t __user *fcsr = &fpu_ctx->fcsr;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner())
+ err = save_hw_fpu_context(fpu_ctx);
+ else
+ err = copy_fpu_to_sigcontext(fpu_ctx);
+ unlock_fpu_owner();
+
+ err |= __put_user(FPU_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->fpu.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the FPU context and try again */
+ err = __put_user(0, &regs[0]) |
+ __put_user(0, &regs[31]) |
+ __put_user(0, fcc) |
+ __put_user(0, fcsr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+ return err;
+}
+
+static int protected_restore_fpu_context(struct extctx_layout *extctx)
+{
+ int err = 0, sig = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->fpu.addr;
+ struct fpu_context __user *fpu_ctx = (struct fpu_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&fpu_ctx->regs;
+ uint64_t __user *fcc = &fpu_ctx->fcc;
+ uint32_t __user *fcsr = &fpu_ctx->fcsr;
+
+ err = sig = fcsr_pending(fcsr);
+ if (err < 0)
+ return err;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner())
+ err = restore_hw_fpu_context(fpu_ctx);
+ else
+ err = copy_fpu_from_sigcontext(fpu_ctx);
+ unlock_fpu_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the FPU context and try again */
+ err = __get_user(tmp, &regs[0]) |
+ __get_user(tmp, &regs[31]) |
+ __get_user(tmp, fcc) |
+ __get_user(tmp, fcsr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+ return err ?: sig;
+}
+
+static int protected_save_lsx_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->lsx.addr;
+ struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
+ uint64_t __user *fcc = &lsx_ctx->fcc;
+ uint32_t __user *fcsr = &lsx_ctx->fcsr;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_lsx_enabled())
+ err = save_hw_lsx_context(lsx_ctx);
+ else {
+ if (is_fpu_owner())
+ save_fp(current);
+ err = copy_lsx_to_sigcontext(lsx_ctx);
+ }
+ unlock_fpu_owner();
+
+ err |= __put_user(LSX_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->lsx.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the LSX context and try again */
+ err = __put_user(0, &regs[0]) |
+ __put_user(0, &regs[32*2-1]) |
+ __put_user(0, fcc) |
+ __put_user(0, fcsr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+ return err;
+}
+
+static int protected_restore_lsx_context(struct extctx_layout *extctx)
+{
+ int err = 0, sig = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->lsx.addr;
+ struct lsx_context __user *lsx_ctx = (struct lsx_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lsx_ctx->regs;
+ uint64_t __user *fcc = &lsx_ctx->fcc;
+ uint32_t __user *fcsr = &lsx_ctx->fcsr;
+
+ err = sig = fcsr_pending(fcsr);
+ if (err < 0)
+ return err;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_lsx_enabled())
+ err = restore_hw_lsx_context(lsx_ctx);
+ else {
+ err = copy_lsx_from_sigcontext(lsx_ctx);
+ if (is_fpu_owner())
+ restore_fp(current);
+ }
+ unlock_fpu_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the LSX context and try again */
+ err = __get_user(tmp, &regs[0]) |
+ __get_user(tmp, &regs[32*2-1]) |
+ __get_user(tmp, fcc) |
+ __get_user(tmp, fcsr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+ return err ?: sig;
+}
+
+static int protected_save_lasx_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->lasx.addr;
+ struct lasx_context __user *lasx_ctx =
+ (struct lasx_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
+ uint64_t __user *fcc = &lasx_ctx->fcc;
+ uint32_t __user *fcsr = &lasx_ctx->fcsr;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_lasx_enabled())
+ err = save_hw_lasx_context(lasx_ctx);
+ else {
+ if (is_lsx_enabled())
+ save_lsx(current);
+ else if (is_fpu_owner())
+ save_fp(current);
+ err = copy_lasx_to_sigcontext(lasx_ctx);
+ }
+ unlock_fpu_owner();
+
+ err |= __put_user(LASX_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->lasx.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the LASX context and try again */
+ err = __put_user(0, &regs[0]) |
+ __put_user(0, &regs[32*4-1]) |
+ __put_user(0, fcc) |
+ __put_user(0, fcsr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+ return err;
+}
+
+static int protected_restore_lasx_context(struct extctx_layout *extctx)
+{
+ int err = 0, sig = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->lasx.addr;
+ struct lasx_context __user *lasx_ctx =
+ (struct lasx_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lasx_ctx->regs;
+ uint64_t __user *fcc = &lasx_ctx->fcc;
+ uint32_t __user *fcsr = &lasx_ctx->fcsr;
+
+ err = sig = fcsr_pending(fcsr);
+ if (err < 0)
+ return err;
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_lasx_enabled())
+ err = restore_hw_lasx_context(lasx_ctx);
+ else {
+ err = copy_lasx_from_sigcontext(lasx_ctx);
+ if (is_lsx_enabled())
+ restore_lsx(current);
+ else if (is_fpu_owner())
+ restore_fp(current);
+ }
+ unlock_fpu_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the LASX context and try again */
+ err = __get_user(tmp, &regs[0]) |
+ __get_user(tmp, &regs[32*4-1]) |
+ __get_user(tmp, fcc) |
+ __get_user(tmp, fcsr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+ return err ?: sig;
+}
+
+#ifdef CONFIG_CPU_HAS_LBT
+static int protected_save_lbt_context(struct extctx_layout *extctx)
+{
+ int err = 0;
+ struct sctx_info __user *info = extctx->lbt.addr;
+ struct lbt_context __user *lbt_ctx =
+ (struct lbt_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
+
+ while (1) {
+ lock_lbt_owner();
+ if (is_lbt_owner())
+ err |= save_hw_lbt_context(lbt_ctx);
+ else
+ err |= copy_lbt_to_sigcontext(lbt_ctx);
+ if (is_fpu_owner())
+ err |= save_hw_ftop_context(lbt_ctx);
+ else
+ err |= copy_ftop_to_sigcontext(lbt_ctx);
+ unlock_lbt_owner();
+
+ err |= __put_user(LBT_CTX_MAGIC, &info->magic);
+ err |= __put_user(extctx->lbt.size, &info->size);
+
+ if (likely(!err))
+ break;
+ /* Touch the LBT context and try again */
+ err = __put_user(0, &regs[0]) | __put_user(0, eflags);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+
+static int protected_restore_lbt_context(struct extctx_layout *extctx)
+{
+ int err = 0, tmp __maybe_unused;
+ struct sctx_info __user *info = extctx->lbt.addr;
+ struct lbt_context __user *lbt_ctx =
+ (struct lbt_context *)get_ctx_through_ctxinfo(info);
+ uint64_t __user *regs = (uint64_t *)&lbt_ctx->regs;
+ uint32_t __user *eflags = (uint32_t *)&lbt_ctx->eflags;
+
+ while (1) {
+ lock_lbt_owner();
+ if (is_lbt_owner())
+ err |= restore_hw_lbt_context(lbt_ctx);
+ else
+ err |= copy_lbt_from_sigcontext(lbt_ctx);
+ if (is_fpu_owner())
+ err |= restore_hw_ftop_context(lbt_ctx);
+ else
+ err |= copy_ftop_from_sigcontext(lbt_ctx);
+ unlock_lbt_owner();
+
+ if (likely(!err))
+ break;
+ /* Touch the LBT context and try again */
+ err = __get_user(tmp, &regs[0]) | __get_user(tmp, eflags);
+
+ if (err)
+ return err;
+ }
+
+ return err;
+}
+#endif
+
+static int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc,
+ struct extctx_layout *extctx)
+{
+ int i, err = 0;
+ struct sctx_info __user *info;
+
+ err |= __put_user(regs->csr_era, &sc->sc_pc);
+ err |= __put_user(extctx->flags, &sc->sc_flags);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ if (extctx->lasx.addr)
+ err |= protected_save_lasx_context(extctx);
+ else if (extctx->lsx.addr)
+ err |= protected_save_lsx_context(extctx);
+ else if (extctx->fpu.addr)
+ err |= protected_save_fpu_context(extctx);
+
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx->lbt.addr)
+ err |= protected_save_lbt_context(extctx);
+#endif
+
+ /* Set the "end" magic */
+ info = (struct sctx_info *)extctx->end.addr;
+ err |= __put_user(0, &info->magic);
+ err |= __put_user(0, &info->size);
+
+ return err;
+}
+
+static int parse_extcontext(struct sigcontext __user *sc, struct extctx_layout *extctx)
+{
+ int err = 0;
+ unsigned int magic, size;
+ struct sctx_info __user *info = (struct sctx_info __user *)&sc->sc_extcontext;
+
+	while (1) {
+ err |= __get_user(magic, &info->magic);
+ err |= __get_user(size, &info->size);
+ if (err)
+ return err;
+
+ switch (magic) {
+ case 0: /* END */
+ goto done;
+
+ case FPU_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct fpu_context)))
+ goto invalid;
+ extctx->fpu.addr = info;
+ break;
+
+ case LSX_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct lsx_context)))
+ goto invalid;
+ extctx->lsx.addr = info;
+ break;
+
+ case LASX_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct lasx_context)))
+ goto invalid;
+ extctx->lasx.addr = info;
+ break;
+
+ case LBT_CTX_MAGIC:
+ if (size < (sizeof(struct sctx_info) +
+ sizeof(struct lbt_context)))
+ goto invalid;
+ extctx->lbt.addr = info;
+ break;
+
+ default:
+ goto invalid;
+ }
+
+ info = (struct sctx_info *)((char *)info + size);
+ }
+
+done:
+ return 0;
+
+invalid:
+ return -EINVAL;
+}
+
+static int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int i, err = 0;
+ struct extctx_layout extctx;
+
+ memset(&extctx, 0, sizeof(struct extctx_layout));
+
+ err = __get_user(extctx.flags, &sc->sc_flags);
+ if (err)
+ goto bad;
+
+ err = parse_extcontext(sc, &extctx);
+ if (err)
+ goto bad;
+
+ conditional_used_math(extctx.flags & SC_USED_FP);
+
+ /*
+	 * The signal handler may have used the FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (!(extctx.flags & SC_USED_FP))
+ lose_fpu(0);
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->csr_era, &sc->sc_pc);
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ if (extctx.lasx.addr)
+ err |= protected_restore_lasx_context(&extctx);
+ else if (extctx.lsx.addr)
+ err |= protected_restore_lsx_context(&extctx);
+ else if (extctx.fpu.addr)
+ err |= protected_restore_fpu_context(&extctx);
+
+#ifdef CONFIG_CPU_HAS_LBT
+ if (extctx.lbt.addr)
+ err |= protected_restore_lbt_context(&extctx);
+#endif
+
+bad:
+ return err;
+}
+
+static unsigned int handle_flags(void)
+{
+ unsigned int flags = 0;
+
+ flags = used_math() ? SC_USED_FP : 0;
+
+ switch (current->thread.error_code) {
+ case 1:
+ flags |= SC_ADDRERR_RD;
+ break;
+ case 2:
+ flags |= SC_ADDRERR_WR;
+ break;
+ }
+
+ return flags;
+}
+
+static unsigned long extframe_alloc(struct extctx_layout *extctx,
+ struct _ctx_layout *layout,
+ size_t size, unsigned int align, unsigned long base)
+{
+ unsigned long new_base = base - size;
+
+ new_base = round_down(new_base, (align < 16 ? 16 : align));
+ new_base -= sizeof(struct sctx_info);
+
+ layout->addr = (void *)new_base;
+ layout->size = (unsigned int)(base - new_base);
+ extctx->size += layout->size;
+
+ return new_base;
+}
+
+static unsigned long setup_extcontext(struct extctx_layout *extctx, unsigned long sp)
+{
+ unsigned long new_sp = sp;
+
+ memset(extctx, 0, sizeof(struct extctx_layout));
+
+ extctx->flags = handle_flags();
+
+ /* Grow down, alloc "end" context info first. */
+ new_sp -= sizeof(struct sctx_info);
+ extctx->end.addr = (void *)new_sp;
+ extctx->end.size = (unsigned int)sizeof(struct sctx_info);
+ extctx->size += extctx->end.size;
+
+ if (extctx->flags & SC_USED_FP) {
+ if (cpu_has_lasx && thread_lasx_context_live())
+ new_sp = extframe_alloc(extctx, &extctx->lasx,
+ sizeof(struct lasx_context), LASX_CTX_ALIGN, new_sp);
+ else if (cpu_has_lsx && thread_lsx_context_live())
+ new_sp = extframe_alloc(extctx, &extctx->lsx,
+ sizeof(struct lsx_context), LSX_CTX_ALIGN, new_sp);
+ else if (cpu_has_fpu)
+ new_sp = extframe_alloc(extctx, &extctx->fpu,
+ sizeof(struct fpu_context), FPU_CTX_ALIGN, new_sp);
+ }
+
+#ifdef CONFIG_CPU_HAS_LBT
+ if (cpu_has_lbt && thread_lbt_context_live()) {
+ new_sp = extframe_alloc(extctx, &extctx->lbt,
+ sizeof(struct lbt_context), LBT_CTX_ALIGN, new_sp);
+ }
+#endif
+
+ return new_sp;
+}
+
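+/*
+ * Rough sketch of the resulting user-stack layout (grows downwards; only
+ * the context types that are live get a slot):
+ *
+ *	sp after sigsp() and 16-byte rounding
+ *	  sctx_info "end" record (magic/size = 0)
+ *	  [sctx_info + LASX/LSX/FPU context]	(largest live FP unit only)
+ *	  [sctx_info + LBT context]		(CONFIG_CPU_HAS_LBT only)
+ *	  struct rt_sigframe			<- sp passed to the handler
+ */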
+static void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ struct extctx_layout *extctx)
+{
+ unsigned long sp;
+
+ /* Default to using normal stack */
+ sp = regs->regs[3];
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) &&
+ !likely(on_sig_stack(sp - sizeof(struct rt_sigframe))))
+ return (void __user __force *)(-1UL);
+
+ sp = sigsp(sp, ksig);
+ sp = round_down(sp, 16);
+ sp = setup_extcontext(extctx, sp);
+ sp -= sizeof(struct rt_sigframe);
+
+ if (!IS_ALIGNED(sp, 16))
+ BUG();
+
+ return (void __user *)sp;
+}
+
+/*
+ * Restore the saved signal mask and context when returning from a signal handler.
+ */
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ int sig;
+ sigset_t set;
+ struct pt_regs *regs;
+ struct rt_sigframe __user *frame;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[3];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->rs_uctx.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uctx.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ regs->regs[0] = 0; /* No syscall restarting */
+ if (restore_altstack(&frame->rs_uctx.uc_stack))
+ goto badframe;
+
+ return regs->regs[4];
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ int err = 0;
+ struct extctx_layout extctx;
+ struct rt_sigframe __user *frame;
+
+ frame = get_sigframe(ksig, regs, &extctx);
+ if (!access_ok(frame, sizeof(*frame) + extctx.size))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ err |= copy_siginfo_to_user(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uctx.uc_flags);
+ err |= __put_user(NULL, &frame->rs_uctx.uc_link);
+ err |= __save_altstack(&frame->rs_uctx.uc_stack, regs->regs[3]);
+ err |= setup_sigcontext(regs, &frame->rs_uctx.uc_mcontext, &extctx);
+ err |= __copy_to_user(&frame->rs_uctx.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = pointer to siginfo
+ * a2 = pointer to ucontext
+ *
+	 * csr_era points to the signal handler, $r3 (sp) points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[4] = ksig->sig;
+ regs->regs[5] = (unsigned long) &frame->rs_info;
+ regs->regs[6] = (unsigned long) &frame->rs_uctx;
+ regs->regs[3] = (unsigned long) frame;
+ regs->regs[1] = (unsigned long) sig_return;
+ regs->csr_era = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->csr_era, regs->regs[1]);
+
+ return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+ sigset_t *oldset = sigmask_to_save();
+ void *vdso = current->mm->context.vdso;
+
+ /* Are we from a system call? */
+ if (regs->regs[0]) {
+ switch (regs->regs[4]) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->regs[4] = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->regs[4] = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
+ regs->regs[4] = regs->orig_a0;
+ regs->csr_era -= 4;
+ }
+
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ rseq_signal_deliver(ksig, regs);
+
+ ret = setup_rt_frame(vdso + current->thread.vdso->offset_sigreturn, ksig, regs, oldset);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
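+/*
+ * A note on the restart paths above and below: LoongArch instructions are
+ * 4 bytes, so "csr_era -= 4" re-points the return address at the syscall
+ * instruction itself, and $a7 (regs[11]) supplies the (possibly rewritten)
+ * syscall number when it is re-executed.
+ */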
+void arch_do_signal_or_restart(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /* Are we from a system call? */
+ if (regs->regs[0]) {
+ switch (regs->regs[4]) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->regs[4] = regs->orig_a0;
+ regs->csr_era -= 4;
+ break;
+
+ case -ERESTART_RESTARTBLOCK:
+ regs->regs[4] = regs->orig_a0;
+ regs->regs[11] = __NR_restart_syscall;
+ regs->csr_era -= 4;
+ break;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+	 * If there's no signal to deliver, we just put the saved sigmask
+	 * back.
+ */
+ restore_saved_sigmask();
+}
diff --git a/arch/loongarch/kernel/smp.c b/arch/loongarch/kernel/smp.c
new file mode 100644
index 0000000000..42e3a0e189
--- /dev/null
+++ b/arch/loongarch/kernel/smp.c
@@ -0,0 +1,701 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 2000, 2001 Kanoj Sarcar
+ * Copyright (C) 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
+ */
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/profile.h>
+#include <linux/seq_file.h>
+#include <linux/smp.h>
+#include <linux/threads.h>
+#include <linux/export.h>
+#include <linux/syscore_ops.h>
+#include <linux/time.h>
+#include <linux/tracepoint.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/cpu.h>
+#include <asm/idle.h>
+#include <asm/loongson.h>
+#include <asm/mmu_context.h>
+#include <asm/numa.h>
+#include <asm/processor.h>
+#include <asm/setup.h>
+#include <asm/time.h>
+
+int __cpu_number_map[NR_CPUS]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
+
+/* Representing the threads (siblings) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* Representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
+/*
+ * A logical CPU mask containing only one VPE per core, to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
+struct secondary_data cpuboot_data;
+static DEFINE_PER_CPU(int, cpu_state);
+
+enum ipi_msg_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNCTION,
+};
+
+static const char *ipi_types[NR_IPI] __tracepoint_string = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNCTION] = "Function call interrupts",
+};
+
+void show_ipi_list(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < NR_IPI; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i, prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10u ", per_cpu(irq_stat, cpu).ipi_irqs[i]);
+ seq_printf(p, " LoongArch %d %s\n", i + 1, ipi_types[i]);
+ }
+}
+
+/* Send mailbox buffer via Mail_Send */
+static void csr_mail_send(uint64_t data, int cpu, int mailbox)
+{
+ uint64_t val;
+
+ /* Send high 32 bits */
+ val = IOCSR_MBUF_SEND_BLOCKING;
+ val |= (IOCSR_MBUF_SEND_BOX_HI(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+ val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+ val |= (data & IOCSR_MBUF_SEND_H32_MASK);
+ iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+
+ /* Send low 32 bits */
+ val = IOCSR_MBUF_SEND_BLOCKING;
+ val |= (IOCSR_MBUF_SEND_BOX_LO(mailbox) << IOCSR_MBUF_SEND_BOX_SHIFT);
+ val |= (cpu << IOCSR_MBUF_SEND_CPU_SHIFT);
+ val |= (data << IOCSR_MBUF_SEND_BUF_SHIFT);
+ iocsr_write64(val, LOONGARCH_IOCSR_MBUF_SEND);
+};
+
+static u32 ipi_read_clear(int cpu)
+{
+ u32 action;
+
+ /* Load the ipi register to figure out what we're supposed to do */
+ action = iocsr_read32(LOONGARCH_IOCSR_IPI_STATUS);
+ /* Clear the ipi register to clear the interrupt */
+ iocsr_write32(action, LOONGARCH_IOCSR_IPI_CLEAR);
+ wbflush();
+
+ return action;
+}
+
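+/*
+ * For example (illustrative), action = SMP_RESCHEDULE | SMP_CALL_FUNCTION
+ * results in two IOCSR IPI_SEND writes, one per set bit; ffs() returns
+ * 1-based bit positions, hence the "irq - 1" below.
+ */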
+static void ipi_write_action(int cpu, u32 action)
+{
+ unsigned int irq = 0;
+
+ while ((irq = ffs(action))) {
+ uint32_t val = IOCSR_IPI_SEND_BLOCKING;
+
+ val |= (irq - 1);
+ val |= (cpu << IOCSR_IPI_SEND_CPU_SHIFT);
+ iocsr_write32(val, LOONGARCH_IOCSR_IPI_SEND);
+ action &= ~BIT(irq - 1);
+ }
+}
+
+void loongson_send_ipi_single(int cpu, unsigned int action)
+{
+ ipi_write_action(cpu_logical_map(cpu), (u32)action);
+}
+
+void loongson_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ ipi_write_action(cpu_logical_map(i), (u32)action);
+}
+
+/*
+ * This function sends a 'reschedule' IPI to another CPU.
+ * It goes straight through and wastes no time serializing
+ * anything. Worst case is that we lose a reschedule ...
+ */
+void arch_smp_send_reschedule(int cpu)
+{
+ loongson_send_ipi_single(cpu, SMP_RESCHEDULE);
+}
+EXPORT_SYMBOL_GPL(arch_smp_send_reschedule);
+
+irqreturn_t loongson_ipi_interrupt(int irq, void *dev)
+{
+ unsigned int action;
+ unsigned int cpu = smp_processor_id();
+
+ action = ipi_read_clear(cpu_logical_map(cpu));
+
+ if (action & SMP_RESCHEDULE) {
+ scheduler_ipi();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_RESCHEDULE]++;
+ }
+
+ if (action & SMP_CALL_FUNCTION) {
+ generic_smp_call_function_interrupt();
+ per_cpu(irq_stat, cpu).ipi_irqs[IPI_CALL_FUNCTION]++;
+ }
+
+ return IRQ_HANDLED;
+}
+
+static void __init fdt_smp_setup(void)
+{
+#ifdef CONFIG_OF
+ unsigned int cpu, cpuid;
+ struct device_node *node = NULL;
+
+ for_each_of_cpu_node(node) {
+ if (!of_device_is_available(node))
+ continue;
+
+ cpuid = of_get_cpu_hwid(node, 0);
+ if (cpuid >= nr_cpu_ids)
+ continue;
+
+ if (cpuid == loongson_sysconf.boot_cpu_id) {
+ cpu = 0;
+ numa_add_cpu(cpu);
+ } else {
+ cpu = cpumask_next_zero(-1, cpu_present_mask);
+ }
+
+ num_processors++;
+ set_cpu_possible(cpu, true);
+ set_cpu_present(cpu, true);
+ __cpu_number_map[cpuid] = cpu;
+ __cpu_logical_map[cpu] = cpuid;
+ }
+
+ loongson_sysconf.nr_cpus = num_processors;
+ set_bit(0, &(loongson_sysconf.cores_io_master));
+#endif
+}
+
+void __init loongson_smp_setup(void)
+{
+ fdt_smp_setup();
+
+ cpu_data[0].core = cpu_logical_map(0) % loongson_sysconf.cores_per_package;
+ cpu_data[0].package = cpu_logical_map(0) / loongson_sysconf.cores_per_package;
+
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+ pr_info("Detected %i available CPU(s)\n", loongson_sysconf.nr_cpus);
+}
+
+void __init loongson_prepare_cpus(unsigned int max_cpus)
+{
+ int i = 0;
+
+ parse_acpi_topology();
+
+ for (i = 0; i < loongson_sysconf.nr_cpus; i++) {
+ set_cpu_present(i, true);
+ csr_mail_send(0, __cpu_logical_map[i], 0);
+ cpu_data[i].global_id = __cpu_logical_map[i];
+ }
+
+ per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;
+}
+
+/*
+ * Set up the PC, SP, and TP of a secondary processor and start it running!
+ */
+void loongson_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned long entry;
+
+ pr_info("Booting CPU#%d...\n", cpu);
+
+ entry = __pa_symbol((unsigned long)&smpboot_entry);
+ cpuboot_data.stack = (unsigned long)__KSTK_TOS(idle);
+ cpuboot_data.thread_info = (unsigned long)task_thread_info(idle);
+
+ csr_mail_send(entry, cpu_logical_map(cpu), 0);
+
+ loongson_send_ipi_single(cpu, SMP_BOOT_CPU);
+}
+
+/*
+ * SMP init and finish on secondary CPUs
+ */
+void loongson_init_secondary(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int imask = ECFGF_IP0 | ECFGF_IP1 | ECFGF_IP2 |
+ ECFGF_IPI | ECFGF_PMC | ECFGF_TIMER;
+
+ change_csr_ecfg(ECFG0_IM, imask);
+
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+
+#ifdef CONFIG_NUMA
+ numa_add_cpu(cpu);
+#endif
+ per_cpu(cpu_state, cpu) = CPU_ONLINE;
+ cpu_data[cpu].package =
+ cpu_logical_map(cpu) / loongson_sysconf.cores_per_package;
+ cpu_data[cpu].core = pptt_enabled ? cpu_data[cpu].core :
+ cpu_logical_map(cpu) % loongson_sysconf.cores_per_package;
+}
+
+void loongson_smp_finish(void)
+{
+ local_irq_enable();
+ iocsr_write64(0, LOONGARCH_IOCSR_MBUF0);
+ pr_info("CPU#%d finished\n", smp_processor_id());
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+int loongson_cpu_disable(void)
+{
+ unsigned long flags;
+ unsigned int cpu = smp_processor_id();
+
+ if (io_master(cpu))
+ return -EBUSY;
+
+#ifdef CONFIG_NUMA
+ numa_remove_cpu(cpu);
+#endif
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ local_irq_save(flags);
+ irq_migrate_all_off_this_cpu();
+ clear_csr_ecfg(ECFG0_IM);
+ local_irq_restore(flags);
+ local_flush_tlb_all();
+
+ return 0;
+}
+
+void loongson_cpu_die(unsigned int cpu)
+{
+ while (per_cpu(cpu_state, cpu) != CPU_DEAD)
+ cpu_relax();
+
+ mb();
+}
+
+void __noreturn arch_cpu_idle_dead(void)
+{
+ register uint64_t addr;
+ register void (*init_fn)(void);
+
+ idle_task_exit();
+ local_irq_enable();
+ set_csr_ecfg(ECFGF_IPI);
+ __this_cpu_write(cpu_state, CPU_DEAD);
+
+ __smp_mb();
+ do {
+ __asm__ __volatile__("idle 0\n\t");
+ addr = iocsr_read64(LOONGARCH_IOCSR_MBUF0);
+ } while (addr == 0);
+
+ init_fn = (void *)TO_CACHE(addr);
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_CLEAR);
+
+ init_fn();
+ BUG();
+}
+
+#endif
+
+/*
+ * Power management
+ */
+#ifdef CONFIG_PM
+
+static int loongson_ipi_suspend(void)
+{
+ return 0;
+}
+
+static void loongson_ipi_resume(void)
+{
+ iocsr_write32(0xffffffff, LOONGARCH_IOCSR_IPI_EN);
+}
+
+static struct syscore_ops loongson_ipi_syscore_ops = {
+ .resume = loongson_ipi_resume,
+ .suspend = loongson_ipi_suspend,
+};
+
+/*
+ * Enable the boot CPU's IPIs before enabling non-boot CPUs
+ * during syscore_resume.
+ */
+static int __init ipi_pm_init(void)
+{
+ register_syscore_ops(&loongson_ipi_syscore_ops);
+ return 0;
+}
+
+core_initcall(ipi_pm_init);
+#endif
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+}
+
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+ for_each_cpu(i, &cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
+ }
+ }
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpus_are_siblings(i, k))
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
+}
+
+/* Preload SMP state for boot cpu */
+void smp_prepare_boot_cpu(void)
+{
+ unsigned int cpu, node, rr_node;
+
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+ set_my_cpu_offset(per_cpu_offset(0));
+
+ rr_node = first_node(node_online_map);
+ for_each_possible_cpu(cpu) {
+ node = early_cpu_to_node(cpu);
+
+ /*
+ * The mapping between present cpus and nodes has been
+ * built during MADT and SRAT parsing.
+ *
+		 * If possible cpus = present cpus here, early_cpu_to_node
+		 * will return a valid node.
+		 *
+		 * If possible cpus > present cpus here (e.g. some possible
+		 * cpus will be added by cpu-hotplug later), then for possible
+		 * but not present cpus, early_cpu_to_node will return
+		 * NUMA_NO_NODE, and we just map them to online nodes in a
+		 * round-robin way. Once hotplugged, the correct mapping will
+		 * be built for them.
+ */
+ if (node != NUMA_NO_NODE)
+ set_cpu_numa_node(cpu, node);
+ else {
+ set_cpu_numa_node(cpu, rr_node);
+ rr_node = next_node_in(rr_node, node_online_map);
+ }
+ }
+}
+
+/* called from main before smp_init() */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ init_new_context(current, &init_mm);
+ current_thread_info()->cpu = 0;
+ loongson_prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ loongson_boot_secondary(cpu, tidle);
+
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(5000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
+
+ return 0;
+}
+
+/*
+ * First C code run on the secondary CPUs after being started up by
+ * the master.
+ */
+asmlinkage void start_secondary(void)
+{
+ unsigned int cpu;
+
+ sync_counter();
+ cpu = raw_smp_processor_id();
+ set_my_cpu_offset(per_cpu_offset(cpu));
+ rcu_cpu_starting(cpu);
+
+ cpu_probe();
+ constant_clockevent_init();
+ loongson_init_secondary();
+
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
+ notify_cpu_starting(cpu);
+
+ /* Notify boot CPU that we're starting */
+ complete(&cpu_starting);
+
+ /* The CPU is running, now mark it online */
+ set_cpu_online(cpu, true);
+
+ calculate_cpu_foreign_map();
+
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up()
+ */
+ complete(&cpu_running);
+
+ /*
+	 * Interrupts will be enabled in loongson_smp_finish(); enabling them
+	 * too early is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ loongson_smp_finish();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+static void stop_this_cpu(void *dummy)
+{
+ set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
+ local_irq_disable();
+ while (true);
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+#ifdef CONFIG_PROFILING
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+#endif
+
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+ local_flush_tlb_mm((struct mm_struct *)mm);
+}
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
+ preempt_disable();
+
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ on_each_cpu_mask(mm_cpumask(mm), flush_tlb_mm_ipi, mm, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
+ local_flush_tlb_mm(mm);
+ }
+
+ preempt_enable();
+}
+
+struct flush_tlb_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+
+ preempt_disable();
+ if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu_mask(mm_cpumask(mm), flush_tlb_range_ipi, &fd, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ cpu_context(cpu, mm) = 0;
+ }
+ local_flush_tlb_range(vma, start, end);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ preempt_disable();
+ if ((atomic_read(&vma->vm_mm->mm_users) != 1) || (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
+
+ on_each_cpu_mask(mm_cpumask(vma->vm_mm), flush_tlb_page_ipi, &fd, 1);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+ cpu_context(cpu, vma->vm_mm) = 0;
+ }
+ local_flush_tlb_page(vma, page);
+ }
+ preempt_enable();
+}
+EXPORT_SYMBOL(flush_tlb_page);
+
+static void flush_tlb_one_ipi(void *info)
+{
+ unsigned long vaddr = (unsigned long) info;
+
+ local_flush_tlb_one(vaddr);
+}
+
+void flush_tlb_one(unsigned long vaddr)
+{
+ on_each_cpu(flush_tlb_one_ipi, (void *)vaddr, 1);
+}
+EXPORT_SYMBOL(flush_tlb_one);
diff --git a/arch/loongarch/kernel/stacktrace.c b/arch/loongarch/kernel/stacktrace.c
new file mode 100644
index 0000000000..f623feb212
--- /dev/null
+++ b/arch/loongarch/kernel/stacktrace.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/stacktrace.h>
+#include <asm/unwind.h>
+
+void arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
+ struct task_struct *task, struct pt_regs *regs)
+{
+ unsigned long addr;
+ struct pt_regs dummyregs;
+ struct unwind_state state;
+
+ if (!regs) {
+ regs = &dummyregs;
+
+ if (task == current) {
+ regs->regs[3] = (unsigned long)__builtin_frame_address(0);
+ regs->csr_era = (unsigned long)__builtin_return_address(0);
+ } else {
+ regs->regs[3] = thread_saved_fp(task);
+ regs->csr_era = thread_saved_ra(task);
+ }
+ regs->regs[1] = 0;
+ }
+
+ for (unwind_start(&state, task, regs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ if (!addr || !consume_entry(cookie, addr))
+ break;
+ }
+}
+
+static int
+copy_stack_frame(unsigned long fp, struct stack_frame *frame)
+{
+ int ret = 1;
+ unsigned long err;
+ unsigned long __user *user_frame_tail;
+
+ user_frame_tail = (unsigned long *)(fp - sizeof(struct stack_frame));
+ if (!access_ok(user_frame_tail, sizeof(*frame)))
+ return 0;
+
+ pagefault_disable();
+ err = (__copy_from_user_inatomic(frame, user_frame_tail, sizeof(*frame)));
+ if (err || (unsigned long)user_frame_tail >= frame->fp)
+ ret = 0;
+ pagefault_enable();
+
+ return ret;
+}
+
+void arch_stack_walk_user(stack_trace_consume_fn consume_entry, void *cookie,
+ const struct pt_regs *regs)
+{
+ unsigned long fp = regs->regs[22];
+
+ while (fp && !((unsigned long)fp & 0xf)) {
+ struct stack_frame frame;
+
+ frame.fp = 0;
+ frame.ra = 0;
+ if (!copy_stack_frame(fp, &frame))
+ break;
+ if (!frame.ra)
+ break;
+ if (!consume_entry(cookie, frame.ra))
+ break;
+ fp = frame.fp;
+ }
+}
diff --git a/arch/loongarch/kernel/switch.S b/arch/loongarch/kernel/switch.S
new file mode 100644
index 0000000000..31dd8199b2
--- /dev/null
+++ b/arch/loongarch/kernel/switch.S
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/asm-offsets.h>
+#include <asm/loongarch.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+/*
+ * task_struct *__switch_to(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 5
+SYM_FUNC_START(__switch_to)
+ csrrd t1, LOONGARCH_CSR_PRMD
+ stptr.d t1, a0, THREAD_CSRPRMD
+
+ cpu_save_nonscratch a0
+ stptr.d ra, a0, THREAD_REG01
+ stptr.d a3, a0, THREAD_SCHED_RA
+ stptr.d a4, a0, THREAD_SCHED_CFA
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ la t7, __stack_chk_guard
+ LONG_L t8, a1, TASK_STACK_CANARY
+ LONG_S t8, t7, 0
+#endif
+ move tp, a2
+ cpu_restore_nonscratch a1
+
+ li.w t0, _THREAD_SIZE
+ PTR_ADD t0, t0, tp
+ set_saved_sp t0, t1, t2
+
+ ldptr.d t1, a1, THREAD_CSRPRMD
+ csrwr t1, LOONGARCH_CSR_PRMD
+
+ jr ra
+SYM_FUNC_END(__switch_to)
diff --git a/arch/loongarch/kernel/syscall.c b/arch/loongarch/kernel/syscall.c
new file mode 100644
index 0000000000..b4c5acd7aa
--- /dev/null
+++ b/arch/loongarch/kernel/syscall.c
@@ -0,0 +1,64 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Author: Hanlu Li <lihanlu@loongson.cn>
+ * Huacai Chen <chenhuacai@loongson.cn>
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/capability.h>
+#include <linux/entry-common.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+
+#include <asm/asm.h>
+#include <asm/exception.h>
+#include <asm/signal.h>
+#include <asm/switch_to.h>
+#include <asm-generic/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len, unsigned long,
+ prot, unsigned long, flags, unsigned long, fd, off_t, offset)
+{
+ if (offset & ~PAGE_MASK)
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, offset >> PAGE_SHIFT);
+}
+
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
+
+typedef long (*sys_call_fn)(unsigned long, unsigned long,
+ unsigned long, unsigned long, unsigned long, unsigned long);
+
+void noinstr do_syscall(struct pt_regs *regs)
+{
+ unsigned long nr;
+ sys_call_fn syscall_fn;
+
+ nr = regs->regs[11];
+ /* Set for syscall restarting */
+ if (nr < NR_syscalls)
+ regs->regs[0] = nr + 1;
+
+ regs->csr_era += 4;
+ regs->orig_a0 = regs->regs[4];
+ regs->regs[4] = -ENOSYS;
+
+ nr = syscall_enter_from_user_mode(regs, nr);
+
+ if (nr < NR_syscalls) {
+ syscall_fn = sys_call_table[nr];
+ regs->regs[4] = syscall_fn(regs->orig_a0, regs->regs[5], regs->regs[6],
+ regs->regs[7], regs->regs[8], regs->regs[9]);
+ }
+
+ syscall_exit_to_user_mode(regs);
+}
diff --git a/arch/loongarch/kernel/sysrq.c b/arch/loongarch/kernel/sysrq.c
new file mode 100644
index 0000000000..e663c10fa3
--- /dev/null
+++ b/arch/loongarch/kernel/sysrq.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * LoongArch specific sysrq operations.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu-features.h>
+#include <asm/tlb.h>
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&show_lock, flags);
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ pr_info("\n");
+
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(u8 key)
+{
+ sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+ schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static struct sysrq_key_op sysrq_tlbdump_op = {
+ .handler = sysrq_handle_tlbdump,
+ .help_msg = "show-tlbs(x)",
+ .action_msg = "Show TLB entries",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init loongarch_sysrq_init(void)
+{
+ return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(loongarch_sysrq_init);
diff --git a/arch/loongarch/kernel/time.c b/arch/loongarch/kernel/time.c
new file mode 100644
index 0000000000..e7015f7b70
--- /dev/null
+++ b/arch/loongarch/kernel/time.c
@@ -0,0 +1,227 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common time service routines for LoongArch machines.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/clockchips.h>
+#include <linux/delay.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched_clock.h>
+#include <linux/spinlock.h>
+
+#include <asm/cpu-features.h>
+#include <asm/loongarch.h>
+#include <asm/time.h>
+
+u64 cpu_clock_freq;
+EXPORT_SYMBOL(cpu_clock_freq);
+u64 const_clock_freq;
+EXPORT_SYMBOL(const_clock_freq);
+
+static DEFINE_RAW_SPINLOCK(state_lock);
+static DEFINE_PER_CPU(struct clock_event_device, constant_clockevent_device);
+
+static void constant_event_handler(struct clock_event_device *dev)
+{
+}
+
+static irqreturn_t constant_timer_interrupt(int irq, void *data)
+{
+ int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+
+ /* Clear Timer Interrupt */
+ write_csr_tintclear(CSR_TINTCLR_TI);
+ cd = &per_cpu(constant_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static int constant_set_state_oneshot(struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config |= CSR_TCFG_EN;
+ timer_config &= ~CSR_TCFG_PERIOD;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_periodic(struct clock_event_device *evt)
+{
+ unsigned long period;
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ period = const_clock_freq / HZ;
+ timer_config = period & CSR_TCFG_VAL;
+ timer_config |= (CSR_TCFG_PERIOD | CSR_TCFG_EN);
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_set_state_shutdown(struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ raw_spin_lock(&state_lock);
+
+ timer_config = csr_read64(LOONGARCH_CSR_TCFG);
+ timer_config &= ~CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ raw_spin_unlock(&state_lock);
+
+ return 0;
+}
+
+static int constant_timer_next_event(unsigned long delta, struct clock_event_device *evt)
+{
+ unsigned long timer_config;
+
+ delta &= CSR_TCFG_VAL;
+ timer_config = delta | CSR_TCFG_EN;
+ csr_write64(timer_config, LOONGARCH_CSR_TCFG);
+
+ return 0;
+}
+
+static unsigned long __init get_loops_per_jiffy(void)
+{
+ unsigned long lpj = (unsigned long)const_clock_freq;
+
+ do_div(lpj, HZ);
+
+ return lpj;
+}
+
+static long init_offset __nosavedata;
+
+void save_counter(void)
+{
+ init_offset = drdtime();
+}
+
+void sync_counter(void)
+{
+ /* Ensure the counter begins at 0 */
+ csr_write64(init_offset, LOONGARCH_CSR_CNTC);
+}
+
+static int get_timer_irq(void)
+{
+ struct irq_domain *d = irq_find_matching_fwnode(cpuintc_handle, DOMAIN_BUS_ANY);
+
+ if (d)
+ return irq_create_mapping(d, INT_TI);
+
+ return -EINVAL;
+}
+
+int constant_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned long min_delta = 0x600;
+ unsigned long max_delta = (1UL << 48) - 1;
+ struct clock_event_device *cd;
+ static int irq = 0, timer_irq_installed = 0;
+
+ if (!timer_irq_installed) {
+ irq = get_timer_irq();
+ if (irq < 0)
+ pr_err("Failed to map irq %d (timer)\n", irq);
+ }
+
+ cd = &per_cpu(constant_clockevent_device, cpu);
+
+ cd->name = "Constant";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_PERCPU;
+
+ cd->irq = irq;
+ cd->rating = 320;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_state_oneshot = constant_set_state_oneshot;
+ cd->set_state_oneshot_stopped = constant_set_state_shutdown;
+ cd->set_state_periodic = constant_set_state_periodic;
+ cd->set_state_shutdown = constant_set_state_shutdown;
+ cd->set_next_event = constant_timer_next_event;
+ cd->event_handler = constant_event_handler;
+
+ clockevents_config_and_register(cd, const_clock_freq, min_delta, max_delta);
+
+ if (timer_irq_installed)
+ return 0;
+
+ timer_irq_installed = 1;
+
+ sync_counter();
+
+ if (request_irq(irq, constant_timer_interrupt, IRQF_PERCPU | IRQF_TIMER, "timer", NULL))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+
+ lpj_fine = get_loops_per_jiffy();
+ pr_info("Constant clock event device register\n");
+
+ return 0;
+}
+
+static u64 read_const_counter(struct clocksource *clk)
+{
+ return drdtime();
+}
+
+static noinstr u64 sched_clock_read(void)
+{
+ return drdtime();
+}
+
+static struct clocksource clocksource_const = {
+ .name = "Constant",
+ .rating = 400,
+ .read = read_const_counter,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ .vdso_clock_mode = VDSO_CLOCKMODE_CPU,
+};
+
+int __init constant_clocksource_init(void)
+{
+ int res;
+ unsigned long freq = const_clock_freq;
+
+ res = clocksource_register_hz(&clocksource_const, freq);
+
+ sched_clock_register(sched_clock_read, 64, freq);
+
+ pr_info("Constant clock source device register\n");
+
+ return res;
+}
+
+void __init time_init(void)
+{
+ if (!cpu_has_cpucfg)
+ const_clock_freq = cpu_clock_freq;
+ else
+ const_clock_freq = calc_const_freq();
+
+ init_offset = -(drdtime() - csr_read64(LOONGARCH_CSR_CNTC));
+
+ constant_clockevent_init();
+ constant_clocksource_init();
+}
diff --git a/arch/loongarch/kernel/topology.c b/arch/loongarch/kernel/topology.c
new file mode 100644
index 0000000000..3fd1660066
--- /dev/null
+++ b/arch/loongarch/kernel/topology.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/acpi.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+#include <asm/bootinfo.h>
+
+#include <acpi/processor.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+#ifdef CONFIG_HOTPLUG_CPU
+int arch_register_cpu(int cpu)
+{
+ int ret;
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = 1;
+ ret = register_cpu(c, cpu);
+ if (ret < 0)
+ pr_warn("register_cpu %d failed (%d)\n", cpu, ret);
+
+ return ret;
+}
+EXPORT_SYMBOL(arch_register_cpu);
+
+void arch_unregister_cpu(int cpu)
+{
+ struct cpu *c = &per_cpu(cpu_devices, cpu);
+
+ c->hotpluggable = 0;
+ unregister_cpu(c);
+}
+EXPORT_SYMBOL(arch_unregister_cpu);
+#endif
+
+static int __init topology_init(void)
+{
+ int i, ret;
+
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
+ c->hotpluggable = !io_master(i);
+ ret = register_cpu(c, i);
+ if (ret < 0)
+ pr_warn("topology_init: register_cpu %d failed (%d)\n", i, ret);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/loongarch/kernel/traps.c b/arch/loongarch/kernel/traps.c
new file mode 100644
index 0000000000..aebfc3733a
--- /dev/null
+++ b/arch/loongarch/kernel/traps.c
@@ -0,0 +1,1170 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+#include <linux/bitfield.h>
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/entry-common.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/memblock.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/notifier.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cpu.h>
+#include <asm/exception.h>
+#include <asm/fpu.h>
+#include <asm/lbt.h>
+#include <asm/inst.h>
+#include <asm/kgdb.h>
+#include <asm/loongarch.h>
+#include <asm/mmu_context.h>
+#include <asm/pgtable.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/siginfo.h>
+#include <asm/stacktrace.h>
+#include <asm/tlb.h>
+#include <asm/types.h>
+#include <asm/unwind.h>
+#include <asm/uprobes.h>
+
+#include "access-helper.h"
+
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl, bool user)
+{
+ unsigned long addr;
+ struct unwind_state state;
+ struct pt_regs *pregs = (struct pt_regs *)regs;
+
+ if (!task)
+ task = current;
+
+ printk("%sCall Trace:", loglvl);
+ for (unwind_start(&state, task, pregs);
+ !unwind_done(&state); unwind_next_frame(&state)) {
+ addr = unwind_get_return_address(&state);
+ print_ip_sym(loglvl, addr);
+ }
+ printk("%s\n", loglvl);
+}
+
+static void show_stacktrace(struct task_struct *task,
+ const struct pt_regs *regs, const char *loglvl, bool user)
+{
+ int i;
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long stackdata;
+ unsigned long *sp = (unsigned long *)regs->regs[3];
+
+ printk("%sStack :", loglvl);
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk("%s ", loglvl);
+ }
+ if (i > 39) {
+ pr_cont(" ...");
+ break;
+ }
+
+ if (__get_addr(&stackdata, sp++, user)) {
+ pr_cont(" (Bad stack address)");
+ break;
+ }
+
+ pr_cont(" %0*lx", field, stackdata);
+ i++;
+ }
+ pr_cont("\n");
+ show_backtrace(task, regs, loglvl, user);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ struct pt_regs regs;
+
+ regs.csr_crmd = 0;
+ if (sp) {
+ regs.csr_era = 0;
+ regs.regs[1] = 0;
+ regs.regs[3] = (unsigned long)sp;
+ } else {
+ if (!task || task == current)
+ prepare_frametrace(&regs);
+ else {
+ regs.csr_era = task->thread.reg01;
+ regs.regs[1] = 0;
+ regs.regs[3] = task->thread.reg03;
+ regs.regs[22] = task->thread.reg22;
+ }
+ }
+
+ show_stacktrace(task, &regs, loglvl, false);
+}
+
+static void show_code(unsigned int *pc, bool user)
+{
+ long i;
+ unsigned int insn;
+
+ printk("Code:");
+
+ for (i = -3; i < 6; i++) {
+ if (__get_inst(&insn, pc + i, user)) {
+ pr_cont(" (Bad address in era)\n");
+ break;
+ }
+ pr_cont("%c%08x%c", (i?' ':'<'), insn, (i?' ':'>'));
+ }
+ pr_cont("\n");
+}
+
+static void print_bool_fragment(const char *key, unsigned long val, bool first)
+{
+ /* e.g. "+PG", "-DA" */
+ pr_cont("%s%c%s", first ? "" : " ", val ? '+' : '-', key);
+}
+
+static void print_plv_fragment(const char *key, int val)
+{
+ /* e.g. "PLV0", "PPLV3" */
+ pr_cont("%s%d", key, val);
+}
+
+static void print_memory_type_fragment(const char *key, unsigned long val)
+{
+ const char *humanized_type;
+
+ switch (val) {
+ case 0:
+ humanized_type = "SUC";
+ break;
+ case 1:
+ humanized_type = "CC";
+ break;
+ case 2:
+ humanized_type = "WUC";
+ break;
+ default:
+ pr_cont(" %s=Reserved(%lu)", key, val);
+ return;
+ }
+
+ /* e.g. " DATM=WUC" */
+ pr_cont(" %s=%s", key, humanized_type);
+}
+
+static void print_intr_fragment(const char *key, unsigned long val)
+{
+ /* e.g. "LIE=0-1,3,5-7" */
+ pr_cont("%s=%*pbl", key, EXCCODE_INT_NUM, &val);
+}
+
+static void print_crmd(unsigned long x)
+{
+ printk(" CRMD: %08lx (", x);
+ print_plv_fragment("PLV", (int) FIELD_GET(CSR_CRMD_PLV, x));
+ print_bool_fragment("IE", FIELD_GET(CSR_CRMD_IE, x), false);
+ print_bool_fragment("DA", FIELD_GET(CSR_CRMD_DA, x), false);
+ print_bool_fragment("PG", FIELD_GET(CSR_CRMD_PG, x), false);
+ print_memory_type_fragment("DACF", FIELD_GET(CSR_CRMD_DACF, x));
+ print_memory_type_fragment("DACM", FIELD_GET(CSR_CRMD_DACM, x));
+ print_bool_fragment("WE", FIELD_GET(CSR_CRMD_WE, x), false);
+ pr_cont(")\n");
+}
+
+static void print_prmd(unsigned long x)
+{
+ printk(" PRMD: %08lx (", x);
+ print_plv_fragment("PPLV", (int) FIELD_GET(CSR_PRMD_PPLV, x));
+ print_bool_fragment("PIE", FIELD_GET(CSR_PRMD_PIE, x), false);
+ print_bool_fragment("PWE", FIELD_GET(CSR_PRMD_PWE, x), false);
+ pr_cont(")\n");
+}
+
+static void print_euen(unsigned long x)
+{
+ printk(" EUEN: %08lx (", x);
+ print_bool_fragment("FPE", FIELD_GET(CSR_EUEN_FPEN, x), true);
+ print_bool_fragment("SXE", FIELD_GET(CSR_EUEN_LSXEN, x), false);
+ print_bool_fragment("ASXE", FIELD_GET(CSR_EUEN_LASXEN, x), false);
+ print_bool_fragment("BTE", FIELD_GET(CSR_EUEN_LBTEN, x), false);
+ pr_cont(")\n");
+}
+
+static void print_ecfg(unsigned long x)
+{
+ printk(" ECFG: %08lx (", x);
+ print_intr_fragment("LIE", FIELD_GET(CSR_ECFG_IM, x));
+ pr_cont(" VS=%d)\n", (int) FIELD_GET(CSR_ECFG_VS, x));
+}
+
+static const char *humanize_exc_name(unsigned int ecode, unsigned int esubcode)
+{
+ /*
+ * LoongArch users and developers are probably more familiar with the
+ * exception names found in the ISA manual, so print those out instead.
+ * This will require some mapping.
+ */
+ switch (ecode) {
+ case EXCCODE_RSV: return "INT";
+ case EXCCODE_TLBL: return "PIL";
+ case EXCCODE_TLBS: return "PIS";
+ case EXCCODE_TLBI: return "PIF";
+ case EXCCODE_TLBM: return "PME";
+ case EXCCODE_TLBNR: return "PNR";
+ case EXCCODE_TLBNX: return "PNX";
+ case EXCCODE_TLBPE: return "PPI";
+ case EXCCODE_ADE:
+ switch (esubcode) {
+ case EXSUBCODE_ADEF: return "ADEF";
+ case EXSUBCODE_ADEM: return "ADEM";
+ }
+ break;
+ case EXCCODE_ALE: return "ALE";
+ case EXCCODE_BCE: return "BCE";
+ case EXCCODE_SYS: return "SYS";
+ case EXCCODE_BP: return "BRK";
+ case EXCCODE_INE: return "INE";
+ case EXCCODE_IPE: return "IPE";
+ case EXCCODE_FPDIS: return "FPD";
+ case EXCCODE_LSXDIS: return "SXD";
+ case EXCCODE_LASXDIS: return "ASXD";
+ case EXCCODE_FPE:
+ switch (esubcode) {
+ case EXCSUBCODE_FPE: return "FPE";
+ case EXCSUBCODE_VFPE: return "VFPE";
+ }
+ break;
+ case EXCCODE_WATCH:
+ switch (esubcode) {
+ case EXCSUBCODE_WPEF: return "WPEF";
+ case EXCSUBCODE_WPEM: return "WPEM";
+ }
+ break;
+ case EXCCODE_BTDIS: return "BTD";
+ case EXCCODE_BTE: return "BTE";
+ case EXCCODE_GSPR: return "GSPR";
+ case EXCCODE_HVC: return "HVC";
+ case EXCCODE_GCM:
+ switch (esubcode) {
+ case EXCSUBCODE_GCSC: return "GCSC";
+ case EXCSUBCODE_GCHC: return "GCHC";
+ }
+ break;
+ /*
+ * The manual does not mention the EXCCODE_SE case, but print it out
+ * nevertheless.
+ */
+ case EXCCODE_SE: return "SE";
+ }
+
+ return "???";
+}
+
+static void print_estat(unsigned long x)
+{
+ unsigned int ecode = FIELD_GET(CSR_ESTAT_EXC, x);
+ unsigned int esubcode = FIELD_GET(CSR_ESTAT_ESUBCODE, x);
+
+ printk("ESTAT: %08lx [%s] (", x, humanize_exc_name(ecode, esubcode));
+ print_intr_fragment("IS", FIELD_GET(CSR_ESTAT_IS, x));
+ pr_cont(" ECode=%d EsubCode=%d)\n", (int) ecode, (int) esubcode);
+}
+
+static void __show_regs(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int exccode = FIELD_GET(CSR_ESTAT_EXC, regs->csr_estat);
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ /* Print saved GPRs except $zero (substituting with PC/ERA) */
+#define GPR_FIELD(x) field, regs->regs[x]
+ printk("pc %0*lx ra %0*lx tp %0*lx sp %0*lx\n",
+ field, regs->csr_era, GPR_FIELD(1), GPR_FIELD(2), GPR_FIELD(3));
+ printk("a0 %0*lx a1 %0*lx a2 %0*lx a3 %0*lx\n",
+ GPR_FIELD(4), GPR_FIELD(5), GPR_FIELD(6), GPR_FIELD(7));
+ printk("a4 %0*lx a5 %0*lx a6 %0*lx a7 %0*lx\n",
+ GPR_FIELD(8), GPR_FIELD(9), GPR_FIELD(10), GPR_FIELD(11));
+ printk("t0 %0*lx t1 %0*lx t2 %0*lx t3 %0*lx\n",
+ GPR_FIELD(12), GPR_FIELD(13), GPR_FIELD(14), GPR_FIELD(15));
+ printk("t4 %0*lx t5 %0*lx t6 %0*lx t7 %0*lx\n",
+ GPR_FIELD(16), GPR_FIELD(17), GPR_FIELD(18), GPR_FIELD(19));
+ printk("t8 %0*lx u0 %0*lx s9 %0*lx s0 %0*lx\n",
+ GPR_FIELD(20), GPR_FIELD(21), GPR_FIELD(22), GPR_FIELD(23));
+ printk("s1 %0*lx s2 %0*lx s3 %0*lx s4 %0*lx\n",
+ GPR_FIELD(24), GPR_FIELD(25), GPR_FIELD(26), GPR_FIELD(27));
+ printk("s5 %0*lx s6 %0*lx s7 %0*lx s8 %0*lx\n",
+ GPR_FIELD(28), GPR_FIELD(29), GPR_FIELD(30), GPR_FIELD(31));
+
+ /* The slot for $zero is reused as the syscall restart flag */
+ if (regs->regs[0])
+ printk("syscall restart flag: %0*lx\n", GPR_FIELD(0));
+
+ if (user_mode(regs)) {
+ printk(" ra: %0*lx\n", GPR_FIELD(1));
+ printk(" ERA: %0*lx\n", field, regs->csr_era);
+ } else {
+ printk(" ra: %0*lx %pS\n", GPR_FIELD(1), (void *) regs->regs[1]);
+ printk(" ERA: %0*lx %pS\n", field, regs->csr_era, (void *) regs->csr_era);
+ }
+#undef GPR_FIELD
+
+ /* Print saved important CSRs */
+ print_crmd(regs->csr_crmd);
+ print_prmd(regs->csr_prmd);
+ print_euen(regs->csr_euen);
+ print_ecfg(regs->csr_ecfg);
+ print_estat(regs->csr_estat);
+
+ if (exccode >= EXCCODE_TLBL && exccode <= EXCCODE_ALE)
+ printk(" BADV: %0*lx\n", field, regs->csr_badvaddr);
+
+ printk(" PRID: %08x (%s, %s)\n", read_cpucfg(LOONGARCH_CPUCFG0),
+ cpu_family_string(), cpu_full_name_string());
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs((struct pt_regs *)regs);
+ dump_stack();
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ print_modules();
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p)\n",
+ current->comm, current->pid, current_thread_info(), current);
+
+ show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
+ show_code((void *)regs->csr_era, user_mode(regs));
+ printk("\n");
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+void die(const char *str, struct pt_regs *regs)
+{
+ int ret;
+ static int die_counter;
+
+ oops_enter();
+
+ ret = notify_die(DIE_OOPS, str, regs, 0,
+ current->thread.trap_nr, SIGSEGV);
+
+ console_verbose();
+ raw_spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers(regs);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
+
+ if (ret == NOTIFY_STOP)
+ return;
+
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ make_task_dead(SIGSEGV);
+}
+
+static inline void setup_vint_size(unsigned int size)
+{
+ unsigned int vs;
+
+ vs = ilog2(size/4);
+
+ if (vs == 0 || vs > 7)
+ panic("vint_size %d Not support yet", vs);
+
+ csr_xchg32(vs<<CSR_ECFG_VS_SHIFT, CSR_ECFG_VS, LOONGARCH_CSR_ECFG);
+}
+
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+static void force_fcsr_sig(unsigned long fcsr,
+ void __user *fault_addr, struct task_struct *tsk)
+{
+ int si_code = FPE_FLTUNK;
+
+ if (fcsr & FPU_CSR_INV_X)
+ si_code = FPE_FLTINV;
+ else if (fcsr & FPU_CSR_DIV_X)
+ si_code = FPE_FLTDIV;
+ else if (fcsr & FPU_CSR_OVF_X)
+ si_code = FPE_FLTOVF;
+ else if (fcsr & FPU_CSR_UDF_X)
+ si_code = FPE_FLTUND;
+ else if (fcsr & FPU_CSR_INE_X)
+ si_code = FPE_FLTRES;
+
+ force_sig_fault(SIGFPE, si_code, fault_addr);
+}
+
+static int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcsr)
+{
+ int si_code;
+
+ switch (sig) {
+ case 0:
+ return 0;
+
+ case SIGFPE:
+ force_fcsr_sig(fcsr, fault_addr, current);
+ return 1;
+
+ case SIGBUS:
+ force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
+ return 1;
+
+ case SIGSEGV:
+ mmap_read_lock(current->mm);
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
+ si_code = SEGV_ACCERR;
+ else
+ si_code = SEGV_MAPERR;
+ mmap_read_unlock(current->mm);
+ force_sig_fault(SIGSEGV, si_code, fault_addr);
+ return 1;
+
+ default:
+ force_sig(sig);
+ return 1;
+ }
+}
+
+/*
+ * Delayed fp exceptions when doing a lazy ctx switch
+ */
+asmlinkage void noinstr do_fpe(struct pt_regs *regs, unsigned long fcsr)
+{
+ int sig;
+ void __user *fault_addr;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
+ SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_fcsr(LOONGARCH_FCSR0, fcsr & ~mask_fcsr_x(fcsr));
+ local_irq_enable();
+
+ die_if_kernel("FP exception in kernel code", regs);
+
+ sig = SIGFPE;
+ fault_addr = (void __user *) regs->csr_era;
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcsr);
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ade(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ die_if_kernel("Kernel ade access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRERR, (void __user *)regs->csr_badvaddr);
+
+ irqentry_exit(regs, state);
+}
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+int no_unaligned_warning __read_mostly = 1; /* Only 1 warning by default */
+
+asmlinkage void noinstr do_ale(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+#ifndef CONFIG_ARCH_STRICT_ALIGN
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+#else
+ unsigned int *pc;
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, regs->csr_badvaddr);
+
+ /*
+ * Did we catch a fault trying to load an instruction?
+ */
+ if (regs->csr_badvaddr == regs->csr_era)
+ goto sigbus;
+ if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
+ goto sigbus;
+ if (!unaligned_enabled)
+ goto sigbus;
+ if (!no_unaligned_warning)
+ show_registers(regs);
+
+ pc = (unsigned int *)exception_era(regs);
+
+ emulate_load_store_insn(regs, (void __user *)regs->csr_badvaddr, pc);
+
+ goto out;
+
+sigbus:
+ die_if_kernel("Kernel ale access", regs);
+ force_sig_fault(SIGBUS, BUS_ADRALN, (void __user *)regs->csr_badvaddr);
+out:
+#endif
+ irqentry_exit(regs, state);
+}
+
+#ifdef CONFIG_GENERIC_BUG
+int is_valid_bugaddr(unsigned long addr)
+{
+ return 1;
+}
+#endif /* CONFIG_GENERIC_BUG */
+
+static void bug_handler(struct pt_regs *regs)
+{
+ switch (report_bug(regs->csr_era, regs)) {
+ case BUG_TRAP_TYPE_BUG:
+ case BUG_TRAP_TYPE_NONE:
+ die_if_kernel("Oops - BUG", regs);
+ force_sig(SIGTRAP);
+ break;
+
+ case BUG_TRAP_TYPE_WARN:
+ /* Skip the BUG instruction and continue */
+ regs->csr_era += LOONGARCH_INSN_SIZE;
+ break;
+ }
+}
+
+asmlinkage void noinstr do_bce(struct pt_regs *regs)
+{
+ bool user = user_mode(regs);
+ unsigned long era = exception_era(regs);
+ u64 badv = 0, lower = 0, upper = ULONG_MAX;
+ union loongarch_instruction insn;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
+ current->thread.trap_nr = read_csr_excode();
+
+ die_if_kernel("Bounds check error in kernel code", regs);
+
+ /*
+ * Pull out the address that failed bounds checking, and the lower /
+ * upper bound, by minimally looking at the faulting instruction word
+ * and reading from the correct register.
+ */
+ if (__get_inst(&insn.word, (u32 *)era, user))
+ goto bad_era;
+
+ switch (insn.reg3_format.opcode) {
+ case asrtle_op:
+ if (insn.reg3_format.rd != 0)
+ break; /* not asrtle */
+ badv = regs->regs[insn.reg3_format.rj];
+ upper = regs->regs[insn.reg3_format.rk];
+ break;
+
+ case asrtgt_op:
+ if (insn.reg3_format.rd != 0)
+ break; /* not asrtgt */
+ badv = regs->regs[insn.reg3_format.rj];
+ lower = regs->regs[insn.reg3_format.rk];
+ break;
+
+ case ldleb_op:
+ case ldleh_op:
+ case ldlew_op:
+ case ldled_op:
+ case stleb_op:
+ case stleh_op:
+ case stlew_op:
+ case stled_op:
+ case fldles_op:
+ case fldled_op:
+ case fstles_op:
+ case fstled_op:
+ badv = regs->regs[insn.reg3_format.rj];
+ upper = regs->regs[insn.reg3_format.rk];
+ break;
+
+ case ldgtb_op:
+ case ldgth_op:
+ case ldgtw_op:
+ case ldgtd_op:
+ case stgtb_op:
+ case stgth_op:
+ case stgtw_op:
+ case stgtd_op:
+ case fldgts_op:
+ case fldgtd_op:
+ case fstgts_op:
+ case fstgtd_op:
+ badv = regs->regs[insn.reg3_format.rj];
+ lower = regs->regs[insn.reg3_format.rk];
+ break;
+ }
+
+ force_sig_bnderr((void __user *)badv, (void __user *)lower, (void __user *)upper);
+
+out:
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+ return;
+
+bad_era:
+ /*
+ * Cannot pull out the instruction word, hence cannot provide more
+ * info than a regular SIGSEGV in this case.
+ */
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void noinstr do_bp(struct pt_regs *regs)
+{
+ bool user = user_mode(regs);
+ unsigned int opcode, bcode;
+ unsigned long era = exception_era(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
+ if (__get_inst(&opcode, (u32 *)era, user))
+ goto out_sigsegv;
+
+ bcode = (opcode & 0x7fff);
+
+ /*
+ * Notify the kprobe handlers if the instruction is likely to
+ * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_KDB:
+ if (kgdb_breakpoint_handler(regs))
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_BP:
+ if (kprobe_breakpoint_handler(regs))
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (kprobe_singlestep_handler(regs))
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_BP:
+ if (uprobe_breakpoint_handler(regs))
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOLBP:
+ if (uprobe_singlestep_handler(regs))
+ goto out;
+ else
+ break;
+ default:
+ current->thread.trap_nr = read_csr_excode();
+ if (notify_die(DIE_TRAP, "Break", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ }
+
+ switch (bcode) {
+ case BRK_BUG:
+ bug_handler(regs);
+ break;
+ case BRK_DIVZERO:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGFPE, FPE_INTDIV, (void __user *)regs->csr_era);
+ break;
+ case BRK_OVERFLOW:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->csr_era);
+ break;
+ default:
+ die_if_kernel("Break instruction in kernel code", regs);
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->csr_era);
+ break;
+ }
+
+out:
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void noinstr do_watch(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+#ifndef CONFIG_HAVE_HW_BREAKPOINT
+ pr_warn("Hardware watch point handler not implemented!\n");
+#else
+ if (kgdb_breakpoint_handler(regs))
+ goto out;
+
+ if (test_tsk_thread_flag(current, TIF_SINGLESTEP)) {
+ int llbit = (csr_read32(LOONGARCH_CSR_LLBCTL) & 0x1);
+ unsigned long pc = instruction_pointer(regs);
+ union loongarch_instruction *ip = (union loongarch_instruction *)pc;
+
+ if (llbit) {
+ /*
+ * When the ll-sc combo is encountered, it is regarded as a single
+ * instruction. So don't clear llbit and reset CSR.FWPS.Skip until
+ * the ll-sc execution is completed.
+ */
+ csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
+ csr_write32(CSR_LLBCTL_KLO, LOONGARCH_CSR_LLBCTL);
+ goto out;
+ }
+
+ if (pc == current->thread.single_step) {
+ /*
+ * Certain insns are occasionally not skipped when CSR.FWPS.Skip is
+ * set, such as fld.d/fst.d. So single-stepping needs to check whether
+ * csr_era equals the value that was recorded when single-step was last set.
+ */
+ if (!is_self_loop_ins(ip, regs)) {
+ /*
+ * Check whether the target pc of the given instruction equals the
+ * current pc. If so, we should not set the CSR.FWPS.Skip bit, which
+ * would break the original instruction stream.
+ */
+ csr_write32(CSR_FWPC_SKIP, LOONGARCH_CSR_FWPS);
+ goto out;
+ }
+ }
+ } else {
+ breakpoint_handler(regs);
+ watchpoint_handler(regs);
+ }
+
+ force_sig(SIGTRAP);
+out:
+#endif
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_ri(struct pt_regs *regs)
+{
+ int status = SIGILL;
+ unsigned int __maybe_unused opcode;
+ unsigned int __user *era = (unsigned int __user *)exception_era(regs);
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ current->thread.trap_nr = read_csr_excode();
+
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
+ SIGILL) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Reserved instruction in kernel code", regs);
+
+ if (unlikely(get_user(opcode, era) < 0)) {
+ status = SIGSEGV;
+ current->thread.error_code = 1;
+ }
+
+ force_sig(status);
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+static void init_restore_fp(void)
+{
+ if (!used_math()) {
+ /* First time FP context user. */
+ init_fpu();
+ } else {
+ /* This task has formerly used the FP context */
+ if (!is_fpu_owner())
+ own_fpu_inatomic(1);
+ }
+
+ BUG_ON(!is_fp_enabled());
+}
+
+static void init_restore_lsx(void)
+{
+ enable_lsx();
+
+ if (!thread_lsx_context_live()) {
+ /* First time LSX context user */
+ init_restore_fp();
+ init_lsx_upper();
+ set_thread_flag(TIF_LSX_CTX_LIVE);
+ } else {
+ if (!is_simd_owner()) {
+ if (is_fpu_owner()) {
+ restore_lsx_upper(current);
+ } else {
+ __own_fpu();
+ restore_lsx(current);
+ }
+ }
+ }
+
+ set_thread_flag(TIF_USEDSIMD);
+
+ BUG_ON(!is_fp_enabled());
+ BUG_ON(!is_lsx_enabled());
+}
+
+static void init_restore_lasx(void)
+{
+ enable_lasx();
+
+ if (!thread_lasx_context_live()) {
+ /* First time LASX context user */
+ init_restore_lsx();
+ init_lasx_upper();
+ set_thread_flag(TIF_LASX_CTX_LIVE);
+ } else {
+ if (is_fpu_owner() || is_simd_owner()) {
+ init_restore_lsx();
+ restore_lasx_upper(current);
+ } else {
+ __own_fpu();
+ enable_lsx();
+ restore_lasx(current);
+ }
+ }
+
+ set_thread_flag(TIF_USEDSIMD);
+
+ BUG_ON(!is_fp_enabled());
+ BUG_ON(!is_lsx_enabled());
+ BUG_ON(!is_lasx_enabled());
+}
+
+asmlinkage void noinstr do_fpu(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ die_if_kernel("do_fpu invoked from kernel context!", regs);
+ BUG_ON(is_lsx_enabled());
+ BUG_ON(is_lasx_enabled());
+
+ preempt_disable();
+ init_restore_fp();
+ preempt_enable();
+
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lsx(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ if (!cpu_has_lsx) {
+ force_sig(SIGILL);
+ goto out;
+ }
+
+ die_if_kernel("do_lsx invoked from kernel context!", regs);
+ BUG_ON(is_lasx_enabled());
+
+ preempt_disable();
+ init_restore_lsx();
+ preempt_enable();
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_lasx(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ if (!cpu_has_lasx) {
+ force_sig(SIGILL);
+ goto out;
+ }
+
+ die_if_kernel("do_lasx invoked from kernel context!", regs);
+
+ preempt_disable();
+ init_restore_lasx();
+ preempt_enable();
+
+out:
+ local_irq_disable();
+ irqentry_exit(regs, state);
+}
+
+static void init_restore_lbt(void)
+{
+ if (!thread_lbt_context_live()) {
+ /* First time LBT context user */
+ init_lbt();
+ set_thread_flag(TIF_LBT_CTX_LIVE);
+ } else {
+ if (!is_lbt_owner())
+ own_lbt_inatomic(1);
+ }
+
+ BUG_ON(!is_lbt_enabled());
+}
+
+asmlinkage void noinstr do_lbt(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ /*
+ * BTD (Binary Translation Disable exception) can be triggered
+ * during FP save/restore if TM (Top Mode) is on, which may
+ * cause irq_enable during 'switch_to'. To avoid this situation
+ * (including the user using 'MOVGR2GCSR' to turn on TM, which
+ * will not trigger the BTE), we need to check PRMD first.
+ */
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_enable();
+
+ if (!cpu_has_lbt) {
+ force_sig(SIGILL);
+ goto out;
+ }
+ BUG_ON(is_lbt_enabled());
+
+ preempt_disable();
+ init_restore_lbt();
+ preempt_enable();
+
+out:
+ if (regs->csr_prmd & CSR_PRMD_PIE)
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void noinstr do_reserved(struct pt_regs *regs)
+{
+ irqentry_state_t state = irqentry_enter(regs);
+
+ local_irq_enable();
+ /*
+ * Game over - no way to handle this if it ever occurs. Most probably
+ * caused by a fatal error after another hardware/software error.
+ */
+ pr_err("Caught reserved exception %u on pid:%d [%s] - should not happen\n",
+ read_csr_excode(), current->pid, current->comm);
+ die_if_kernel("do_reserved exception", regs);
+ force_sig(SIGUNUSED);
+
+ local_irq_disable();
+
+ irqentry_exit(regs, state);
+}
+
+asmlinkage void cache_parity_error(void)
+{
+ /* For the moment, report the problem and hang. */
+ pr_err("Cache error exception:\n");
+ pr_err("csr_merrctl == %08x\n", csr_read32(LOONGARCH_CSR_MERRCTL));
+ pr_err("csr_merrera == %016lx\n", csr_read64(LOONGARCH_CSR_MERRERA));
+ panic("Can't handle the cache error!");
+}
+
+asmlinkage void noinstr handle_loongarch_irq(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs;
+
+ irq_enter_rcu();
+ old_regs = set_irq_regs(regs);
+ handle_arch_irq(regs);
+ set_irq_regs(old_regs);
+ irq_exit_rcu();
+}
+
+asmlinkage void noinstr do_vint(struct pt_regs *regs, unsigned long sp)
+{
+ register int cpu;
+ register unsigned long stack;
+ irqentry_state_t state = irqentry_enter(regs);
+
+ cpu = smp_processor_id();
+
+ if (on_irq_stack(cpu, sp))
+ handle_loongarch_irq(regs);
+ else {
+ stack = per_cpu(irq_stack, cpu) + IRQ_STACK_START;
+
+ /* Save task's sp on IRQ stack for unwinding */
+ *(unsigned long *)stack = sp;
+
+ __asm__ __volatile__(
+ "move $s0, $sp \n" /* Preserve sp */
+ "move $sp, %[stk] \n" /* Switch stack */
+ "move $a0, %[regs] \n"
+ "bl handle_loongarch_irq \n"
+ "move $sp, $s0 \n" /* Restore sp */
+ : /* No outputs */
+ : [stk] "r" (stack), [regs] "r" (regs)
+ : "$a0", "$a1", "$a2", "$a3", "$a4", "$a5", "$a6", "$a7", "$s0",
+ "$t0", "$t1", "$t2", "$t3", "$t4", "$t5", "$t6", "$t7", "$t8",
+ "memory");
+ }
+
+ irqentry_exit(regs, state);
+}
+
+unsigned long eentry;
+unsigned long tlbrentry;
+
+long exception_handlers[VECSIZE * 128 / sizeof(long)] __aligned(SZ_64K);
+
+static void configure_exception_vector(void)
+{
+ eentry = (unsigned long)exception_handlers;
+ tlbrentry = (unsigned long)exception_handlers + 80*VECSIZE;
+
+ csr_write64(eentry, LOONGARCH_CSR_EENTRY);
+ csr_write64(eentry, LOONGARCH_CSR_MERRENTRY);
+ csr_write64(tlbrentry, LOONGARCH_CSR_TLBRENTRY);
+}
+
+void per_cpu_trap_init(int cpu)
+{
+ unsigned int i;
+
+ setup_vint_size(VECSIZE);
+
+ configure_exception_vector();
+
+ if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+ mmgrab(&init_mm);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Initialise exception handlers */
+ if (cpu == 0)
+ for (i = 0; i < 64; i++)
+ set_handler(i * VECSIZE, handle_reserved, VECSIZE);
+
+ tlb_init(cpu);
+ cpu_cache_init();
+}
+
+/* Install CPU exception handler */
+void set_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ memcpy((void *)(eentry + offset), addr, size);
+ local_flush_icache_range(eentry + offset, eentry + offset + size);
+}
+
+static const char panic_null_cerr[] =
+ "Trying to set NULL cache error exception handler\n";
+
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception which is the only
+ * exception handler that is being run uncached.
+ */
+void set_merr_handler(unsigned long offset, void *addr, unsigned long size)
+{
+ unsigned long uncached_eentry = TO_UNCACHE(__pa(eentry));
+
+ if (!addr)
+ panic(panic_null_cerr);
+
+ memcpy((void *)(uncached_eentry + offset), addr, size);
+}
+
+void __init trap_init(void)
+{
+ long i;
+
+ /* Set interrupt vector handler */
+ for (i = EXCCODE_INT_START; i <= EXCCODE_INT_END; i++)
+ set_handler(i * VECSIZE, handle_vint, VECSIZE);
+
+ set_handler(EXCCODE_ADE * VECSIZE, handle_ade, VECSIZE);
+ set_handler(EXCCODE_ALE * VECSIZE, handle_ale, VECSIZE);
+ set_handler(EXCCODE_BCE * VECSIZE, handle_bce, VECSIZE);
+ set_handler(EXCCODE_SYS * VECSIZE, handle_sys, VECSIZE);
+ set_handler(EXCCODE_BP * VECSIZE, handle_bp, VECSIZE);
+ set_handler(EXCCODE_INE * VECSIZE, handle_ri, VECSIZE);
+ set_handler(EXCCODE_IPE * VECSIZE, handle_ri, VECSIZE);
+ set_handler(EXCCODE_FPDIS * VECSIZE, handle_fpu, VECSIZE);
+ set_handler(EXCCODE_LSXDIS * VECSIZE, handle_lsx, VECSIZE);
+ set_handler(EXCCODE_LASXDIS * VECSIZE, handle_lasx, VECSIZE);
+ set_handler(EXCCODE_FPE * VECSIZE, handle_fpe, VECSIZE);
+ set_handler(EXCCODE_BTDIS * VECSIZE, handle_lbt, VECSIZE);
+ set_handler(EXCCODE_WATCH * VECSIZE, handle_watch, VECSIZE);
+
+ cache_error_setup();
+
+ local_flush_icache_range(eentry, eentry + 0x400);
+}
diff --git a/arch/loongarch/kernel/unaligned.c b/arch/loongarch/kernel/unaligned.c
new file mode 100644
index 0000000000..3abf163dda
--- /dev/null
+++ b/arch/loongarch/kernel/unaligned.c
@@ -0,0 +1,497 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Handle unaligned accesses by emulation.
+ *
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ *
+ * Derived from MIPS:
+ * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/fpu.h>
+#include <asm/inst.h>
+
+#include "access-helper.h"
+
+#ifdef CONFIG_DEBUG_FS
+static u32 unaligned_instructions_user;
+static u32 unaligned_instructions_kernel;
+#endif
+
+static inline unsigned long read_fpr(unsigned int idx)
+{
+#define READ_FPR(idx, __value) \
+ __asm__ __volatile__("movfr2gr.d %0, $f"#idx"\n\t" : "=r"(__value));
+
+ unsigned long __value;
+
+ switch (idx) {
+ case 0:
+ READ_FPR(0, __value);
+ break;
+ case 1:
+ READ_FPR(1, __value);
+ break;
+ case 2:
+ READ_FPR(2, __value);
+ break;
+ case 3:
+ READ_FPR(3, __value);
+ break;
+ case 4:
+ READ_FPR(4, __value);
+ break;
+ case 5:
+ READ_FPR(5, __value);
+ break;
+ case 6:
+ READ_FPR(6, __value);
+ break;
+ case 7:
+ READ_FPR(7, __value);
+ break;
+ case 8:
+ READ_FPR(8, __value);
+ break;
+ case 9:
+ READ_FPR(9, __value);
+ break;
+ case 10:
+ READ_FPR(10, __value);
+ break;
+ case 11:
+ READ_FPR(11, __value);
+ break;
+ case 12:
+ READ_FPR(12, __value);
+ break;
+ case 13:
+ READ_FPR(13, __value);
+ break;
+ case 14:
+ READ_FPR(14, __value);
+ break;
+ case 15:
+ READ_FPR(15, __value);
+ break;
+ case 16:
+ READ_FPR(16, __value);
+ break;
+ case 17:
+ READ_FPR(17, __value);
+ break;
+ case 18:
+ READ_FPR(18, __value);
+ break;
+ case 19:
+ READ_FPR(19, __value);
+ break;
+ case 20:
+ READ_FPR(20, __value);
+ break;
+ case 21:
+ READ_FPR(21, __value);
+ break;
+ case 22:
+ READ_FPR(22, __value);
+ break;
+ case 23:
+ READ_FPR(23, __value);
+ break;
+ case 24:
+ READ_FPR(24, __value);
+ break;
+ case 25:
+ READ_FPR(25, __value);
+ break;
+ case 26:
+ READ_FPR(26, __value);
+ break;
+ case 27:
+ READ_FPR(27, __value);
+ break;
+ case 28:
+ READ_FPR(28, __value);
+ break;
+ case 29:
+ READ_FPR(29, __value);
+ break;
+ case 30:
+ READ_FPR(30, __value);
+ break;
+ case 31:
+ READ_FPR(31, __value);
+ break;
+ default:
+ panic("unexpected idx '%d'", idx);
+ }
+#undef READ_FPR
+ return __value;
+}
+
+static inline void write_fpr(unsigned int idx, unsigned long value)
+{
+#define WRITE_FPR(idx, value) \
+ __asm__ __volatile__("movgr2fr.d $f"#idx", %0\n\t" :: "r"(value));
+
+ switch (idx) {
+ case 0:
+ WRITE_FPR(0, value);
+ break;
+ case 1:
+ WRITE_FPR(1, value);
+ break;
+ case 2:
+ WRITE_FPR(2, value);
+ break;
+ case 3:
+ WRITE_FPR(3, value);
+ break;
+ case 4:
+ WRITE_FPR(4, value);
+ break;
+ case 5:
+ WRITE_FPR(5, value);
+ break;
+ case 6:
+ WRITE_FPR(6, value);
+ break;
+ case 7:
+ WRITE_FPR(7, value);
+ break;
+ case 8:
+ WRITE_FPR(8, value);
+ break;
+ case 9:
+ WRITE_FPR(9, value);
+ break;
+ case 10:
+ WRITE_FPR(10, value);
+ break;
+ case 11:
+ WRITE_FPR(11, value);
+ break;
+ case 12:
+ WRITE_FPR(12, value);
+ break;
+ case 13:
+ WRITE_FPR(13, value);
+ break;
+ case 14:
+ WRITE_FPR(14, value);
+ break;
+ case 15:
+ WRITE_FPR(15, value);
+ break;
+ case 16:
+ WRITE_FPR(16, value);
+ break;
+ case 17:
+ WRITE_FPR(17, value);
+ break;
+ case 18:
+ WRITE_FPR(18, value);
+ break;
+ case 19:
+ WRITE_FPR(19, value);
+ break;
+ case 20:
+ WRITE_FPR(20, value);
+ break;
+ case 21:
+ WRITE_FPR(21, value);
+ break;
+ case 22:
+ WRITE_FPR(22, value);
+ break;
+ case 23:
+ WRITE_FPR(23, value);
+ break;
+ case 24:
+ WRITE_FPR(24, value);
+ break;
+ case 25:
+ WRITE_FPR(25, value);
+ break;
+ case 26:
+ WRITE_FPR(26, value);
+ break;
+ case 27:
+ WRITE_FPR(27, value);
+ break;
+ case 28:
+ WRITE_FPR(28, value);
+ break;
+ case 29:
+ WRITE_FPR(29, value);
+ break;
+ case 30:
+ WRITE_FPR(30, value);
+ break;
+ case 31:
+ WRITE_FPR(31, value);
+ break;
+ default:
+ panic("unexpected idx '%d'", idx);
+ }
+#undef WRITE_FPR
+}
+
+void emulate_load_store_insn(struct pt_regs *regs, void __user *addr, unsigned int *pc)
+{
+ bool fp = false;
+ bool sign, write;
+ bool user = user_mode(regs);
+ unsigned int res, size = 0;
+ unsigned long value = 0;
+ union loongarch_instruction insn;
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+ __get_inst(&insn.word, pc, user);
+
+ switch (insn.reg2i12_format.opcode) {
+ case ldh_op:
+ size = 2;
+ sign = true;
+ write = false;
+ break;
+ case ldhu_op:
+ size = 2;
+ sign = false;
+ write = false;
+ break;
+ case sth_op:
+ size = 2;
+ sign = true;
+ write = true;
+ break;
+ case ldw_op:
+ size = 4;
+ sign = true;
+ write = false;
+ break;
+ case ldwu_op:
+ size = 4;
+ sign = false;
+ write = false;
+ break;
+ case stw_op:
+ size = 4;
+ sign = true;
+ write = true;
+ break;
+ case ldd_op:
+ size = 8;
+ sign = true;
+ write = false;
+ break;
+ case std_op:
+ size = 8;
+ sign = true;
+ write = true;
+ break;
+ case flds_op:
+ size = 4;
+ fp = true;
+ sign = true;
+ write = false;
+ break;
+ case fsts_op:
+ size = 4;
+ fp = true;
+ sign = true;
+ write = true;
+ break;
+ case fldd_op:
+ size = 8;
+ fp = true;
+ sign = true;
+ write = false;
+ break;
+ case fstd_op:
+ size = 8;
+ fp = true;
+ sign = true;
+ write = true;
+ break;
+ }
+
+ switch (insn.reg2i14_format.opcode) {
+ case ldptrw_op:
+ size = 4;
+ sign = true;
+ write = false;
+ break;
+ case stptrw_op:
+ size = 4;
+ sign = true;
+ write = true;
+ break;
+ case ldptrd_op:
+ size = 8;
+ sign = true;
+ write = false;
+ break;
+ case stptrd_op:
+ size = 8;
+ sign = true;
+ write = true;
+ break;
+ }
+
+ switch (insn.reg3_format.opcode) {
+ case ldxh_op:
+ size = 2;
+ sign = true;
+ write = false;
+ break;
+ case ldxhu_op:
+ size = 2;
+ sign = false;
+ write = false;
+ break;
+ case stxh_op:
+ size = 2;
+ sign = true;
+ write = true;
+ break;
+ case ldxw_op:
+ size = 4;
+ sign = true;
+ write = false;
+ break;
+ case ldxwu_op:
+ size = 4;
+ sign = false;
+ write = false;
+ break;
+ case stxw_op:
+ size = 4;
+ sign = true;
+ write = true;
+ break;
+ case ldxd_op:
+ size = 8;
+ sign = true;
+ write = false;
+ break;
+ case stxd_op:
+ size = 8;
+ sign = true;
+ write = true;
+ break;
+ case fldxs_op:
+ size = 4;
+ fp = true;
+ sign = true;
+ write = false;
+ break;
+ case fstxs_op:
+ size = 4;
+ fp = true;
+ sign = true;
+ write = true;
+ break;
+ case fldxd_op:
+ size = 8;
+ fp = true;
+ sign = true;
+ write = false;
+ break;
+ case fstxd_op:
+ size = 8;
+ fp = true;
+ sign = true;
+ write = true;
+ break;
+ }
+
+ if (!size)
+ goto sigbus;
+ if (user && !access_ok(addr, size))
+ goto sigbus;
+
+ if (!write) {
+ res = unaligned_read(addr, &value, size, sign);
+ if (res)
+ goto fault;
+
+ /* Rd is the same field in all formats */
+ if (!fp)
+ regs->regs[insn.reg3_format.rd] = value;
+ else {
+ if (is_fpu_owner())
+ write_fpr(insn.reg3_format.rd, value);
+ else
+ set_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0, value);
+ }
+ } else {
+ /* Rd is the same field in all formats */
+ if (!fp)
+ value = regs->regs[insn.reg3_format.rd];
+ else {
+ if (is_fpu_owner())
+ value = read_fpr(insn.reg3_format.rd);
+ else
+ value = get_fpr64(&current->thread.fpu.fpr[insn.reg3_format.rd], 0);
+ }
+
+ res = unaligned_write(addr, value, size);
+ if (res)
+ goto fault;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ if (user)
+ unaligned_instructions_user++;
+ else
+ unaligned_instructions_kernel++;
+#endif
+
+ compute_return_era(regs);
+
+ return;
+
+fault:
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int __init debugfs_unaligned(void)
+{
+ struct dentry *d;
+
+ d = debugfs_create_dir("loongarch", NULL);
+
+ debugfs_create_u32("unaligned_instructions_user",
+ S_IRUGO, d, &unaligned_instructions_user);
+ debugfs_create_u32("unaligned_instructions_kernel",
+ S_IRUGO, d, &unaligned_instructions_kernel);
+
+ return 0;
+}
+arch_initcall(debugfs_unaligned);
+#endif
diff --git a/arch/loongarch/kernel/unwind.c b/arch/loongarch/kernel/unwind.c
new file mode 100644
index 0000000000..a463d69613
--- /dev/null
+++ b/arch/loongarch/kernel/unwind.c
@@ -0,0 +1,32 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022-2023 Loongson Technology Corporation Limited
+ */
+#include <linux/kernel.h>
+#include <linux/ftrace.h>
+
+#include <asm/unwind.h>
+
+bool default_next_frame(struct unwind_state *state)
+{
+ struct stack_info *info = &state->stack_info;
+ unsigned long addr;
+
+ if (unwind_done(state))
+ return false;
+
+ do {
+ for (state->sp += sizeof(unsigned long);
+ state->sp < info->end; state->sp += sizeof(unsigned long)) {
+ addr = *(unsigned long *)(state->sp);
+ state->pc = unwind_graph_addr(state, addr, state->sp + 8);
+ if (__kernel_text_address(state->pc))
+ return true;
+ }
+
+ state->sp = info->next_sp;
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+ return false;
+}
diff --git a/arch/loongarch/kernel/unwind_guess.c b/arch/loongarch/kernel/unwind_guess.c
new file mode 100644
index 0000000000..98379b7d41
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_guess.c
@@ -0,0 +1,26 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <asm/unwind.h>
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ return __unwind_get_return_address(state);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ __unwind_start(state, task, regs);
+ if (!unwind_done(state) && !__kernel_text_address(state->pc))
+ unwind_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ return default_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
diff --git a/arch/loongarch/kernel/unwind_prologue.c b/arch/loongarch/kernel/unwind_prologue.c
new file mode 100644
index 0000000000..929ae24028
--- /dev/null
+++ b/arch/loongarch/kernel/unwind_prologue.c
@@ -0,0 +1,265 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Loongson Technology Corporation Limited
+ */
+#include <linux/cpumask.h>
+#include <linux/ftrace.h>
+#include <linux/kallsyms.h>
+
+#include <asm/inst.h>
+#include <asm/loongson.h>
+#include <asm/ptrace.h>
+#include <asm/setup.h>
+#include <asm/unwind.h>
+
+extern const int unwind_hint_ade;
+extern const int unwind_hint_ale;
+extern const int unwind_hint_bp;
+extern const int unwind_hint_fpe;
+extern const int unwind_hint_fpu;
+extern const int unwind_hint_lsx;
+extern const int unwind_hint_lasx;
+extern const int unwind_hint_lbt;
+extern const int unwind_hint_ri;
+extern const int unwind_hint_watch;
+extern unsigned long eentry;
+#ifdef CONFIG_NUMA
+extern unsigned long pcpu_handlers[NR_CPUS];
+#endif
+
+static inline bool scan_handlers(unsigned long entry_offset)
+{
+ int idx, offset;
+
+ if (entry_offset >= EXCCODE_INT_START * VECSIZE)
+ return false;
+
+ idx = entry_offset / VECSIZE;
+ offset = entry_offset % VECSIZE;
+ switch (idx) {
+ case EXCCODE_ADE:
+ return offset == unwind_hint_ade;
+ case EXCCODE_ALE:
+ return offset == unwind_hint_ale;
+ case EXCCODE_BP:
+ return offset == unwind_hint_bp;
+ case EXCCODE_FPE:
+ return offset == unwind_hint_fpe;
+ case EXCCODE_FPDIS:
+ return offset == unwind_hint_fpu;
+ case EXCCODE_LSXDIS:
+ return offset == unwind_hint_lsx;
+ case EXCCODE_LASXDIS:
+ return offset == unwind_hint_lasx;
+ case EXCCODE_BTDIS:
+ return offset == unwind_hint_lbt;
+ case EXCCODE_INE:
+ return offset == unwind_hint_ri;
+ case EXCCODE_WATCH:
+ return offset == unwind_hint_watch;
+ default:
+ return false;
+ }
+}
+
+static inline bool fix_exception(unsigned long pc)
+{
+#ifdef CONFIG_NUMA
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (!pcpu_handlers[cpu])
+ continue;
+ if (scan_handlers(pc - pcpu_handlers[cpu]))
+ return true;
+ }
+#endif
+ return scan_handlers(pc - eentry);
+}
+
+/*
+ * When we meet ftrace_regs_entry, reset the 'first' flag as if tracing
+ * had just started. Prologue analysis will stop soon because PC is at entry.
+ */
+static inline bool fix_ftrace(unsigned long pc)
+{
+#ifdef CONFIG_DYNAMIC_FTRACE
+ return pc == (unsigned long)ftrace_call + LOONGARCH_INSN_SIZE;
+#else
+ return false;
+#endif
+}
+
+static inline bool unwind_state_fixup(struct unwind_state *state)
+{
+ if (!fix_exception(state->pc) && !fix_ftrace(state->pc))
+ return false;
+
+ state->reset = true;
+ return true;
+}
+
+/*
+ * A LoongArch function prologue looks like the following:
+ * [instructions that do not use stack variables]
+ * addi.d sp, sp, -imm
+ * st.d xx, sp, offset <- save callee-saved regs, and
+ * st.d yy, sp, offset save ra if the function is not a leaf.
+ * [other instructions]
+ */
+static bool unwind_by_prologue(struct unwind_state *state)
+{
+ long frame_ra = -1;
+ unsigned long frame_size = 0;
+ unsigned long size, offset, pc;
+ struct pt_regs *regs;
+ struct stack_info *info = &state->stack_info;
+ union loongarch_instruction *ip, *ip_end;
+
+ if (state->sp >= info->end || state->sp < info->begin)
+ return false;
+
+ if (state->reset) {
+ regs = (struct pt_regs *)state->sp;
+ state->first = true;
+ state->reset = false;
+ state->pc = regs->csr_era;
+ state->ra = regs->regs[1];
+ state->sp = regs->regs[3];
+ return true;
+ }
+
+ /*
+ * When first is not set, the PC is a return address into the previous
+ * frame. Step it back one instruction so that it cannot overflow into
+ * the next symbol.
+ */
+ pc = state->pc - (state->first ? 0 : LOONGARCH_INSN_SIZE);
+ if (!kallsyms_lookup_size_offset(pc, &size, &offset))
+ return false;
+
+ ip = (union loongarch_instruction *)(pc - offset);
+ ip_end = (union loongarch_instruction *)pc;
+
+ while (ip < ip_end) {
+ if (is_stack_alloc_ins(ip)) {
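+ /*
+ * addi.d sp, sp, -framesize: the 12-bit immediate field encodes
+ * -framesize in two's complement, so 4096 - immediate recovers
+ * the allocated frame size.
+ */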
+ frame_size = (1 << 12) - ip->reg2i12_format.immediate;
+ ip++;
+ break;
+ }
+ ip++;
+ }
+
+ /*
+ * No stack-allocation instruction was found, so the PC may be in a leaf
+ * function. That is only reasonable for the first frame; otherwise the
+ * prologue analysis is broken.
+ */
+ if (!frame_size) {
+ if (state->first)
+ goto first;
+
+ return false;
+ }
+
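+ /*
+ * Keep scanning the prologue for the instruction that saves $ra on
+ * the stack; a branch ends the prologue, so stop there.
+ */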
+ while (ip < ip_end) {
+ if (is_ra_save_ins(ip)) {
+ frame_ra = ip->reg2i12_format.immediate;
+ break;
+ }
+ if (is_branch_ins(ip))
+ break;
+ ip++;
+ }
+
+ /* No $ra-save instruction found either; the PC may be in a leaf function. */
+ if (frame_ra < 0) {
+ if (state->first) {
+ state->sp = state->sp + frame_size;
+ goto first;
+ }
+ return false;
+ }
+
+ state->pc = *(unsigned long *)(state->sp + frame_ra);
+ state->sp = state->sp + frame_size;
+ goto out;
+
+first:
+ state->pc = state->ra;
+
+out:
+ state->first = false;
+ return unwind_state_fixup(state) || __kernel_text_address(state->pc);
+}
+
+static bool next_frame(struct unwind_state *state)
+{
+ unsigned long pc;
+ struct pt_regs *regs;
+ struct stack_info *info = &state->stack_info;
+
+ if (unwind_done(state))
+ return false;
+
+ do {
+ if (unwind_by_prologue(state)) {
+ state->pc = unwind_graph_addr(state, state->pc, state->sp);
+ return true;
+ }
+
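+ /*
+ * We have reached the end of an IRQ stack; the pt_regs saved when
+ * the interrupt occurred tells us where to resume unwinding the
+ * interrupted context.
+ */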
+ if (info->type == STACK_TYPE_IRQ && info->end == state->sp) {
+ regs = (struct pt_regs *)info->next_sp;
+ pc = regs->csr_era;
+
+ if (user_mode(regs) || !__kernel_text_address(pc))
+ goto out;
+
+ state->first = true;
+ state->pc = pc;
+ state->ra = regs->regs[1];
+ state->sp = regs->regs[3];
+ get_stack_info(state->sp, state->task, info);
+
+ return true;
+ }
+
+ state->sp = info->next_sp;
+
+ } while (!get_stack_info(state->sp, state->task, info));
+
+out:
+ state->stack_info.type = STACK_TYPE_UNKNOWN;
+ return false;
+}
+
+unsigned long unwind_get_return_address(struct unwind_state *state)
+{
+ return __unwind_get_return_address(state);
+}
+EXPORT_SYMBOL_GPL(unwind_get_return_address);
+
+void unwind_start(struct unwind_state *state, struct task_struct *task,
+ struct pt_regs *regs)
+{
+ __unwind_start(state, task, regs);
+ state->type = UNWINDER_PROLOGUE;
+ state->first = true;
+
+ /*
+ * If the current PC is not a kernel text address, we cannot look up
+ * its symbol, so prologue analysis cannot work. Fall back to the
+ * guess unwinder, which uses default_next_frame().
+ */
+ if (!__kernel_text_address(state->pc)) {
+ state->type = UNWINDER_GUESS;
+ if (!unwind_done(state))
+ unwind_next_frame(state);
+ }
+}
+EXPORT_SYMBOL_GPL(unwind_start);
+
+bool unwind_next_frame(struct unwind_state *state)
+{
+ return state->type == UNWINDER_PROLOGUE ?
+ next_frame(state) : default_next_frame(state);
+}
+EXPORT_SYMBOL_GPL(unwind_next_frame);
diff --git a/arch/loongarch/kernel/uprobes.c b/arch/loongarch/kernel/uprobes.c
new file mode 100644
index 0000000000..87abc7137b
--- /dev/null
+++ b/arch/loongarch/kernel/uprobes.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0-only
+#include <linux/highmem.h>
+#include <linux/ptrace.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+#include <asm/cacheflush.h>
+
+#define UPROBE_TRAP_NR UINT_MAX
+
+int arch_uprobe_analyze_insn(struct arch_uprobe *auprobe,
+ struct mm_struct *mm, unsigned long addr)
+{
+ int idx;
+ union loongarch_instruction insn;
+
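+ /* LoongArch instructions are 32 bits wide and must be 4-byte aligned. */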
+ if (addr & 0x3)
+ return -EILSEQ;
+
+ for (idx = ARRAY_SIZE(auprobe->insn) - 1; idx >= 0; idx--) {
+ insn.word = auprobe->insn[idx];
+ if (insns_not_supported(insn))
+ return -EINVAL;
+ }
+
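+ /*
+ * Set up the out-of-line (XOL) slot: the original instruction (or a
+ * NOP if it will be emulated instead), followed by an XOL breakpoint
+ * instruction so we trap back after stepping it once.
+ */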
+ if (insns_need_simulation(insn)) {
+ auprobe->ixol[0] = larch_insn_gen_nop();
+ auprobe->simulate = true;
+ } else {
+ auprobe->ixol[0] = auprobe->insn[0];
+ auprobe->simulate = false;
+ }
+
+ auprobe->ixol[1] = UPROBE_XOLBP_INSN;
+
+ return 0;
+}
+
+int arch_uprobe_pre_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ utask->autask.saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ instruction_pointer_set(regs, utask->xol_vaddr);
+ user_enable_single_step(current);
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ WARN_ON_ONCE(current->thread.trap_nr != UPROBE_TRAP_NR);
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+
+ if (auprobe->simulate)
+ instruction_pointer_set(regs, auprobe->resume_era);
+ else
+ instruction_pointer_set(regs, utask->vaddr + LOONGARCH_INSN_SIZE);
+
+ user_disable_single_step(current);
+
+ return 0;
+}
+
+void arch_uprobe_abort_xol(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ instruction_pointer_set(regs, utask->vaddr);
+ user_disable_single_step(current);
+}
+
+bool arch_uprobe_xol_was_trapped(struct task_struct *t)
+{
+ if (t->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ union loongarch_instruction insn;
+
+ if (!auprobe->simulate)
+ return false;
+
+ insn.word = auprobe->insn[0];
+ arch_simulate_insn(insn, regs);
+ auprobe->resume_era = regs->csr_era;
+
+ return true;
+}
+
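+/*
+ * Replace the return address in $ra (r1) with the uretprobe trampoline
+ * and hand the original address back so the core code can restore it.
+ */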
+unsigned long arch_uretprobe_hijack_return_addr(unsigned long trampoline_vaddr,
+ struct pt_regs *regs)
+{
+ unsigned long ra = regs->regs[1];
+
+ regs->regs[1] = trampoline_vaddr;
+
+ return ra;
+}
+
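+/*
+ * $r3 is the stack pointer: the return_instance is still live as long as
+ * the stack has not been unwound past the frame it was recorded for.
+ */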
+bool arch_uretprobe_is_alive(struct return_instance *ret,
+ enum rp_check ctx, struct pt_regs *regs)
+{
+ if (ctx == RP_CHECK_CHAIN_CALL)
+ return regs->regs[3] <= ret->stack;
+ else
+ return regs->regs[3] < ret->stack;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ return NOTIFY_DONE;
+}
+
+bool uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ if (uprobe_pre_sstep_notifier(regs))
+ return true;
+
+ return false;
+}
+
+bool uprobe_singlestep_handler(struct pt_regs *regs)
+{
+ if (uprobe_post_sstep_notifier(regs))
+ return true;
+
+ return false;
+}
+
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ void *kaddr = kmap_local_page(page);
+ void *dst = kaddr + (vaddr & ~PAGE_MASK);
+
+ memcpy(dst, src, len);
+ flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
+ kunmap_local(kaddr);
+}
diff --git a/arch/loongarch/kernel/vdso.c b/arch/loongarch/kernel/vdso.c
new file mode 100644
index 0000000000..14941e4be6
--- /dev/null
+++ b/arch/loongarch/kernel/vdso.c
@@ -0,0 +1,209 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Author: Huacai Chen <chenhuacai@loongson.cn>
+ * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/time_namespace.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+#include <generated/vdso-offsets.h>
+
+extern char vdso_start[], vdso_end[];
+
+/* Kernel-provided data used by the VDSO. */
+static union {
+ u8 page[PAGE_SIZE];
+ struct vdso_data data[CS_BASES];
+} generic_vdso_data __page_aligned_data;
+
+static union {
+ u8 page[LOONGARCH_VDSO_DATA_SIZE];
+ struct loongarch_vdso_data vdata;
+} loongarch_vdso_data __page_aligned_data;
+
+static struct page *vdso_pages[] = { NULL };
+struct vdso_data *vdso_data = generic_vdso_data.data;
+struct vdso_pcpu_data *vdso_pdata = loongarch_vdso_data.vdata.pdata;
+
+static int vdso_mremap(const struct vm_special_mapping *sm, struct vm_area_struct *new_vma)
+{
+ current->mm->context.vdso = (void *)(new_vma->vm_start);
+
+ return 0;
+}
+
+static vm_fault_t vvar_fault(const struct vm_special_mapping *sm,
+ struct vm_area_struct *vma, struct vm_fault *vmf)
+{
+ unsigned long pfn;
+ struct page *timens_page = find_timens_vvar_page(vma);
+
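+ /*
+ * vvar area layout: the generic vdso data page (replaced by the time
+ * namespace page when one is in use), then, with CONFIG_TIME_NS, the
+ * real vdso data page, and finally the LoongArch per-CPU data pages.
+ */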
+ switch (vmf->pgoff) {
+ case VVAR_GENERIC_PAGE_OFFSET:
+ if (!timens_page)
+ pfn = sym_to_pfn(vdso_data);
+ else
+ pfn = page_to_pfn(timens_page);
+ break;
+#ifdef CONFIG_TIME_NS
+ case VVAR_TIMENS_PAGE_OFFSET:
+ /*
+ * If a task belongs to a time namespace then a namespace specific
+ * VVAR is mapped with the VVAR_GENERIC_PAGE_OFFSET and the real
+ * VVAR page is mapped with the VVAR_TIMENS_PAGE_OFFSET offset.
+ * See also the comment near timens_setup_vdso_data().
+ */
+ if (!timens_page)
+ return VM_FAULT_SIGBUS;
+ else
+ pfn = sym_to_pfn(vdso_data);
+ break;
+#endif /* CONFIG_TIME_NS */
+ case VVAR_LOONGARCH_PAGES_START ... VVAR_LOONGARCH_PAGES_END:
+ pfn = sym_to_pfn(&loongarch_vdso_data) + vmf->pgoff - VVAR_LOONGARCH_PAGES_START;
+ break;
+ default:
+ return VM_FAULT_SIGBUS;
+ }
+
+ return vmf_insert_pfn(vma, vmf->address, pfn);
+}
+
+struct loongarch_vdso_info vdso_info = {
+ .vdso = vdso_start,
+ .size = PAGE_SIZE,
+ .code_mapping = {
+ .name = "[vdso]",
+ .pages = vdso_pages,
+ .mremap = vdso_mremap,
+ },
+ .data_mapping = {
+ .name = "[vvar]",
+ .fault = vvar_fault,
+ },
+ .offset_sigreturn = vdso_offset_sigreturn,
+};
+
+static int __init init_vdso(void)
+{
+ unsigned long i, cpu, pfn;
+
+ BUG_ON(!PAGE_ALIGNED(vdso_info.vdso));
+ BUG_ON(!PAGE_ALIGNED(vdso_info.size));
+
+ for_each_possible_cpu(cpu)
+ vdso_pdata[cpu].node = cpu_to_node(cpu);
+
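+ /* Record the struct page backing each page of the vdso image. */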
+ pfn = __phys_to_pfn(__pa_symbol(vdso_info.vdso));
+ for (i = 0; i < vdso_info.size / PAGE_SIZE; i++)
+ vdso_info.code_mapping.pages[i] = pfn_to_page(pfn + i);
+
+ return 0;
+}
+subsys_initcall(init_vdso);
+
+#ifdef CONFIG_TIME_NS
+struct vdso_data *arch_get_vdso_data(void *vvar_page)
+{
+ return (struct vdso_data *)(vvar_page);
+}
+
+/*
+ * The vvar mapping contains data for a specific time namespace, so when a
+ * task changes namespace we must unmap its vvar data for the old namespace.
+ * Subsequent faults will map in data for the new namespace.
+ *
+ * For more details see timens_setup_vdso_data().
+ */
+int vdso_join_timens(struct task_struct *task, struct time_namespace *ns)
+{
+ struct mm_struct *mm = task->mm;
+ struct vm_area_struct *vma;
+
+ VMA_ITERATOR(vmi, mm, 0);
+
+ mmap_read_lock(mm);
+ for_each_vma(vmi, vma) {
+ if (vma_is_special_mapping(vma, &vdso_info.data_mapping))
+ zap_vma_pages(vma);
+ }
+ mmap_read_unlock(mm);
+
+ return 0;
+}
+#endif
+
+static unsigned long vdso_base(void)
+{
+ unsigned long base = STACK_TOP;
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += get_random_u32_below(VDSO_RANDOMIZE_SIZE);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ int ret;
+ unsigned long size, data_addr, vdso_addr;
+ struct mm_struct *mm = current->mm;
+ struct vm_area_struct *vma;
+ struct loongarch_vdso_info *info = current->thread.vdso;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ /*
+ * Determine the total area size: the VVAR data pages followed by
+ * the VDSO code itself.
+ */
+ size = VVAR_SIZE + info->size;
+
+ data_addr = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(data_addr)) {
+ ret = data_addr;
+ goto out;
+ }
+
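+ /* Map the VVAR data pages first, then the VDSO code right after them. */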
+ vma = _install_special_mapping(mm, data_addr, VVAR_SIZE,
+ VM_READ | VM_MAYREAD | VM_PFNMAP,
+ &info->data_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ vdso_addr = data_addr + VVAR_SIZE;
+ vma = _install_special_mapping(mm, vdso_addr, info->size,
+ VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &info->code_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ mm->context.vdso = (void *)vdso_addr;
+ ret = 0;
+
+out:
+ mmap_write_unlock(mm);
+ return ret;
+}
diff --git a/arch/loongarch/kernel/vmlinux.lds.S b/arch/loongarch/kernel/vmlinux.lds.S
new file mode 100644
index 0000000000..bb2ec86f37
--- /dev/null
+++ b/arch/loongarch/kernel/vmlinux.lds.S
@@ -0,0 +1,165 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <linux/sizes.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+#define RO_EXCEPTION_TABLE_ALIGN 4
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+#include <asm-generic/vmlinux.lds.h>
+#include "image-vars.h"
+
+/*
+ * The maximum available page size is 64K, so we set the SectionAlignment
+ * field of the EFI application to 64K.
+ */
+PECOFF_FILE_ALIGN = 0x200;
+PECOFF_SEGMENT_ALIGN = 0x10000;
+
+OUTPUT_ARCH(loongarch)
+ENTRY(kernel_entry)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+ note PT_NOTE FLAGS(4); /* R__ */
+}
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ . = VMLINUX_LOAD_ADDRESS;
+
+ _text = .;
+ HEAD_TEXT_SECTION
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ _stext = .;
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ } :text = 0
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ _etext = .;
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __init_begin = .;
+ __inittext_begin = .;
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ .exit.text : {
+ EXIT_TEXT
+ }
+
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __inittext_end = .;
+
+ __initdata_begin = .;
+
+ /*
+ * struct alt_inst entries. From the header (alternative.h):
+ * "Alternative instructions for different CPU types or capabilities"
+ * Think locking instructions on spinlocks.
+ */
+ . = ALIGN(4);
+ .altinstructions : AT(ADDR(.altinstructions) - LOAD_OFFSET) {
+ __alt_instructions = .;
+ *(.altinstructions)
+ __alt_instructions_end = .;
+ }
+
+ INIT_DATA_SECTION(16)
+ .exit.data : {
+ EXIT_DATA
+ }
+
+#ifdef CONFIG_SMP
+ PERCPU_SECTION(1 << CONFIG_L1_CACHE_SHIFT)
+#endif
+
+ .init.bss : {
+ *(.init.bss)
+ }
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+ __initdata_end = .;
+
+ __init_end = .;
+
+ _sdata = .;
+ RO_DATA(4096)
+
+ .got : ALIGN(16) { *(.got) }
+ .plt : ALIGN(16) { *(.plt) }
+ .got.plt : ALIGN(16) { *(.got.plt) }
+
+ RW_DATA(1 << CONFIG_L1_CACHE_SHIFT, PAGE_SIZE, THREAD_SIZE)
+
+ .rela.dyn : ALIGN(8) {
+ __rela_dyn_begin = .;
+ *(.rela.dyn) *(.rela*)
+ __rela_dyn_end = .;
+ }
+
+ .data.rel : { *(.data.rel*) }
+
+#ifdef CONFIG_RELOCATABLE
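+ /* Fixup records for absolute addresses in a relocatable kernel. */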
+ . = ALIGN(8);
+ .la_abs : AT(ADDR(.la_abs) - LOAD_OFFSET) {
+ __la_abs_begin = .;
+ *(.la_abs)
+ __la_abs_end = .;
+ }
+#endif
+
+ .sdata : {
+ *(.sdata)
+ }
+ .edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGN); }
+ _edata = .;
+
+ BSS_SECTION(0, SZ_64K, 8)
+ . = ALIGN(PECOFF_SEGMENT_ALIGN);
+
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+#ifdef CONFIG_EFI_STUB
+ /* header symbols */
+ _kernel_asize = _end - _text;
+ _kernel_fsize = _edata - _text;
+ _kernel_vsize = _end - __initdata_begin;
+ _kernel_rsize = _edata - __initdata_begin;
+ _kernel_offset = kernel_offset - _text;
+#endif
+
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+
+ DISCARDS
+ /DISCARD/ : {
+ *(.dynamic .dynsym .dynstr .hash .gnu.hash)
+ *(.gnu.attributes)
+ *(.options)
+ *(.eh_frame)
+ }
+}