Diffstat (limited to 'arch/mips/kernel')
-rw-r--r--  arch/mips/kernel/.gitignore  2
-rw-r--r--  arch/mips/kernel/Makefile  117
-rw-r--r--  arch/mips/kernel/access-helper.h  19
-rw-r--r--  arch/mips/kernel/asm-offsets.c  398
-rw-r--r--  arch/mips/kernel/bmips_5xxx_init.S  747
-rw-r--r--  arch/mips/kernel/bmips_vec.S  322
-rw-r--r--  arch/mips/kernel/branch.c  908
-rw-r--r--  arch/mips/kernel/cacheinfo.c  116
-rw-r--r--  arch/mips/kernel/cevt-bcm1480.c  138
-rw-r--r--  arch/mips/kernel/cevt-ds1287.c  121
-rw-r--r--  arch/mips/kernel/cevt-gt641xx.c  146
-rw-r--r--  arch/mips/kernel/cevt-r4k.c  341
-rw-r--r--  arch/mips/kernel/cevt-sb1250.c  138
-rw-r--r--  arch/mips/kernel/cevt-txx9.c  220
-rw-r--r--  arch/mips/kernel/cmpxchg.c  104
-rw-r--r--  arch/mips/kernel/cps-vec-ns16550.S  212
-rw-r--r--  arch/mips/kernel/cps-vec.S  629
-rw-r--r--  arch/mips/kernel/cpu-probe.c  2043
-rw-r--r--  arch/mips/kernel/cpu-r3k-probe.c  152
-rw-r--r--  arch/mips/kernel/crash.c  103
-rw-r--r--  arch/mips/kernel/crash_dump.c  19
-rw-r--r--  arch/mips/kernel/csrc-bcm1480.c  48
-rw-r--r--  arch/mips/kernel/csrc-ioasic.c  65
-rw-r--r--  arch/mips/kernel/csrc-r4k.c  130
-rw-r--r--  arch/mips/kernel/csrc-sb1250.c  71
-rw-r--r--  arch/mips/kernel/early_printk.c  41
-rw-r--r--  arch/mips/kernel/early_printk_8250.c  54
-rw-r--r--  arch/mips/kernel/elf.c  343
-rw-r--r--  arch/mips/kernel/entry.S  185
-rw-r--r--  arch/mips/kernel/fpu-probe.c  321
-rw-r--r--  arch/mips/kernel/fpu-probe.h  40
-rw-r--r--  arch/mips/kernel/ftrace.c  402
-rw-r--r--  arch/mips/kernel/genex.S  681
-rw-r--r--  arch/mips/kernel/gpio_txx9.c  86
-rw-r--r--  arch/mips/kernel/head.S  154
-rw-r--r--  arch/mips/kernel/i8253.c  38
-rw-r--r--  arch/mips/kernel/idle.c  261
-rw-r--r--  arch/mips/kernel/irq-gt641xx.c  118
-rw-r--r--  arch/mips/kernel/irq-msc01.c  156
-rw-r--r--  arch/mips/kernel/irq.c  119
-rw-r--r--  arch/mips/kernel/irq_txx9.c  178
-rw-r--r--  arch/mips/kernel/jump_label.c  109
-rw-r--r--  arch/mips/kernel/kgdb.c  394
-rw-r--r--  arch/mips/kernel/kprobes.c  525
-rw-r--r--  arch/mips/kernel/linux32.c  133
-rw-r--r--  arch/mips/kernel/machine_kexec.c  264
-rw-r--r--  arch/mips/kernel/mcount.S  220
-rw-r--r--  arch/mips/kernel/mips-cm.c  515
-rw-r--r--  arch/mips/kernel/mips-cpc.c  123
-rw-r--r--  arch/mips/kernel/mips-mt-fpaff.c  219
-rw-r--r--  arch/mips/kernel/mips-mt.c  246
-rw-r--r--  arch/mips/kernel/mips-r2-to-r6-emul.c  2363
-rw-r--r--  arch/mips/kernel/module.c  453
-rw-r--r--  arch/mips/kernel/octeon_switch.S  554
-rw-r--r--  arch/mips/kernel/perf_event.c  67
-rw-r--r--  arch/mips/kernel/perf_event_mipsxx.c  2056
-rw-r--r--  arch/mips/kernel/perf_regs.c  68
-rw-r--r--  arch/mips/kernel/pm-cps.c  738
-rw-r--r--  arch/mips/kernel/pm.c  95
-rw-r--r--  arch/mips/kernel/probes-common.h  79
-rw-r--r--  arch/mips/kernel/proc.c  332
-rw-r--r--  arch/mips/kernel/process.c  912
-rw-r--r--  arch/mips/kernel/prom.c  72
-rw-r--r--  arch/mips/kernel/ptrace.c  1381
-rw-r--r--  arch/mips/kernel/ptrace32.c  317
-rw-r--r--  arch/mips/kernel/r2300_fpu.S  130
-rw-r--r--  arch/mips/kernel/r2300_switch.S  65
-rw-r--r--  arch/mips/kernel/r4k-bugs64.c  325
-rw-r--r--  arch/mips/kernel/r4k_fpu.S  417
-rw-r--r--  arch/mips/kernel/r4k_switch.S  59
-rw-r--r--  arch/mips/kernel/relocate.c  466
-rw-r--r--  arch/mips/kernel/relocate_kernel.S  184
-rw-r--r--  arch/mips/kernel/reset.c  124
-rw-r--r--  arch/mips/kernel/rtlx-cmp.c  122
-rw-r--r--  arch/mips/kernel/rtlx-mt.c  147
-rw-r--r--  arch/mips/kernel/rtlx.c  409
-rw-r--r--  arch/mips/kernel/scall32-o32.S  222
-rw-r--r--  arch/mips/kernel/scall64-n32.S  107
-rw-r--r--  arch/mips/kernel/scall64-n64.S  115
-rw-r--r--  arch/mips/kernel/scall64-o32.S  221
-rw-r--r--  arch/mips/kernel/segment.c  93
-rw-r--r--  arch/mips/kernel/setup.c  855
-rw-r--r--  arch/mips/kernel/signal-common.h  43
-rw-r--r--  arch/mips/kernel/signal.c  968
-rw-r--r--  arch/mips/kernel/signal32.c  78
-rw-r--r--  arch/mips/kernel/signal_n32.c  148
-rw-r--r--  arch/mips/kernel/signal_o32.c  290
-rw-r--r--  arch/mips/kernel/smp-bmips.c  672
-rw-r--r--  arch/mips/kernel/smp-cmp.c  148
-rw-r--r--  arch/mips/kernel/smp-cps.c  644
-rw-r--r--  arch/mips/kernel/smp-mt.c  240
-rw-r--r--  arch/mips/kernel/smp-up.c  79
-rw-r--r--  arch/mips/kernel/smp.c  714
-rw-r--r--  arch/mips/kernel/spinlock_test.c  127
-rw-r--r--  arch/mips/kernel/spram.c  220
-rw-r--r--  arch/mips/kernel/stacktrace.c  93
-rw-r--r--  arch/mips/kernel/sync-r4k.c  122
-rw-r--r--  arch/mips/kernel/syscall.c  242
-rw-r--r--  arch/mips/kernel/syscalls/Makefile  51
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n32.tbl  391
-rw-r--r--  arch/mips/kernel/syscalls/syscall_n64.tbl  367
-rw-r--r--  arch/mips/kernel/syscalls/syscall_o32.tbl  440
-rw-r--r--  arch/mips/kernel/syscalls/syscallnr.sh  26
-rw-r--r--  arch/mips/kernel/sysrq.c  66
-rw-r--r--  arch/mips/kernel/time.c  167
-rw-r--r--  arch/mips/kernel/topology.c  28
-rw-r--r--  arch/mips/kernel/traps.c  2562
-rw-r--r--  arch/mips/kernel/unaligned.c  1572
-rw-r--r--  arch/mips/kernel/uprobes.c  263
-rw-r--r--  arch/mips/kernel/vdso.c  193
-rw-r--r--  arch/mips/kernel/vmlinux.lds.S  243
-rw-r--r--  arch/mips/kernel/vpe-cmp.c  180
-rw-r--r--  arch/mips/kernel/vpe-mt.c  521
-rw-r--r--  arch/mips/kernel/vpe.c  900
-rw-r--r--  arch/mips/kernel/watch.c  211
115 files changed, 39791 insertions, 0 deletions
diff --git a/arch/mips/kernel/.gitignore b/arch/mips/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/mips/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
new file mode 100644
index 000000000..5d1addac5
--- /dev/null
+++ b/arch/mips/kernel/Makefile
@@ -0,0 +1,117 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the Linux/MIPS kernel.
+#
+
+extra-y := vmlinux.lds
+
+obj-y += head.o branch.o cmpxchg.o elf.o entry.o genex.o idle.o irq.o \
+ process.o prom.o ptrace.o reset.o setup.o signal.o \
+ syscall.o time.o topology.o traps.o unaligned.o watch.o \
+ vdso.o cacheinfo.o
+
+ifdef CONFIG_CPU_R3K_TLB
+obj-y += cpu-r3k-probe.o
+else
+obj-y += cpu-probe.o
+endif
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_early_printk.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_perf_event.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_perf_event_mipsxx.o = $(CC_FLAGS_FTRACE)
+endif
+
+obj-$(CONFIG_CEVT_BCM1480) += cevt-bcm1480.o
+obj-$(CONFIG_CEVT_R4K) += cevt-r4k.o
+obj-$(CONFIG_CEVT_DS1287) += cevt-ds1287.o
+obj-$(CONFIG_CEVT_GT641XX) += cevt-gt641xx.o
+obj-$(CONFIG_CEVT_SB1250) += cevt-sb1250.o
+obj-$(CONFIG_CEVT_TXX9) += cevt-txx9.o
+obj-$(CONFIG_CSRC_BCM1480) += csrc-bcm1480.o
+obj-$(CONFIG_CSRC_IOASIC) += csrc-ioasic.o
+obj-$(CONFIG_CSRC_R4K) += csrc-r4k.o
+obj-$(CONFIG_CSRC_SB1250) += csrc-sb1250.o
+obj-$(CONFIG_SYNC_R4K) += sync-r4k.o
+
+obj-$(CONFIG_DEBUG_FS) += segment.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_MODULES) += module.o
+
+obj-$(CONFIG_FTRACE_SYSCALLS) += ftrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+
+sw-y := r4k_switch.o
+sw-$(CONFIG_CPU_R3000) := r2300_switch.o
+sw-$(CONFIG_CPU_CAVIUM_OCTEON) := octeon_switch.o
+obj-y += $(sw-y)
+
+obj-$(CONFIG_MIPS_FP_SUPPORT) += fpu-probe.o
+obj-$(CONFIG_CPU_R2300_FPU) += r2300_fpu.o
+obj-$(CONFIG_CPU_R4K_FPU) += r4k_fpu.o
+
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP_UP) += smp-up.o
+obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o bmips_5xxx_init.o
+
+obj-$(CONFIG_MIPS_MT) += mips-mt.o
+obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
+obj-$(CONFIG_MIPS_MT_SMP) += smp-mt.o
+obj-$(CONFIG_MIPS_CMP) += smp-cmp.o
+obj-$(CONFIG_MIPS_CPS) += smp-cps.o cps-vec.o
+obj-$(CONFIG_MIPS_CPS_NS16550) += cps-vec-ns16550.o
+obj-$(CONFIG_MIPS_SPRAM) += spram.o
+
+obj-$(CONFIG_MIPS_VPE_LOADER) += vpe.o
+obj-$(CONFIG_MIPS_VPE_LOADER_CMP) += vpe-cmp.o
+obj-$(CONFIG_MIPS_VPE_LOADER_MT) += vpe-mt.o
+obj-$(CONFIG_MIPS_VPE_APSP_API) += rtlx.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_CMP) += rtlx-cmp.o
+obj-$(CONFIG_MIPS_VPE_APSP_API_MT) += rtlx-mt.o
+
+obj-$(CONFIG_MIPS_MSC) += irq-msc01.o
+obj-$(CONFIG_IRQ_TXX9) += irq_txx9.o
+obj-$(CONFIG_IRQ_GT641XX) += irq-gt641xx.o
+
+obj-$(CONFIG_KPROBES) += kprobes.o
+obj-$(CONFIG_32BIT) += scall32-o32.o
+obj-$(CONFIG_64BIT) += scall64-n64.o
+obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
+obj-$(CONFIG_MIPS32_N32) += scall64-n32.o signal_n32.o
+obj-$(CONFIG_MIPS32_O32) += scall64-o32.o signal_o32.o
+
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_PROC_FS) += proc.o
+obj-$(CONFIG_MAGIC_SYSRQ) += sysrq.o
+
+obj-$(CONFIG_CPU_R4X00_BUGS64) += r4k-bugs64.o
+
+obj-$(CONFIG_I8253) += i8253.o
+
+obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+
+obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
+obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
+obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
+obj-$(CONFIG_EARLY_PRINTK_8250) += early_printk_8250.o
+obj-$(CONFIG_SPINLOCK_TEST) += spinlock_test.o
+obj-$(CONFIG_MIPSR2_TO_R6_EMULATOR) += mips-r2-to-r6-emul.o
+
+CFLAGS_r4k-bugs64.o = $(shell if $(CC) $(KBUILD_CFLAGS) -Wa,-mdaddi -c -o /dev/null -x c /dev/null >/dev/null 2>&1; then echo "-DHAVE_AS_SET_DADDI"; fi)
+
+obj-$(CONFIG_PERF_EVENTS) += perf_event.o perf_regs.o
+obj-$(CONFIG_HW_PERF_EVENTS) += perf_event_mipsxx.o
+
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+obj-$(CONFIG_UPROBES) += uprobes.o
+
+obj-$(CONFIG_MIPS_CM) += mips-cm.o
+obj-$(CONFIG_MIPS_CPC) += mips-cpc.o
+
+obj-$(CONFIG_CPU_PM) += pm.o
+obj-$(CONFIG_MIPS_CPS_PM) += pm-cps.o
+
+CPPFLAGS_vmlinux.lds := $(KBUILD_CFLAGS)
diff --git a/arch/mips/kernel/access-helper.h b/arch/mips/kernel/access-helper.h
new file mode 100644
index 000000000..590388031
--- /dev/null
+++ b/arch/mips/kernel/access-helper.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <linux/uaccess.h>
+
+static inline int __get_addr(unsigned long *a, unsigned long *p, bool user)
+{
+ return user ? get_user(*a, (unsigned long __user *)p) :
+ get_kernel_nofault(*a, p);
+}
+
+static inline int __get_inst16(u16 *i, u16 *p, bool user)
+{
+ return user ? get_user(*i, (u16 __user *)p) : get_kernel_nofault(*i, p);
+}
+
+static inline int __get_inst32(u32 *i, u32 *p, bool user)
+{
+ return user ? get_user(*i, (u32 __user *)p) : get_kernel_nofault(*i, p);
+}
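
The three helpers above let exception and debugger paths fetch a word without caring which address space it lives in: user accesses go through get_user() with its access checking, kernel addresses through get_kernel_nofault(). A minimal sketch of a caller, assuming kernel context (dump_faulting_insn() is hypothetical, not part of this patch):

	static int dump_faulting_insn(struct pt_regs *regs)
	{
		unsigned long epc = exception_epc(regs);	/* asm/branch.h */
		u32 insn;

		/* the user flag picks get_user() vs. get_kernel_nofault() */
		if (__get_inst32(&insn, (u32 *)epc, user_mode(regs)))
			return -EFAULT;		/* address not readable */

		pr_info("faulting insn %08x at %08lx\n", insn, epc);
		return 0;
	}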
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
new file mode 100644
index 000000000..c4501897b
--- /dev/null
+++ b/arch/mips/kernel/asm-offsets.c
@@ -0,0 +1,398 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * asm-offsets.c: Calculate pt_regs and task_struct offsets.
+ *
+ * Copyright (C) 1996 David S. Miller
+ * Copyright (C) 1997, 1998, 1999, 2000, 2001, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ *
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ */
+#include <linux/compat.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/kbuild.h>
+#include <linux/suspend.h>
+#include <asm/cpu-info.h>
+#include <asm/pm.h>
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+#include <asm/smp-cps.h>
+
+#include <linux/kvm_host.h>
+
+void output_ptreg_defines(void)
+{
+ COMMENT("MIPS pt_regs offsets.");
+ OFFSET(PT_R0, pt_regs, regs[0]);
+ OFFSET(PT_R1, pt_regs, regs[1]);
+ OFFSET(PT_R2, pt_regs, regs[2]);
+ OFFSET(PT_R3, pt_regs, regs[3]);
+ OFFSET(PT_R4, pt_regs, regs[4]);
+ OFFSET(PT_R5, pt_regs, regs[5]);
+ OFFSET(PT_R6, pt_regs, regs[6]);
+ OFFSET(PT_R7, pt_regs, regs[7]);
+ OFFSET(PT_R8, pt_regs, regs[8]);
+ OFFSET(PT_R9, pt_regs, regs[9]);
+ OFFSET(PT_R10, pt_regs, regs[10]);
+ OFFSET(PT_R11, pt_regs, regs[11]);
+ OFFSET(PT_R12, pt_regs, regs[12]);
+ OFFSET(PT_R13, pt_regs, regs[13]);
+ OFFSET(PT_R14, pt_regs, regs[14]);
+ OFFSET(PT_R15, pt_regs, regs[15]);
+ OFFSET(PT_R16, pt_regs, regs[16]);
+ OFFSET(PT_R17, pt_regs, regs[17]);
+ OFFSET(PT_R18, pt_regs, regs[18]);
+ OFFSET(PT_R19, pt_regs, regs[19]);
+ OFFSET(PT_R20, pt_regs, regs[20]);
+ OFFSET(PT_R21, pt_regs, regs[21]);
+ OFFSET(PT_R22, pt_regs, regs[22]);
+ OFFSET(PT_R23, pt_regs, regs[23]);
+ OFFSET(PT_R24, pt_regs, regs[24]);
+ OFFSET(PT_R25, pt_regs, regs[25]);
+ OFFSET(PT_R26, pt_regs, regs[26]);
+ OFFSET(PT_R27, pt_regs, regs[27]);
+ OFFSET(PT_R28, pt_regs, regs[28]);
+ OFFSET(PT_R29, pt_regs, regs[29]);
+ OFFSET(PT_R30, pt_regs, regs[30]);
+ OFFSET(PT_R31, pt_regs, regs[31]);
+ OFFSET(PT_LO, pt_regs, lo);
+ OFFSET(PT_HI, pt_regs, hi);
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ OFFSET(PT_ACX, pt_regs, acx);
+#endif
+ OFFSET(PT_EPC, pt_regs, cp0_epc);
+ OFFSET(PT_BVADDR, pt_regs, cp0_badvaddr);
+ OFFSET(PT_STATUS, pt_regs, cp0_status);
+ OFFSET(PT_CAUSE, pt_regs, cp0_cause);
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ OFFSET(PT_MPL, pt_regs, mpl);
+ OFFSET(PT_MTP, pt_regs, mtp);
+#endif /* CONFIG_CPU_CAVIUM_OCTEON */
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ BLANK();
+}
+
+void output_task_defines(void)
+{
+ COMMENT("MIPS task_struct offsets.");
+ OFFSET(TASK_THREAD_INFO, task_struct, stack);
+ OFFSET(TASK_FLAGS, task_struct, flags);
+ OFFSET(TASK_MM, task_struct, mm);
+ OFFSET(TASK_PID, task_struct, pid);
+#if defined(CONFIG_STACKPROTECTOR)
+ OFFSET(TASK_STACK_CANARY, task_struct, stack_canary);
+#endif
+ DEFINE(TASK_STRUCT_SIZE, sizeof(struct task_struct));
+ BLANK();
+}
+
+void output_thread_info_defines(void)
+{
+ COMMENT("MIPS thread_info offsets.");
+ OFFSET(TI_TASK, thread_info, task);
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_TP_VALUE, thread_info, tp_value);
+ OFFSET(TI_CPU, thread_info, cpu);
+ OFFSET(TI_PRE_COUNT, thread_info, preempt_count);
+ OFFSET(TI_REGS, thread_info, regs);
+ DEFINE(_THREAD_SIZE, THREAD_SIZE);
+ DEFINE(_THREAD_MASK, THREAD_MASK);
+ DEFINE(_IRQ_STACK_SIZE, IRQ_STACK_SIZE);
+ DEFINE(_IRQ_STACK_START, IRQ_STACK_START);
+ BLANK();
+}
+
+void output_thread_defines(void)
+{
+ COMMENT("MIPS specific thread_struct offsets.");
+ OFFSET(THREAD_REG16, task_struct, thread.reg16);
+ OFFSET(THREAD_REG17, task_struct, thread.reg17);
+ OFFSET(THREAD_REG18, task_struct, thread.reg18);
+ OFFSET(THREAD_REG19, task_struct, thread.reg19);
+ OFFSET(THREAD_REG20, task_struct, thread.reg20);
+ OFFSET(THREAD_REG21, task_struct, thread.reg21);
+ OFFSET(THREAD_REG22, task_struct, thread.reg22);
+ OFFSET(THREAD_REG23, task_struct, thread.reg23);
+ OFFSET(THREAD_REG29, task_struct, thread.reg29);
+ OFFSET(THREAD_REG30, task_struct, thread.reg30);
+ OFFSET(THREAD_REG31, task_struct, thread.reg31);
+ OFFSET(THREAD_STATUS, task_struct,
+ thread.cp0_status);
+
+ OFFSET(THREAD_BVADDR, task_struct, \
+ thread.cp0_badvaddr);
+ OFFSET(THREAD_BUADDR, task_struct, \
+ thread.cp0_baduaddr);
+ OFFSET(THREAD_ECODE, task_struct, \
+ thread.error_code);
+ OFFSET(THREAD_TRAPNO, task_struct, thread.trap_nr);
+ BLANK();
+}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+void output_thread_fpu_defines(void)
+{
+ OFFSET(THREAD_FPU, task_struct, thread.fpu);
+
+ OFFSET(THREAD_FPR0, task_struct, thread.fpu.fpr[0]);
+ OFFSET(THREAD_FPR1, task_struct, thread.fpu.fpr[1]);
+ OFFSET(THREAD_FPR2, task_struct, thread.fpu.fpr[2]);
+ OFFSET(THREAD_FPR3, task_struct, thread.fpu.fpr[3]);
+ OFFSET(THREAD_FPR4, task_struct, thread.fpu.fpr[4]);
+ OFFSET(THREAD_FPR5, task_struct, thread.fpu.fpr[5]);
+ OFFSET(THREAD_FPR6, task_struct, thread.fpu.fpr[6]);
+ OFFSET(THREAD_FPR7, task_struct, thread.fpu.fpr[7]);
+ OFFSET(THREAD_FPR8, task_struct, thread.fpu.fpr[8]);
+ OFFSET(THREAD_FPR9, task_struct, thread.fpu.fpr[9]);
+ OFFSET(THREAD_FPR10, task_struct, thread.fpu.fpr[10]);
+ OFFSET(THREAD_FPR11, task_struct, thread.fpu.fpr[11]);
+ OFFSET(THREAD_FPR12, task_struct, thread.fpu.fpr[12]);
+ OFFSET(THREAD_FPR13, task_struct, thread.fpu.fpr[13]);
+ OFFSET(THREAD_FPR14, task_struct, thread.fpu.fpr[14]);
+ OFFSET(THREAD_FPR15, task_struct, thread.fpu.fpr[15]);
+ OFFSET(THREAD_FPR16, task_struct, thread.fpu.fpr[16]);
+ OFFSET(THREAD_FPR17, task_struct, thread.fpu.fpr[17]);
+ OFFSET(THREAD_FPR18, task_struct, thread.fpu.fpr[18]);
+ OFFSET(THREAD_FPR19, task_struct, thread.fpu.fpr[19]);
+ OFFSET(THREAD_FPR20, task_struct, thread.fpu.fpr[20]);
+ OFFSET(THREAD_FPR21, task_struct, thread.fpu.fpr[21]);
+ OFFSET(THREAD_FPR22, task_struct, thread.fpu.fpr[22]);
+ OFFSET(THREAD_FPR23, task_struct, thread.fpu.fpr[23]);
+ OFFSET(THREAD_FPR24, task_struct, thread.fpu.fpr[24]);
+ OFFSET(THREAD_FPR25, task_struct, thread.fpu.fpr[25]);
+ OFFSET(THREAD_FPR26, task_struct, thread.fpu.fpr[26]);
+ OFFSET(THREAD_FPR27, task_struct, thread.fpu.fpr[27]);
+ OFFSET(THREAD_FPR28, task_struct, thread.fpu.fpr[28]);
+ OFFSET(THREAD_FPR29, task_struct, thread.fpu.fpr[29]);
+ OFFSET(THREAD_FPR30, task_struct, thread.fpu.fpr[30]);
+ OFFSET(THREAD_FPR31, task_struct, thread.fpu.fpr[31]);
+
+ OFFSET(THREAD_FCR31, task_struct, thread.fpu.fcr31);
+ OFFSET(THREAD_MSA_CSR, task_struct, thread.fpu.msacsr);
+ BLANK();
+}
+#endif
+
+void output_mm_defines(void)
+{
+ COMMENT("Size of struct page");
+ DEFINE(STRUCT_PAGE_SIZE, sizeof(struct page));
+ BLANK();
+ COMMENT("Linux mm_struct offsets.");
+ OFFSET(MM_USERS, mm_struct, mm_users);
+ OFFSET(MM_PGD, mm_struct, pgd);
+ OFFSET(MM_CONTEXT, mm_struct, context);
+ BLANK();
+ DEFINE(_PGD_T_SIZE, sizeof(pgd_t));
+ DEFINE(_PMD_T_SIZE, sizeof(pmd_t));
+ DEFINE(_PTE_T_SIZE, sizeof(pte_t));
+ BLANK();
+ DEFINE(_PGD_T_LOG2, PGD_T_LOG2);
+#ifndef __PAGETABLE_PMD_FOLDED
+ DEFINE(_PMD_T_LOG2, PMD_T_LOG2);
+#endif
+ DEFINE(_PTE_T_LOG2, PTE_T_LOG2);
+ BLANK();
+ BLANK();
+ DEFINE(_PMD_SHIFT, PMD_SHIFT);
+ DEFINE(_PGDIR_SHIFT, PGDIR_SHIFT);
+ BLANK();
+ DEFINE(_PTRS_PER_PGD, PTRS_PER_PGD);
+ DEFINE(_PTRS_PER_PMD, PTRS_PER_PMD);
+ DEFINE(_PTRS_PER_PTE, PTRS_PER_PTE);
+ BLANK();
+ DEFINE(_PAGE_SHIFT, PAGE_SHIFT);
+ DEFINE(_PAGE_SIZE, PAGE_SIZE);
+ BLANK();
+}
+
+#ifdef CONFIG_32BIT
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_ACX, sigcontext, sc_acx);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ OFFSET(SC_FPC_EIR, sigcontext, sc_fpc_eir);
+ OFFSET(SC_HI1, sigcontext, sc_hi1);
+ OFFSET(SC_LO1, sigcontext, sc_lo1);
+ OFFSET(SC_HI2, sigcontext, sc_hi2);
+ OFFSET(SC_LO2, sigcontext, sc_lo2);
+ OFFSET(SC_HI3, sigcontext, sc_hi3);
+ OFFSET(SC_LO3, sigcontext, sc_lo3);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_64BIT
+void output_sc_defines(void)
+{
+ COMMENT("Linux sigcontext offsets.");
+ OFFSET(SC_REGS, sigcontext, sc_regs);
+ OFFSET(SC_FPREGS, sigcontext, sc_fpregs);
+ OFFSET(SC_MDHI, sigcontext, sc_mdhi);
+ OFFSET(SC_MDLO, sigcontext, sc_mdlo);
+ OFFSET(SC_PC, sigcontext, sc_pc);
+ OFFSET(SC_FPC_CSR, sigcontext, sc_fpc_csr);
+ BLANK();
+}
+#endif
+
+void output_signal_defined(void)
+{
+ COMMENT("Linux signal numbers.");
+ DEFINE(_SIGHUP, SIGHUP);
+ DEFINE(_SIGINT, SIGINT);
+ DEFINE(_SIGQUIT, SIGQUIT);
+ DEFINE(_SIGILL, SIGILL);
+ DEFINE(_SIGTRAP, SIGTRAP);
+ DEFINE(_SIGIOT, SIGIOT);
+ DEFINE(_SIGABRT, SIGABRT);
+ DEFINE(_SIGEMT, SIGEMT);
+ DEFINE(_SIGFPE, SIGFPE);
+ DEFINE(_SIGKILL, SIGKILL);
+ DEFINE(_SIGBUS, SIGBUS);
+ DEFINE(_SIGSEGV, SIGSEGV);
+ DEFINE(_SIGSYS, SIGSYS);
+ DEFINE(_SIGPIPE, SIGPIPE);
+ DEFINE(_SIGALRM, SIGALRM);
+ DEFINE(_SIGTERM, SIGTERM);
+ DEFINE(_SIGUSR1, SIGUSR1);
+ DEFINE(_SIGUSR2, SIGUSR2);
+ DEFINE(_SIGCHLD, SIGCHLD);
+ DEFINE(_SIGPWR, SIGPWR);
+ DEFINE(_SIGWINCH, SIGWINCH);
+ DEFINE(_SIGURG, SIGURG);
+ DEFINE(_SIGIO, SIGIO);
+ DEFINE(_SIGSTOP, SIGSTOP);
+ DEFINE(_SIGTSTP, SIGTSTP);
+ DEFINE(_SIGCONT, SIGCONT);
+ DEFINE(_SIGTTIN, SIGTTIN);
+ DEFINE(_SIGTTOU, SIGTTOU);
+ DEFINE(_SIGVTALRM, SIGVTALRM);
+ DEFINE(_SIGPROF, SIGPROF);
+ DEFINE(_SIGXCPU, SIGXCPU);
+ DEFINE(_SIGXFSZ, SIGXFSZ);
+ BLANK();
+}
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+void output_octeon_cop2_state_defines(void)
+{
+ COMMENT("Octeon specific octeon_cop2_state offsets.");
+ OFFSET(OCTEON_CP2_CRC_IV, octeon_cop2_state, cop2_crc_iv);
+ OFFSET(OCTEON_CP2_CRC_LENGTH, octeon_cop2_state, cop2_crc_length);
+ OFFSET(OCTEON_CP2_CRC_POLY, octeon_cop2_state, cop2_crc_poly);
+ OFFSET(OCTEON_CP2_LLM_DAT, octeon_cop2_state, cop2_llm_dat);
+ OFFSET(OCTEON_CP2_3DES_IV, octeon_cop2_state, cop2_3des_iv);
+ OFFSET(OCTEON_CP2_3DES_KEY, octeon_cop2_state, cop2_3des_key);
+ OFFSET(OCTEON_CP2_3DES_RESULT, octeon_cop2_state, cop2_3des_result);
+ OFFSET(OCTEON_CP2_AES_INP0, octeon_cop2_state, cop2_aes_inp0);
+ OFFSET(OCTEON_CP2_AES_IV, octeon_cop2_state, cop2_aes_iv);
+ OFFSET(OCTEON_CP2_AES_KEY, octeon_cop2_state, cop2_aes_key);
+ OFFSET(OCTEON_CP2_AES_KEYLEN, octeon_cop2_state, cop2_aes_keylen);
+ OFFSET(OCTEON_CP2_AES_RESULT, octeon_cop2_state, cop2_aes_result);
+ OFFSET(OCTEON_CP2_GFM_MULT, octeon_cop2_state, cop2_gfm_mult);
+ OFFSET(OCTEON_CP2_GFM_POLY, octeon_cop2_state, cop2_gfm_poly);
+ OFFSET(OCTEON_CP2_GFM_RESULT, octeon_cop2_state, cop2_gfm_result);
+ OFFSET(OCTEON_CP2_HSH_DATW, octeon_cop2_state, cop2_hsh_datw);
+ OFFSET(OCTEON_CP2_HSH_IVW, octeon_cop2_state, cop2_hsh_ivw);
+ OFFSET(OCTEON_CP2_SHA3, octeon_cop2_state, cop2_sha3);
+ OFFSET(THREAD_CP2, task_struct, thread.cp2);
+ OFFSET(THREAD_CVMSEG, task_struct, thread.cvmseg.cvmseg);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_HIBERNATION
+void output_pbe_defines(void)
+{
+ COMMENT(" Linux struct pbe offsets. ");
+ OFFSET(PBE_ADDRESS, pbe, address);
+ OFFSET(PBE_ORIG_ADDRESS, pbe, orig_address);
+ OFFSET(PBE_NEXT, pbe, next);
+ DEFINE(PBE_SIZE, sizeof(struct pbe));
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_CPU_PM
+void output_pm_defines(void)
+{
+ COMMENT(" PM offsets. ");
+#ifdef CONFIG_EVA
+ OFFSET(SSS_SEGCTL0, mips_static_suspend_state, segctl[0]);
+ OFFSET(SSS_SEGCTL1, mips_static_suspend_state, segctl[1]);
+ OFFSET(SSS_SEGCTL2, mips_static_suspend_state, segctl[2]);
+#endif
+ OFFSET(SSS_SP, mips_static_suspend_state, sp);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+void output_kvm_defines(void)
+{
+ COMMENT(" KVM/MIPS Specific offsets. ");
+
+ OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
+ OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
+ OFFSET(VCPU_FPR2, kvm_vcpu_arch, fpu.fpr[2]);
+ OFFSET(VCPU_FPR3, kvm_vcpu_arch, fpu.fpr[3]);
+ OFFSET(VCPU_FPR4, kvm_vcpu_arch, fpu.fpr[4]);
+ OFFSET(VCPU_FPR5, kvm_vcpu_arch, fpu.fpr[5]);
+ OFFSET(VCPU_FPR6, kvm_vcpu_arch, fpu.fpr[6]);
+ OFFSET(VCPU_FPR7, kvm_vcpu_arch, fpu.fpr[7]);
+ OFFSET(VCPU_FPR8, kvm_vcpu_arch, fpu.fpr[8]);
+ OFFSET(VCPU_FPR9, kvm_vcpu_arch, fpu.fpr[9]);
+ OFFSET(VCPU_FPR10, kvm_vcpu_arch, fpu.fpr[10]);
+ OFFSET(VCPU_FPR11, kvm_vcpu_arch, fpu.fpr[11]);
+ OFFSET(VCPU_FPR12, kvm_vcpu_arch, fpu.fpr[12]);
+ OFFSET(VCPU_FPR13, kvm_vcpu_arch, fpu.fpr[13]);
+ OFFSET(VCPU_FPR14, kvm_vcpu_arch, fpu.fpr[14]);
+ OFFSET(VCPU_FPR15, kvm_vcpu_arch, fpu.fpr[15]);
+ OFFSET(VCPU_FPR16, kvm_vcpu_arch, fpu.fpr[16]);
+ OFFSET(VCPU_FPR17, kvm_vcpu_arch, fpu.fpr[17]);
+ OFFSET(VCPU_FPR18, kvm_vcpu_arch, fpu.fpr[18]);
+ OFFSET(VCPU_FPR19, kvm_vcpu_arch, fpu.fpr[19]);
+ OFFSET(VCPU_FPR20, kvm_vcpu_arch, fpu.fpr[20]);
+ OFFSET(VCPU_FPR21, kvm_vcpu_arch, fpu.fpr[21]);
+ OFFSET(VCPU_FPR22, kvm_vcpu_arch, fpu.fpr[22]);
+ OFFSET(VCPU_FPR23, kvm_vcpu_arch, fpu.fpr[23]);
+ OFFSET(VCPU_FPR24, kvm_vcpu_arch, fpu.fpr[24]);
+ OFFSET(VCPU_FPR25, kvm_vcpu_arch, fpu.fpr[25]);
+ OFFSET(VCPU_FPR26, kvm_vcpu_arch, fpu.fpr[26]);
+ OFFSET(VCPU_FPR27, kvm_vcpu_arch, fpu.fpr[27]);
+ OFFSET(VCPU_FPR28, kvm_vcpu_arch, fpu.fpr[28]);
+ OFFSET(VCPU_FPR29, kvm_vcpu_arch, fpu.fpr[29]);
+ OFFSET(VCPU_FPR30, kvm_vcpu_arch, fpu.fpr[30]);
+ OFFSET(VCPU_FPR31, kvm_vcpu_arch, fpu.fpr[31]);
+
+ OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
+ OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
+ BLANK();
+}
+#endif
+
+#ifdef CONFIG_MIPS_CPS
+void output_cps_defines(void)
+{
+ COMMENT(" MIPS CPS offsets. ");
+
+ OFFSET(COREBOOTCFG_VPEMASK, core_boot_config, vpe_mask);
+ OFFSET(COREBOOTCFG_VPECONFIG, core_boot_config, vpe_config);
+ DEFINE(COREBOOTCFG_SIZE, sizeof(struct core_boot_config));
+
+ OFFSET(VPEBOOTCFG_PC, vpe_boot_config, pc);
+ OFFSET(VPEBOOTCFG_SP, vpe_boot_config, sp);
+ OFFSET(VPEBOOTCFG_GP, vpe_boot_config, gp);
+ DEFINE(VPEBOOTCFG_SIZE, sizeof(struct vpe_boot_config));
+}
+#endif
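
asm-offsets.c is never linked into the kernel image: kbuild compiles it to assembly and scrapes the markers emitted by DEFINE()/OFFSET() out of the .s file to generate asm-offsets.h, which entry.S, genex.S and the other assembly sources above include. A self-contained sketch of the trick, with a stand-in struct instead of the real pt_regs (compile with "cc -S" and look for the "->" lines):

	#include <stddef.h>

	/* same shape as the macros in the kernel's linux/kbuild.h */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))
	#define OFFSET(sym, str, mem) DEFINE(sym, offsetof(struct str, mem))

	struct demo_regs {			/* stand-in for pt_regs */
		unsigned long regs[32];
		unsigned long cp0_epc;
	};

	int main(void)
	{
		OFFSET(PT_EPC_DEMO, demo_regs, cp0_epc);
		DEFINE(PT_SIZE_DEMO, sizeof(struct demo_regs));
		return 0;
	}

A script pass over the generated assembly turns each "->NAME value" marker into "#define NAME value", which is how PT_EPC and friends become visible to the .S files.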
diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S
new file mode 100644
index 000000000..9e422d186
--- /dev/null
+++ b/arch/mips/kernel/bmips_5xxx_init.S
@@ -0,0 +1,747 @@
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011-2012 by Broadcom Corporation
+ *
+ * Init for bmips 5000.
+ * Used to init second core in dual core 5000's.
+ */
+
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+#ifdef CONFIG_CPU_BMIPS5000
+
+
+#define cacheop(kva, size, linesize, op) \
+ .set noreorder ; \
+ addu t1, kva, size ; \
+ subu t2, linesize, 1 ; \
+ not t2 ; \
+ and t0, kva, t2 ; \
+ addiu t1, t1, -1 ; \
+ and t1, t2 ; \
+9: cache op, 0(t0) ; \
+ bne t0, t1, 9b ; \
+ addu t0, linesize ; \
+ .set reorder ;
+
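In C terms, cacheop() rounds the start address and the address of the last byte down to a cache-line boundary and issues one op per line, both endpoints inclusive. A sketch, assuming linesize is a power of two, with op standing in for the cache instruction:

	static void cacheop_range(unsigned long kva, unsigned long size,
				  unsigned long linesize,
				  void (*op)(unsigned long line))
	{
		unsigned long mask = ~(linesize - 1);
		unsigned long line = kva & mask;		/* round start down */
		unsigned long last = (kva + size - 1) & mask;	/* line of last byte */

		for (;;) {
			op(line);		/* e.g. one Index_Store_Tag_I op */
			if (line == last)
				break;
			line += linesize;
		}
	}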
+
+
+#define IS_SHIFT 22
+#define IL_SHIFT 19
+#define IA_SHIFT 16
+#define DS_SHIFT 13
+#define DL_SHIFT 10
+#define DA_SHIFT 7
+#define IS_MASK 7
+#define IL_MASK 7
+#define IA_MASK 7
+#define DS_MASK 7
+#define DL_MASK 7
+#define DA_MASK 7
+#define ICE_MASK 0x80000000
+#define DCE_MASK 0x40000000
+
+#define CP0_BRCM_CONFIG0 $22, 0
+#define CP0_BRCM_MODE $22, 1
+#define CP0_CONFIG_K0_MASK 7
+
+#define CP0_ICACHE_TAG_LO $28
+#define CP0_ICACHE_DATA_LO $28, 1
+#define CP0_DCACHE_TAG_LO $28, 2
+#define CP0_D_SEC_CACHE_DATA_LO $28, 3
+#define CP0_ICACHE_TAG_HI $29
+#define CP0_ICACHE_DATA_HI $29, 1
+#define CP0_DCACHE_TAG_HI $29, 2
+
+#define CP0_BRCM_MODE_Luc_MASK (1 << 11)
+#define CP0_BRCM_CONFIG0_CWF_MASK (1 << 20)
+#define CP0_BRCM_CONFIG0_TSE_MASK (1 << 19)
+#define CP0_BRCM_MODE_SET_MASK (1 << 7)
+#define CP0_BRCM_MODE_ClkRATIO_MASK (7 << 4)
+#define CP0_BRCM_MODE_BrPRED_MASK (3 << 24)
+#define CP0_BRCM_MODE_BrPRED_SHIFT 24
+#define CP0_BRCM_MODE_BrHIST_MASK (0x1f << 20)
+#define CP0_BRCM_MODE_BrHIST_SHIFT 20
+
+/* ZSC L2 Cache Register Access Register Definitions */
+#define BRCM_ZSC_ALL_REGS_SELECT 0x7 << 24
+
+#define BRCM_ZSC_CONFIG_REG 0 << 3
+#define BRCM_ZSC_REQ_BUFFER_REG 2 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0 4 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1 6 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2 8 << 3
+
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0 0xa << 3
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1 0xc << 3
+
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0 0xe << 3
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1 0x10 << 3
+
+#define BRCM_ZSC_CONFIG_LMB1En 1 << (15)
+#define BRCM_ZSC_CONFIG_LMB0En 1 << (14)
+
+/* branch prediction values */
+
+#define BRCM_BrPRED_ALL_TAKEN (0x0)
+#define BRCM_BrPRED_ALL_NOT_TAKEN (0x1)
+#define BRCM_BrPRED_BHT_ENABLE (0x2)
+#define BRCM_BrPRED_PREDICT_BACKWARD (0x3)
+
+
+
+.align 2
+/*
+ * Function: size_i_cache
+ * Arguments: None
+ * Returns: v0 = i cache size, v1 = I cache line size
+ * Description: compute the I-cache size and I-cache line size
+ * Trashes: v0, v1, a0, t0
+ *
+ * pseudo code:
+ *
+ */
+
+LEAF(size_i_cache)
+ .set noreorder
+
+ mfc0 a0, CP0_CONFIG, 1
+ move t0, a0
+
+ /*
+ * Determine sets per way: IS
+ *
+ * This field contains the number of sets (i.e., indices) per way of
+ * the instruction cache:
+ * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+ * vi) 0x5 - 0x7: Reserved.
+ */
+
+ srl a0, a0, IS_SHIFT
+ and a0, a0, IS_MASK
+
+ /* sets per way = (64<<IS) */
+
+ li v0, 0x40
+ sllv v0, v0, a0
+
+ /*
+ * Determine line size
+ *
+ * This field contains the line size of the instruction cache:
+	 * i) 0x0: No I-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+	 * iv) 0x5: 64 bytes, v) the rest: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, IL_SHIFT
+ and a0, a0, IL_MASK
+
+ beqz a0, no_i_cache
+ nop
+
+ /* line size = 2 ^ (IL+1) */
+
+ addi a0, a0, 1
+ li v1, 1
+ sll v1, v1, a0
+
+	/* v0 now has sets per way; multiply it by the line size
+	 * to get the set size
+ */
+
+ sll v0, v0, a0
+
+ /*
+ * Determine set associativity
+ *
+ * This field contains the set associativity of the instruction cache.
+ * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+ * 4-way, v) 0x4 - 0x7: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, IA_SHIFT
+ and a0, a0, IA_MASK
+ addi a0, a0, 0x1
+
+	/* v0 has the set size; multiply it by the
+	 * set associativity to get the cache size
+ */
+
+ multu v0, a0 /*multu is interlocked, so no need to insert nops */
+ mflo v0
+ b 1f
+ nop
+
+no_i_cache:
+ move v0, zero
+ move v1, zero
+1:
+ jr ra
+ nop
+ .set reorder
+
+END(size_i_cache)
+
+/*
+ * Function: size_d_cache
+ * Arguments: None
+ * Returns: v0 = d cache size, v1 = d cache line size
+ * Description: compute the D-cache size and D-cache line size.
+ * Trashes: v0, v1, a0, t0
+ *
+ */
+
+LEAF(size_d_cache)
+ .set noreorder
+
+ mfc0 a0, CP0_CONFIG, 1
+ move t0, a0
+
+ /*
+	 * Determine sets per way: DS
+	 *
+	 * This field contains the number of sets (i.e., indices) per way of
+	 * the data cache:
+ * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+ * vi) 0x5 - 0x7: Reserved.
+ */
+
+ srl a0, a0, DS_SHIFT
+ and a0, a0, DS_MASK
+
+	/* sets per way = (64<<DS) */
+
+ li v0, 0x40
+ sllv v0, v0, a0
+
+ /*
+ * Determine line size
+ *
+	 * This field contains the line size of the data cache:
+	 * i) 0x0: No D-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+	 * iv) 0x5: 64 bytes, v) the rest: Reserved.
+ */
+ move a0, t0
+
+ srl a0, a0, DL_SHIFT
+ and a0, a0, DL_MASK
+
+ beqz a0, no_d_cache
+ nop
+
+	/* line size = 2 ^ (DL+1) */
+
+ addi a0, a0, 1
+ li v1, 1
+ sll v1, v1, a0
+
+	/* v0 now has sets per way; multiply it by the line size
+	 * to get the set size
+ */
+
+ sll v0, v0, a0
+
+ /* determine set associativity
+ *
+	 * This field contains the set associativity of the data cache.
+ * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+ * 4-way, v) 0x4 - 0x7: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, DA_SHIFT
+ and a0, a0, DA_MASK
+ addi a0, a0, 0x1
+
+	/* v0 has the set size; multiply it by the
+	 * set associativity to get the cache size
+ */
+
+ multu v0, a0 /*multu is interlocked, so no need to insert nops */
+ mflo v0
+
+ b 1f
+ nop
+
+no_d_cache:
+ move v0, zero
+ move v1, zero
+1:
+ jr ra
+ nop
+ .set reorder
+
+END(size_d_cache)
+
+
+/*
+ * Function: enable_ID
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, initialize I and D-caches, also set
+ * hardware delay for d-cache (TP0).
+ * Trashes: t0
+ *
+ */
+ .global enable_ID
+ .ent enable_ID
+ .set noreorder
+enable_ID:
+ mfc0 t0, CP0_BRCM_CONFIG0
+ or t0, t0, (ICE_MASK | DCE_MASK)
+ mtc0 t0, CP0_BRCM_CONFIG0
+ jr ra
+ nop
+
+ .end enable_ID
+ .set reorder
+
+
+/*
+ * Function: l1_init
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes: a0, v0, v1, t0, t1, t2, t8
+ *
+ */
+ .globl l1_init
+ .ent l1_init
+ .set noreorder
+l1_init:
+
+ /* save return address */
+ move t8, ra
+
+
+ /* initialize I and D cache Data and Tag registers. */
+ mtc0 zero, CP0_ICACHE_TAG_LO
+ mtc0 zero, CP0_ICACHE_TAG_HI
+ mtc0 zero, CP0_ICACHE_DATA_LO
+ mtc0 zero, CP0_ICACHE_DATA_HI
+ mtc0 zero, CP0_DCACHE_TAG_LO
+ mtc0 zero, CP0_DCACHE_TAG_HI
+
+ /* Enable Caches before Clearing. If the caches are disabled
+ * then the cache operations to clear the cache will be ignored
+ */
+
+ jal enable_ID
+ nop
+
+ jal size_i_cache /* v0 = i-cache size, v1 = i-cache line size */
+ nop
+
+ /* run uncached in kseg 1 */
+ la k0, 1f
+ lui k1, 0x2000
+ or k0, k1, k0
+ jr k0
+ nop
+1:
+
+ /*
+ * set K0 cache mode
+ */
+
+ mfc0 t0, CP0_CONFIG
+ and t0, t0, ~CP0_CONFIG_K0_MASK
+ or t0, t0, 3 /* Write Back mode */
+ mtc0 t0, CP0_CONFIG
+
+ /*
+ * Initialize instruction cache.
+ */
+
+ li a0, KSEG0
+ cacheop(a0, v0, v1, Index_Store_Tag_I)
+
+ /*
+ * Now we can run from I-$, kseg 0
+ */
+ la k0, 1f
+ lui k1, 0x2000
+ or k0, k1, k0
+ xor k0, k1, k0
+ jr k0
+ nop
+1:
+ /*
+ * Initialize data cache.
+ */
+
+ jal size_d_cache /* v0 = d-cache size, v1 = d-cache line size */
+ nop
+
+
+ li a0, KSEG0
+ cacheop(a0, v0, v1, Index_Store_Tag_D)
+
+ jr t8
+ nop
+
+ .end l1_init
+ .set reorder
+
+
+/*
+ * Function: set_other_config
+ * Arguments: none
+ * Returns: None
+ * Description: initialize the remaining configuration to defaults.
+ * Trashes: t0, t1
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_other_config)
+ .set noreorder
+
+ /* enable Bus error for I-fetch */
+ mfc0 t0, CP0_CACHEERR, 0
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 0
+
+ /* enable Bus error for Load */
+ mfc0 t0, CP0_CACHEERR, 1
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 1
+
+ /* enable Bus Error for Store */
+ mfc0 t0, CP0_CACHEERR, 2
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 2
+
+ jr ra
+ nop
+ .set reorder
+END(set_other_config)
+
+/*
+ * Function: set_branch_pred
+ * Arguments: none
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ * pseudo code:
+ *
+ */
+
+LEAF(set_branch_pred)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK )
+ and t0, t0, t1
+
+ /* enable Branch prediction */
+ li t1, BRCM_BrPRED_BHT_ENABLE
+ sll t1, CP0_BRCM_MODE_BrPRED_SHIFT
+ or t0, t0, t1
+
+ /* set history count to 8 */
+ li t1, 8
+ sll t1, CP0_BRCM_MODE_BrHIST_SHIFT
+ or t0, t0, t1
+
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_branch_pred)
+
+
+/*
+ * Function: set_luc
+ * Arguments:	None
+ * Returns:	None
+ * Description: set link uncached (Luc).
+ * Trashes: t0, t1
+ *
+ */
+LEAF(set_luc)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_Luc_MASK)
+ and t0, t0, t1
+
+ /* set Luc */
+ ori t0, t0, CP0_BRCM_MODE_Luc_MASK
+
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_luc)
+
+/*
+ * Function: set_cwf_tse
+ * Arguments:	None
+ * Returns:	None
+ * Description: set the CWF and TSE bits.
+ * Trashes: t0, t1
+ *
+ */
+LEAF(set_cwf_tse)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_CONFIG0
+ li t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK)
+ or t0, t0, t1
+
+ mtc0 t0, CP0_BRCM_CONFIG0
+ jr ra
+ nop
+ .set reorder
+END(set_cwf_tse)
+
+/*
+ * Function: set_clock_ratio
+ * Arguments:	a0 = clock ratio to set
+ * Returns:	None
+ * Description: set the clock ratio specified by a0.
+ * Trashes: v0, v1, a0, a1
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_clock_ratio)
+ .set noreorder
+
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK)
+ and t0, t0, t1
+ li t1, CP0_BRCM_MODE_SET_MASK
+ or t0, t0, t1
+ or t0, t0, a0
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_clock_ratio)
+/*
+ * Function: set_zephyr
+ * Arguments: None
+ * Returns: None
+ * Description: Set any zephyr bits
+ * Trashes: t0 & t1
+ *
+ */
+LEAF(set_zephyr)
+ .set noreorder
+
+ /* enable read/write of CP0 #22 sel. 8 */
+ li t0, 0x5a455048
+ .word 0x4088b00f /* mtc0 t0, $22, 15 */
+
+ .word 0x4008b008 /* mfc0 t0, $22, 8 */
+ li t1, 0x09008000 /* turn off pref, jtb */
+ or t0, t0, t1
+ .word 0x4088b008 /* mtc0 t0, $22, 8 */
+ sync
+
+ /* disable read/write of CP0 #22 sel 8 */
+ li t0, 0x0
+ .word 0x4088b00f /* mtc0 t0, $22, 15 */
+
+
+ jr ra
+ nop
+ .set reorder
+
+END(set_zephyr)
+
+
+/*
+ * Function: set_llmb
+ * Arguments:	a0=0 disables llmb, a0=1 enables llmb
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1, t2
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_llmb)
+ .set noreorder
+
+ li t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG
+ sync
+ cache 0x7, 0x0(t2)
+ sync
+ mfc0 t0, CP0_D_SEC_CACHE_DATA_LO
+ li t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+ and t0, t0, t1
+
+ beqz a0, svlmb
+ nop
+
+enable_lmb:
+ li t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+ or t0, t0, t1
+
+svlmb:
+ mtc0 t0, CP0_D_SEC_CACHE_DATA_LO
+ sync
+ cache 0xb, 0x0(t2)
+ sync
+
+ jr ra
+ nop
+ .set reorder
+
+END(set_llmb)
+/*
+ * Function: core_init
+ * Arguments: none
+ * Returns: None
+ * Description: initialize core related configuration
+ * Trashes: v0,v1,a0,a1,t8
+ *
+ * pseudo code:
+ *
+ */
+ .globl core_init
+ .ent core_init
+ .set noreorder
+core_init:
+ move t8, ra
+
+ /* set Zephyr bits. */
+ bal set_zephyr
+ nop
+
+ /* set low latency memory bus */
+ li a0, 1
+ bal set_llmb
+ nop
+
+ /* set branch prediction (TP0 only) */
+ bal set_branch_pred
+ nop
+
+ /* set link uncached */
+ bal set_luc
+ nop
+
+ /* set CWF and TSE */
+ bal set_cwf_tse
+ nop
+
+ /*
+	 * set clock ratio by writing 1 to 'set'
+	 * and 0 to ClkRatio (TP0 only)
+ */
+ li a0, 0
+ bal set_clock_ratio
+ nop
+
+ /* set other configuration to defaults */
+ bal set_other_config
+ nop
+
+ move ra, t8
+ jr ra
+ nop
+
+ .set reorder
+ .end core_init
+
+/*
+ * Function: clear_jump_target_buffer
+ * Arguments: None
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1, t2
+ *
+ */
+#define RESET_CALL_RETURN_STACK_THIS_THREAD (0x06<<16)
+#define RESET_JUMP_TARGET_BUFFER_THIS_THREAD (0x04<<16)
+#define JTB_CS_CNTL_MASK (0xFF<<16)
+
+ .globl clear_jump_target_buffer
+ .ent clear_jump_target_buffer
+ .set noreorder
+clear_jump_target_buffer:
+
+ mfc0 t0, $22, 2
+ nop
+ nop
+
+ li t1, ~JTB_CS_CNTL_MASK
+ and t0, t0, t1
+ li t2, RESET_CALL_RETURN_STACK_THIS_THREAD
+ or t0, t0, t2
+ mtc0 t0, $22, 2
+ nop
+ nop
+
+ and t0, t0, t1
+ li t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD
+ or t0, t0, t2
+ mtc0 t0, $22, 2
+ nop
+ nop
+ jr ra
+ nop
+
+ .end clear_jump_target_buffer
+ .set reorder
+/*
+ * Function: bmips_5xxx_init
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes: v0, v1, t0, t1, t2, t5, t7, t8
+ *
+ */
+ .globl bmips_5xxx_init
+ .ent bmips_5xxx_init
+ .set noreorder
+bmips_5xxx_init:
+
+ /* save return address and A0 */
+ move t7, ra
+ move t5, a0
+
+ jal l1_init
+ nop
+
+ jal core_init
+ nop
+
+ jal clear_jump_target_buffer
+ nop
+
+ mtc0 zero, CP0_CAUSE
+
+ move a0, t5
+ jr t7
+ nop
+
+ .end bmips_5xxx_init
+ .set reorder
+
+
+#endif
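
size_i_cache/size_d_cache above implement the standard MIPS32 Config1 decode: total bytes = (64 << S) sets per way, times 2^(L+1) bytes per line, times (A+1) ways. The same computation for the I-cache side in C, as a sketch (the *_SHIFT/*_MASK macros are the ones defined at the top of this file):

	static unsigned int icache_size(unsigned int config1,
					unsigned int *linesize)
	{
		unsigned int sets = 64u << ((config1 >> IS_SHIFT) & IS_MASK);
		unsigned int il   = (config1 >> IL_SHIFT) & IL_MASK;
		unsigned int ways = ((config1 >> IA_SHIFT) & IA_MASK) + 1;

		if (!il) {			/* IL == 0: no I-cache present */
			*linesize = 0;
			return 0;
		}
		*linesize = 2u << il;		/* 2 ^ (IL + 1) bytes per line */
		return sets * *linesize * ways;
	}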
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
new file mode 100644
index 000000000..921a5fa55
--- /dev/null
+++ b/arch/mips/kernel/bmips_vec.S
@@ -0,0 +1,322 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * Reset/NMI/re-entry vectors for BMIPS processors
+ */
+
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/cpu.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+ .macro BARRIER
+ .set mips32
+ _ssnop
+ _ssnop
+ _ssnop
+ .set mips0
+ .endm
+
+/***********************************************************************
+ * Alternate CPU1 startup vector for BMIPS4350
+ *
+ * On some systems the bootloader has already started CPU1 and configured
+ * it to resume execution at 0x8000_0200 (!BEV IV vector) when it is
+ * triggered by the SW1 interrupt. If that is the case we try to move
+ * it to a more convenient place: BMIPS_WARM_RESTART_VEC @ 0x8000_0380.
+ ***********************************************************************/
+
+LEAF(bmips_smp_movevec)
+ la k0, 1f
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+
+1:
+ /* clear IV, pending IPIs */
+ mtc0 zero, CP0_CAUSE
+
+ /* re-enable IRQs to wait for SW1 */
+ li k0, ST0_IE | ST0_BEV | STATUSF_IP1
+ mtc0 k0, CP0_STATUS
+
+ /* set up CPU1 CBR; move BASE to 0xa000_0000 */
+ li k0, 0xff400000
+ mtc0 k0, $22, 6
+ /* set up relocation vector address based on thread ID */
+ mfc0 k1, $22, 3
+ srl k1, 16
+ andi k1, 0x8000
+ or k1, CKSEG1 | BMIPS_RELO_VECTOR_CONTROL_0
+ or k0, k1
+ li k1, 0xa0080000
+ sw k1, 0(k0)
+
+ /* wait here for SW1 interrupt from bmips_boot_secondary() */
+ wait
+
+ la k0, bmips_reset_nmi_vec
+ li k1, CKSEG1
+ or k0, k1
+ jr k0
+END(bmips_smp_movevec)
+
+/***********************************************************************
+ * Reset/NMI vector
+ * For BMIPS processors that can relocate their exception vectors, this
+ * entire function gets copied to 0x8000_0000.
+ ***********************************************************************/
+
+NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
+ .set push
+ .set noat
+ .align 4
+
+#ifdef CONFIG_SMP
+ /* if the NMI bit is clear, assume this is a CPU1 reset instead */
+ li k1, (1 << 19)
+ mfc0 k0, CP0_STATUS
+ and k0, k1
+ beqz k0, soft_reset
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ li k1, PRID_IMP_BMIPS5000
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ andi k0, PRID_IMP_BMIPS5000
+ bne k0, k1, 1f
+
+ /* if we're not on core 0, this must be the SMP boot signal */
+ li k1, (3 << 25)
+ mfc0 k0, $22
+ and k0, k1
+ bnez k0, bmips_smp_entry
+1:
+#endif /* CONFIG_CPU_BMIPS5000 */
+#endif /* CONFIG_SMP */
+
+ /* nope, it's just a regular NMI */
+ SAVE_ALL
+ move a0, sp
+
+ /* clear EXL, ERL, BEV so that TLB refills still work */
+ mfc0 k0, CP0_STATUS
+ li k1, ST0_ERL | ST0_EXL | ST0_BEV | ST0_IE
+ or k0, k1
+ xor k0, k1
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* jump to the NMI handler function */
+ la k0, nmi_handler
+ jr k0
+
+ RESTORE_ALL
+ .set arch=r4000
+ eret
+
+#ifdef CONFIG_SMP
+soft_reset:
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+ li k1, PRID_IMP_BMIPS5200
+ bne k0, k1, bmips_smp_entry
+
+ /* if running on TP 1, jump to bmips_smp_entry */
+ mfc0 k0, $22
+ li k1, (1 << 24)
+ and k1, k0
+ bnez k1, bmips_smp_entry
+ nop
+
+ /*
+	 * Running on TP0 but not on core 0 (the boot core):
+	 * check for soft reset, which indicates a warm boot.
+ */
+ mfc0 k0, $12
+ li k1, (1 << 20)
+ and k0, k1
+ beqz k0, bmips_smp_entry
+
+ /*
+ * Warm boot.
+ * Cache init is only done on TP0
+ */
+ la k0, bmips_5xxx_init
+ jalr k0
+ nop
+
+ b bmips_smp_entry
+ nop
+#endif
+
+/***********************************************************************
+ * CPU1 reset vector (used for the initial boot only)
+ * This is still part of bmips_reset_nmi_vec().
+ ***********************************************************************/
+
+bmips_smp_entry:
+
+ /* set up CP0 STATUS; enable FPU */
+ li k0, 0x30000000
+ mtc0 k0, CP0_STATUS
+ BARRIER
+
+ /* set local CP0 CONFIG to make kseg0 cacheable, write-back */
+ mfc0 k0, CP0_CONFIG
+ ori k0, 0x07
+ xori k0, 0x04
+ mtc0 k0, CP0_CONFIG
+
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4350) || defined(CONFIG_CPU_BMIPS4380)
+ li k1, PRID_IMP_BMIPS43XX
+ bne k0, k1, 2f
+
+ /* initialize CPU1's local I-cache */
+ li k0, 0x80000000
+ li k1, 0x80010000
+ mtc0 zero, $28
+ mtc0 zero, $28, 1
+ BARRIER
+
+1: cache Index_Store_Tag_I, 0(k0)
+ addiu k0, 16
+ bne k0, k1, 1b
+
+ b 3f
+2:
+#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ li k1, PRID_IMP_BMIPS5000
+ andi k0, PRID_IMP_BMIPS5000
+ bne k0, k1, 3f
+
+ /* set exception vector base */
+ la k0, ebase
+ lw k0, 0(k0)
+ mtc0 k0, $15, 1
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+3:
+ /* jump back to kseg0 in case we need to remap the kseg1 area */
+ la k0, 1f
+ jr k0
+1:
+ la k0, bmips_enable_xks01
+ jalr k0
+
+ /* use temporary stack to set up upper memory TLB */
+ li sp, BMIPS_WARM_RESTART_VEC
+ la k0, plat_wired_tlb_setup
+ jalr k0
+
+ /* switch to permanent stack and continue booting */
+
+ .global bmips_secondary_reentry
+bmips_secondary_reentry:
+ la k0, bmips_smp_boot_sp
+ lw sp, 0(k0)
+ la k0, bmips_smp_boot_gp
+ lw gp, 0(k0)
+ la k0, start_secondary
+ jr k0
+
+#endif /* CONFIG_SMP */
+
+ .align 4
+ .global bmips_reset_nmi_vec_end
+bmips_reset_nmi_vec_end:
+
+END(bmips_reset_nmi_vec)
+
+ .set pop
+
+/***********************************************************************
+ * CPU1 warm restart vector (used for second and subsequent boots).
+ * Also used for S2 standby recovery (PM).
+ * This entire function gets copied to (BMIPS_WARM_RESTART_VEC)
+ ***********************************************************************/
+
+LEAF(bmips_smp_int_vec)
+
+ .align 4
+ mfc0 k0, CP0_STATUS
+ ori k0, 0x01
+ xori k0, 0x01
+ mtc0 k0, CP0_STATUS
+ eret
+
+ .align 4
+ .global bmips_smp_int_vec_end
+bmips_smp_int_vec_end:
+
+END(bmips_smp_int_vec)
+
+/***********************************************************************
+ * XKS01 support
+ * Certain CPUs support extending kseg0 to 1024MB.
+ ***********************************************************************/
+
+LEAF(bmips_enable_xks01)
+
+#if defined(CONFIG_XKS01)
+ mfc0 t0, CP0_PRID
+ andi t2, t0, 0xff00
+#if defined(CONFIG_CPU_BMIPS4380)
+ li t1, PRID_IMP_BMIPS43XX
+ bne t2, t1, 1f
+
+ andi t0, 0xff
+ addiu t1, t0, -PRID_REV_BMIPS4380_HI
+ bgtz t1, 2f
+ addiu t0, -PRID_REV_BMIPS4380_LO
+ bltz t0, 2f
+
+ mfc0 t0, $22, 3
+ li t1, 0x1ff0
+ li t2, (1 << 12) | (1 << 9)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 3
+ BARRIER
+ b 2f
+1:
+#endif /* CONFIG_CPU_BMIPS4380 */
+#if defined(CONFIG_CPU_BMIPS5000)
+ li t1, PRID_IMP_BMIPS5000
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ andi t2, PRID_IMP_BMIPS5000
+ bne t2, t1, 2f
+
+ mfc0 t0, $22, 5
+ li t1, 0x01ff
+ li t2, (1 << 8) | (1 << 5)
+ or t0, t1
+ xor t0, t1
+ or t0, t2
+ mtc0 t0, $22, 5
+ BARRIER
+#endif /* CONFIG_CPU_BMIPS5000 */
+2:
+#endif /* defined(CONFIG_XKS01) */
+
+ jr ra
+
+END(bmips_enable_xks01)
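
The paired start/end labels exist because none of these vectors run in place: the SMP setup code copies the blob between the labels to its fixed address and flushes the caches so instruction fetch sees the new code. Roughly like the following sketch (the helper name is invented; the real copy logic lives in arch/mips/kernel/smp-bmips.c):

	extern char bmips_smp_int_vec[], bmips_smp_int_vec_end[];

	static void install_warm_restart_vec(void)
	{
		unsigned long dst = BMIPS_WARM_RESTART_VEC;	/* 0x8000_0380 */
		size_t len = bmips_smp_int_vec_end - bmips_smp_int_vec;

		memcpy((void *)dst, bmips_smp_int_vec, len);
		/* write back D-cache and invalidate I-cache over the copy */
		local_flush_icache_range(dst, dst + len);
	}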
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
new file mode 100644
index 000000000..0216ff24c
--- /dev/null
+++ b/arch/mips/kernel/branch.c
@@ -0,0 +1,908 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 97, 2000, 2001 by Ralf Baechle
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/export.h>
+#include <asm/branch.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+
+#include "probes-common.h"
+
+/*
+ * Calculate and return the exception PC when the faulting instruction
+ * sits in a branch delay slot, for microMIPS and MIPS16e. It does not
+ * clear the ISA mode bit.
+ */
+int __isa_exception_epc(struct pt_regs *regs)
+{
+ unsigned short inst;
+ long epc = regs->cp0_epc;
+
+ /* Calculate exception PC in branch delay slot. */
+ if (__get_user(inst, (u16 __user *) msk_isa16_mode(epc))) {
+ /* This should never happen because delay slot was checked. */
+ force_sig(SIGSEGV);
+ return epc;
+ }
+ if (cpu_has_mips16) {
+ union mips16e_instruction inst_mips16e;
+
+ inst_mips16e.full = inst;
+ if (inst_mips16e.ri.opcode == MIPS16e_jal_op)
+ epc += 4;
+ else
+ epc += 2;
+ } else if (mm_insn_16bit(inst))
+ epc += 2;
+ else
+ epc += 4;
+
+ return epc;
+}
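
The ISA mode bit that the comment mentions is bit 0 of the PC: when set, the CPU executes microMIPS/MIPS16e instructions. The real helpers are the msk_isa16_mode()/set_isa16_mode() macros used throughout this file; as a plain illustration (the demo_ names are not kernel API):

	static inline unsigned long demo_msk_isa16_mode(unsigned long epc)
	{
		return epc & ~1UL;	/* address usable for instruction loads */
	}

	static inline unsigned long demo_set_isa16_mode(unsigned long epc)
	{
		return epc | 1UL;	/* PC value that resumes in the 16-bit ISA */
	}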
+
+/* (microMIPS) Convert 16-bit register encoding to 32-bit register encoding. */
+static const unsigned int reg16to32map[8] = {16, 17, 2, 3, 4, 5, 6, 7};
+
+int __mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
+{
+ union mips_instruction insn = (union mips_instruction)dec_insn.insn;
+ int __maybe_unused bc_false = 0;
+
+ if (!cpu_has_mmips)
+ return 0;
+
+ switch (insn.mm_i_format.opcode) {
+ case mm_pool32a_op:
+ if ((insn.mm_i_format.simmediate & MM_POOL32A_MINOR_MASK) ==
+ mm_pool32axf_op) {
+ switch (insn.mm_i_format.simmediate >>
+ MM_POOL32A_MINOR_SHIFT) {
+ case mm_jalr_op:
+ case mm_jalrhb_op:
+ case mm_jalrs_op:
+ case mm_jalrshb_op:
+ if (insn.mm_i_format.rt != 0) /* Not mm_jr */
+ regs->regs[insn.mm_i_format.rt] =
+ regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ }
+ break;
+ case mm_pool32i_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_bltzals_op:
+ case mm_bltzal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_bltz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] < 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgezals_op:
+ case mm_bgezal_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_bgez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] >= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_blez_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bgtz_op:
+ if ((long)regs->regs[insn.mm_i_format.rs] <= 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case mm_bc2f_op:
+ case mm_bc1f_op: {
+ unsigned int fcr31;
+ unsigned int bit;
+
+ bc_false = 1;
+ fallthrough;
+ case mm_bc2t_op:
+ case mm_bc1t_op:
+ preempt_disable();
+ if (is_fpu_owner())
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ if (bc_false)
+ fcr31 = ~fcr31;
+
+ bit = (insn.mm_i_format.rs >> 2);
+ bit += (bit != 0);
+ bit += 23;
+ if (fcr31 & (1 << bit))
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ }
+ break;
+ case mm_pool16c_op:
+ switch (insn.mm_i_format.rt) {
+ case mm_jalr16_op:
+ case mm_jalrs16_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_jr16_op:
+ *contpc = regs->regs[insn.mm_i_format.rs];
+ return 1;
+ }
+ break;
+ case mm_beqz16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] == 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_bnez16_op:
+ if ((long)regs->regs[reg16to32map[insn.mm_b1_format.rs]] != 0)
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_b1_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_b16_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc +
+ (insn.mm_b0_format.simmediate << 1);
+ return 1;
+ case mm_beq32_op:
+ if (regs->regs[insn.mm_i_format.rs] ==
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ return 1;
+ case mm_bne32_op:
+ if (regs->regs[insn.mm_i_format.rs] !=
+ regs->regs[insn.mm_i_format.rt])
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc +
+ (insn.mm_i_format.simmediate << 1);
+ else
+ *contpc = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ return 1;
+ case mm_jalx32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 28;
+ *contpc <<= 28;
+ *contpc |= (insn.j_format.target << 2);
+ return 1;
+ case mm_jals32_op:
+ case mm_jal32_op:
+ regs->regs[31] = regs->cp0_epc +
+ dec_insn.pc_inc + dec_insn.next_pc_inc;
+ fallthrough;
+ case mm_j32_op:
+ *contpc = regs->cp0_epc + dec_insn.pc_inc;
+ *contpc >>= 27;
+ *contpc <<= 27;
+ *contpc |= (insn.j_format.target << 1);
+ set_isa16_mode(*contpc);
+ return 1;
+ }
+ return 0;
+}
+
+/*
+ * Compute return address and emulate branch in microMIPS mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __microMIPS_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long contpc;
+ struct mm_decoded_insn mminsn = { 0 };
+
+ mminsn.micro_mips_mode = 1;
+
+ /* This load never faults. */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto sigsegv;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ mm_isBranchInstr(regs, mminsn, &contpc);
+
+ regs->cp0_epc = contpc;
+
+ return 0;
+
+sigsegv:
+ force_sig(SIGSEGV);
+ return -EFAULT;
+}
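
The halfword shuffling above is how a variable-length microMIPS instruction gets reassembled: the first 16-bit halfword is parked in the top half of a 32-bit word, and a second halfword is appended only when the major opcode marks a 32-bit encoding. Condensed into a sketch (fetch16() stands in for the __get_user() calls and their error handling):

	static u32 demo_fetch_micromips(unsigned long epc,
					u16 (*fetch16)(unsigned long addr),
					unsigned int *len)
	{
		u16 hw = fetch16(epc & ~1UL);	/* strip the ISA mode bit */
		u32 word = (u32)hw << 16;	/* first halfword in the top bits */

		*len = 2;
		if (!mm_insn_16bit(hw)) {	/* 32-bit encoding: second halfword */
			word |= fetch16((epc & ~1UL) + 2);
			*len = 4;
		}
		return word;
	}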
+
+/*
+ * Compute return address and emulate branch in MIPS16e mode after an
+ * exception only. It does not handle compact branches/jumps and cannot
+ * be used in interrupt context. (Compact branches/jumps do not cause
+ * exceptions.)
+ */
+int __MIPS16e_compute_return_epc(struct pt_regs *regs)
+{
+ u16 __user *addr;
+ union mips16e_instruction inst;
+ u16 inst2;
+ u32 fullinst;
+ long epc;
+
+ epc = regs->cp0_epc;
+
+ /* Read the instruction. */
+ addr = (u16 __user *)msk_isa16_mode(epc);
+ if (__get_user(inst.full, addr)) {
+ force_sig(SIGSEGV);
+ return -EFAULT;
+ }
+
+ switch (inst.ri.opcode) {
+ case MIPS16e_extend_op:
+ regs->cp0_epc += 4;
+ return 0;
+
+ /*
+ * JAL and JALX in MIPS16e mode
+ */
+ case MIPS16e_jal_op:
+ addr += 1;
+ if (__get_user(inst2, addr)) {
+ force_sig(SIGSEGV);
+ return -EFAULT;
+ }
+ fullinst = ((unsigned)inst.full << 16) | inst2;
+ regs->regs[31] = epc + 6;
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ /*
+	 * JAL:5 X:1 TARGET[20:16]:5 TARGET[25:21]:5 TARGET[15:0]:16
+ *
+ * ......TARGET[15:0].................TARGET[20:16]...........
+ * ......TARGET[25:21]
+ */
+ epc |=
+ ((fullinst & 0xffff) << 2) | ((fullinst & 0x3e00000) >> 3) |
+ ((fullinst & 0x1f0000) << 7);
+ if (!inst.jal.x)
+ set_isa16_mode(epc); /* Set ISA mode bit. */
+ regs->cp0_epc = epc;
+ return 0;
+
+ /*
+ * J(AL)R(C)
+ */
+ case MIPS16e_rr_op:
+ if (inst.rr.func == MIPS16e_jr_func) {
+
+ if (inst.rr.ra)
+ regs->cp0_epc = regs->regs[31];
+ else
+ regs->cp0_epc =
+ regs->regs[reg16to32[inst.rr.rx]];
+
+ if (inst.rr.l) {
+ if (inst.rr.nd)
+ regs->regs[31] = epc + 2;
+ else
+ regs->regs[31] = epc + 4;
+ }
+ return 0;
+ }
+ break;
+ }
+
+ /*
+ * All other cases have no branch delay slot and are 16-bits.
+ * Branches do not cause an exception.
+ */
+ regs->cp0_epc += 2;
+
+ return 0;
+}
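
The MIPS16e JAL(X) case above is the densest part: the 26-bit jump target arrives with its upper fields swapped inside the 32-bit extended instruction, and the reassembled target replaces the low 28 bits of PC+4. The same computation as a standalone sketch:

	static unsigned long demo_mips16e_jal_target(unsigned long epc,
						     u32 fullinst)
	{
		unsigned long target;

		target  = (fullinst & 0x0000ffff) << 2;	/* target[15:0]  -> addr[17:2]  */
		target |= (fullinst & 0x03e00000) >> 3;	/* target[20:16] -> addr[22:18] */
		target |= (fullinst & 0x001f0000) << 7;	/* target[25:21] -> addr[27:23] */

		return ((epc + 4) & ~0x0fffffffUL) | target;
	}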
+
+/**
+ * __compute_return_epc_for_insn - Compute the return address and,
+ * if required, emulate the branch.
+ *
+ * @regs: Pointer to pt_regs
+ * @insn: branch instruction to decode
+ * Return: -EFAULT on error and forces SIGILL, and on success
+ * returns 0 or BRANCH_LIKELY_TAKEN as appropriate after
+ * evaluating the branch.
+ *
+ * MIPS R6 Compact branches and forbidden slots:
+ * Compact branches do not throw exceptions because they do
+ * not have delay slots. The forbidden slot instruction ($PC+4)
+ * is only executed if the branch was not taken. Otherwise the
+ * forbidden slot is skipped entirely. This means that the
+ * only way a MIPS R6 compact branch instruction can bring us here
+ * is that its forbidden slot instruction has raised an exception.
+ * In that case the branch was not taken, so the EPC can be safely
+ * set to EPC + 8.
+ */
+int __compute_return_epc_for_insn(struct pt_regs *regs,
+ union mips_instruction insn)
+{
+ long epc = regs->cp0_epc;
+ unsigned int dspcontrol;
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ regs->regs[insn.r_format.rd] = epc + 8;
+ fallthrough;
+ case jr_op:
+ if (NO_R6EMU && insn.r_format.func == jr_op)
+ goto sigill_r2r6;
+ regs->cp0_epc = regs->regs[insn.r_format.rs];
+ break;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltzl_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bltz_op:
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bltzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bgezl_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bgez_op:
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bgezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bltzal_op:
+ case bltzall_op:
+ if (NO_R6EMU && (insn.i_format.rs ||
+ insn.i_format.rt == bltzall_op))
+ goto sigill_r2r6;
+ regs->regs[31] = epc + 8;
+ /*
+ * OK we are here either because we hit a NAL
+ * instruction or because we are emulating an
+ * old bltzal{,l} one. Let's figure out what the
+ * case really is.
+ */
+ if (!insn.i_format.rs) {
+ /*
+ * NAL or BLTZAL with rs == 0
+ * Doesn't matter if we are R6 or not. The
+ * result is the same
+ */
+ regs->cp0_epc += 4 +
+ (insn.i_format.simmediate << 2);
+ break;
+ }
+ /* Now do the real thing for non-R6 BLTZAL{,L} */
+ if ((long)regs->regs[insn.i_format.rs] < 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bltzall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bgezal_op:
+ case bgezall_op:
+ if (NO_R6EMU && (insn.i_format.rs ||
+ insn.i_format.rt == bgezall_op))
+ goto sigill_r2r6;
+ regs->regs[31] = epc + 8;
+ /*
+ * OK we are here either because we hit a BAL
+ * instruction or because we are emulating an
+ * old bgezal{,l} one. Let's figure out what the
+ * case really is.
+ */
+ if (!insn.i_format.rs) {
+ /*
+ * BAL or BGEZAL with rs == 0
+ * Doesn't matter if we are R6 or not. The
+ * result is the same
+ */
+ regs->cp0_epc += 4 +
+ (insn.i_format.simmediate << 2);
+ break;
+ }
+ /* Now do the real thing for non-R6 BGEZAL{,L} */
+ if ((long)regs->regs[insn.i_format.rs] >= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == bgezall_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bposge32_op:
+ if (!cpu_has_dsp)
+ goto sigill_dsp;
+
+ dspcontrol = rddsp(0x01);
+
+ if (dspcontrol >= 32) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jalx_op:
+ case jal_op:
+ regs->regs[31] = regs->cp0_epc + 8;
+ fallthrough;
+ case j_op:
+ epc += 4;
+ epc >>= 28;
+ epc <<= 28;
+ epc |= (insn.j_format.target << 2);
+ regs->cp0_epc = epc;
+ if (insn.i_format.opcode == jalx_op)
+ set_isa16_mode(regs->cp0_epc);
+ break;
+
+ /*
+ * These are conditional and in i_format.
+ */
+ case beql_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case beq_op:
+ if (regs->regs[insn.i_format.rs] ==
+ regs->regs[insn.i_format.rt]) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == beql_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bnel_op:
+ if (NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bne_op:
+ if (regs->regs[insn.i_format.rs] !=
+ regs->regs[insn.i_format.rt]) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == bnel_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case blezl_op: /* not really i_format */
+ if (!insn.i_format.rt && NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case blez_op:
+ /*
+ * Compact branches for R6 for the
+ * blez and blezl opcodes.
+ * BLEZ | rs = 0 | rt != 0 == BLEZALC
+ * BLEZ | rs = rt != 0 == BGEZALC
+ * BLEZ | rs != 0 | rt != 0 == BGEUC
+ * BLEZL | rs = 0 | rt != 0 == BLEZC
+ * BLEZL | rs = rt != 0 == BGEZC
+ * BLEZL | rs != 0 | rt != 0 == BGEC
+ *
+ * For real BLEZ{,L}, rt is always 0.
+ */
+
+ if (cpu_has_mips_r6 && insn.i_format.rt) {
+ if ((insn.i_format.opcode == blez_op) &&
+ ((!insn.i_format.rs && insn.i_format.rt) ||
+ (insn.i_format.rs == insn.i_format.rt)))
+ regs->regs[31] = epc + 4;
+ regs->cp0_epc += 8;
+ break;
+ }
+ /* rt field assumed to be zero */
+ if ((long)regs->regs[insn.i_format.rs] <= 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == blezl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case bgtzl_op:
+ if (!insn.i_format.rt && NO_R6EMU)
+ goto sigill_r2r6;
+ fallthrough;
+ case bgtz_op:
+ /*
+ * Compact branches for R6 for the
+ * bgtz and bgtzl opcodes.
+ * BGTZ | rs = 0 | rt != 0 == BGTZALC
+ * BGTZ | rs = rt != 0 == BLTZALC
+ * BGTZ | rs != 0 | rt != 0 == BLTUC
+ * BGTZL | rs = 0 | rt != 0 == BGTZC
+ * BGTZL | rs = rt != 0 == BLTZC
+ * BGTZL | rs != 0 | rt != 0 == BLTC
+ *
+ * The *ZALC variants (BGTZ with rt != 0) write ra.
+ * For real BGTZ{,L}, rt is always 0.
+ */
+ if (cpu_has_mips_r6 && insn.i_format.rt) {
+ if ((insn.i_format.opcode == bgtz_op) &&
+ ((!insn.i_format.rs && insn.i_format.rt) ||
+ (insn.i_format.rs == insn.i_format.rt)))
+ regs->regs[31] = epc + 4;
+ regs->cp0_epc += 8;
+ break;
+ }
+
+ /* rt field assumed to be zero */
+ if ((long)regs->regs[insn.i_format.rs] > 0) {
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ if (insn.i_format.opcode == bgtzl_op)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op: {
+ unsigned int bit, fcr31, reg;
+
+ if (cpu_has_mips_r6 &&
+ ((insn.i_format.rs == bc1eqz_op) ||
+ (insn.i_format.rs == bc1nez_op))) {
+ if (!init_fp_ctx(current))
+ lose_fpu(1);
+ reg = insn.i_format.rt;
+ bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
+ if (insn.i_format.rs == bc1eqz_op)
+ bit = !bit;
+ own_fpu(1);
+ if (bit)
+ epc = epc + 4 +
+ (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+
+ break;
+ } else {
+
+ preempt_disable();
+ if (is_fpu_owner())
+ fcr31 = read_32bit_cp1_register(CP1_STATUS);
+ else
+ fcr31 = current->thread.fpu.fcr31;
+ preempt_enable();
+
+ bit = (insn.i_format.rt >> 2);
+ bit += (bit != 0);
+ bit += 23;
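+ /*
+ * The arithmetic above maps the cc field (rt >> 2) onto the
+ * FCR31 condition bits: cc0 lives at bit 23 while cc1..cc7
+ * occupy bits 25..31; e.g. cc = 3 yields bit 3 + 1 + 23 = 27.
+ */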
+ switch (insn.i_format.rt & 3) {
+ case 0: /* bc1f */
+ case 2: /* bc1fl */
+ if (~fcr31 & (1 << bit)) {
+ epc = epc + 4 +
+ (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == 2)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+
+ case 1: /* bc1t */
+ case 3: /* bc1tl */
+ if (fcr31 & (1 << bit)) {
+ epc = epc + 4 +
+ (insn.i_format.simmediate << 2);
+ if (insn.i_format.rt == 3)
+ ret = BRANCH_LIKELY_TAKEN;
+ } else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ }
+ break;
+ }
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ if ((regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case ldc2_op: /* This is bbit032 on Octeon */
+ if ((regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32))) == 0)
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case swc2_op: /* This is bbit1 on Octeon */
+ if (regs->regs[insn.i_format.rs] & (1ull<<insn.i_format.rt))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+ case sdc2_op: /* This is bbit132 on Octeon */
+ if (regs->regs[insn.i_format.rs] &
+ (1ull<<(insn.i_format.rt+32)))
+ epc = epc + 4 + (insn.i_format.simmediate << 2);
+ else
+ epc += 8;
+ regs->cp0_epc = epc;
+ break;
+#else
+ case bc6_op:
+ /* Only valid for MIPS R6 */
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ regs->cp0_epc += 8;
+ break;
+ case balc6_op:
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /* Compact branch: BALC */
+ regs->regs[31] = epc + 4;
+ epc += 4 + (insn.i_format.simmediate << 2);
+ regs->cp0_epc = epc;
+ break;
+ case pop66_op:
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /* Compact branch: BEQZC || JIC */
+ regs->cp0_epc += 8;
+ break;
+ case pop76_op:
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /* Compact branch: BNEZC || JIALC */
+ if (!insn.i_format.rs) {
+ /* JIALC: set $31/ra */
+ regs->regs[31] = epc + 4;
+ }
+ regs->cp0_epc += 8;
+ break;
+#endif
+ case pop10_op:
+ case pop30_op:
+ /* Only valid for MIPS R6 */
+ if (!cpu_has_mips_r6)
+ goto sigill_r6;
+ /*
+ * Compact branches:
+ * bovc, beqc, beqzalc, bnvc, bnec, bnezalc
+ */
+ if (insn.i_format.rt && !insn.i_format.rs)
+ regs->regs[31] = epc + 4;
+ regs->cp0_epc += 8;
+ break;
+ }
+
+ return ret;
+
+sigill_dsp:
+ pr_debug("%s: DSP branch but not DSP ASE - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL);
+ return -EFAULT;
+sigill_r2r6:
+ pr_debug("%s: R2 branch but r2-to-r6 emulator is not present - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL);
+ return -EFAULT;
+sigill_r6:
+ pr_debug("%s: R6 branch but no MIPSr6 ISA support - sending SIGILL.\n",
+ current->comm);
+ force_sig(SIGILL);
+ return -EFAULT;
+}
+EXPORT_SYMBOL_GPL(__compute_return_epc_for_insn);
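+
+/*
+ * A minimal sketch of how a caller might use the helper above, assuming
+ * it already holds the task's pt_regs and has fetched the branch word
+ * (branch_word and emulate_delay_slot are illustrative only):
+ *
+ *   union mips_instruction insn;
+ *   int ret;
+ *
+ *   insn.word = branch_word;
+ *   ret = __compute_return_epc_for_insn(regs, insn);
+ *   if (ret < 0)
+ *           return ret;              (SIGILL already queued)
+ *   if (ret == BRANCH_LIKELY_TAKEN)
+ *           emulate_delay_slot();    (hypothetical helper)
+ *   regs->cp0_epc now points at the next instruction to execute.
+ */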
+
+int __compute_return_epc(struct pt_regs *regs)
+{
+ unsigned int __user *addr;
+ long epc;
+ union mips_instruction insn;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
+ /*
+ * Read the instruction
+ */
+ addr = (unsigned int __user *) epc;
+ if (__get_user(insn.word, addr)) {
+ force_sig(SIGSEGV);
+ return -EFAULT;
+ }
+
+ return __compute_return_epc_for_insn(regs, insn);
+
+unaligned:
+ printk("%s: unaligned epc - sending SIGBUS.\n", current->comm);
+ force_sig(SIGBUS);
+ return -EFAULT;
+}
+
+#if (defined CONFIG_KPROBES) || (defined CONFIG_UPROBES)
+
+int __insn_is_compact_branch(union mips_instruction insn)
+{
+ if (!cpu_has_mips_r6)
+ return 0;
+
+ switch (insn.i_format.opcode) {
+ case blezl_op:
+ case bgtzl_op:
+ case blez_op:
+ case bgtz_op:
+ /*
+ * blez[l] and bgtz[l] opcodes with non-zero rt
+ * are MIPS R6 compact branches
+ */
+ if (insn.i_format.rt)
+ return 1;
+ break;
+ case bc6_op:
+ case balc6_op:
+ case pop10_op:
+ case pop30_op:
+ case pop66_op:
+ case pop76_op:
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(__insn_is_compact_branch);
+
+#endif /* CONFIG_KPROBES || CONFIG_UPROBES */
diff --git a/arch/mips/kernel/cacheinfo.c b/arch/mips/kernel/cacheinfo.c
new file mode 100644
index 000000000..495dd0582
--- /dev/null
+++ b/arch/mips/kernel/cacheinfo.c
@@ -0,0 +1,116 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPS cacheinfo support
+ */
+#include <linux/cacheinfo.h>
+
+/* Populates leaf and increments to next leaf */
+#define populate_cache(cache, leaf, c_level, c_type) \
+do { \
+ leaf->type = c_type; \
+ leaf->level = c_level; \
+ leaf->coherency_line_size = c->cache.linesz; \
+ leaf->number_of_sets = c->cache.sets; \
+ leaf->ways_of_associativity = c->cache.ways; \
+ leaf->size = c->cache.linesz * c->cache.sets * \
+ c->cache.ways; \
+ leaf++; \
+} while (0)
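+
+/*
+ * As a worked example of the size computation above: assuming a 4-way
+ * data cache with 32-byte lines and 256 sets, the macro reports
+ * size = 32 * 256 * 4 = 32768 bytes, i.e. linesz * sets * ways.
+ */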
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ int levels = 0, leaves = 0;
+
+ /*
+ * If Dcache is not set, we assume the cache structures
+ * are not properly initialized.
+ */
+ if (c->dcache.waysize)
+ levels += 1;
+ else
+ return -ENOENT;
+
+ leaves += (c->icache.waysize) ? 2 : 1;
+
+ if (c->vcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ if (c->scache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ if (c->tcache.waysize) {
+ levels++;
+ leaves++;
+ }
+
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+ return 0;
+}
+
+static void fill_cpumask_siblings(int cpu, cpumask_t *cpu_map)
+{
+ int cpu1;
+
+ for_each_possible_cpu(cpu1)
+ if (cpus_are_siblings(cpu, cpu1))
+ cpumask_set_cpu(cpu1, cpu_map);
+}
+
+static void fill_cpumask_cluster(int cpu, cpumask_t *cpu_map)
+{
+ int cpu1;
+ int cluster = cpu_cluster(&cpu_data[cpu]);
+
+ for_each_possible_cpu(cpu1)
+ if (cpu_cluster(&cpu_data[cpu1]) == cluster)
+ cpumask_set_cpu(cpu1, cpu_map);
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ int level = 1;
+
+ if (c->icache.waysize) {
+ /* I/D caches are per core */
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_DATA);
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(icache, this_leaf, level, CACHE_TYPE_INST);
+ level++;
+ } else {
+ populate_cache(dcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
+ }
+
+ if (c->vcache.waysize) {
+ /* Vcache is per core as well */
+ fill_cpumask_siblings(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(vcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
+ }
+
+ if (c->scache.waysize) {
+ /* Scache is per cluster */
+ fill_cpumask_cluster(cpu, &this_leaf->shared_cpu_map);
+ populate_cache(scache, this_leaf, level, CACHE_TYPE_UNIFIED);
+ level++;
+ }
+
+ if (c->tcache.waysize)
+ populate_cache(tcache, this_leaf, level, CACHE_TYPE_UNIFIED);
+
+ this_cpu_ci->cpu_map_populated = true;
+
+ return 0;
+}
diff --git a/arch/mips/kernel/cevt-bcm1480.c b/arch/mips/kernel/cevt-bcm1480.c
new file mode 100644
index 000000000..d39a2963b
--- /dev/null
+++ b/arch/mips/kernel/cevt-bcm1480.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+#define IMR_IP2_VAL K_BCM1480_INT_MAP_I0
+#define IMR_IP3_VAL K_BCM1480_INT_MAP_I1
+#define IMR_IP4_VAL K_BCM1480_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1 MHz independently of
+ * the rest of the system.
+ */
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+ return 0;
+}
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+ return 0;
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (clockevent_state_periodic(cd))
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void sb1480_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_BCM1480_INT_TIMER_0 + cpu;
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
+
+ BUG_ON(cpu > 3); /* Only have 4 general purpose timers */
+
+ sprintf(name, "bcm1480-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->max_delta_ticks = 0x7fffff;
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->min_delta_ticks = 2;
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
+ clockevents_register_device(cd);
+
+ bcm1480_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_BCM1480_IMR_REGISTER(cpu,
+ R_BCM1480_IMR_INTERRUPT_MAP_BASE_H) + (irq << 3)));
+
+ bcm1480_unmask_irq(cpu, irq);
+
+ irq_set_affinity(irq, cpumask_of(cpu));
+ if (request_irq(irq, sibyte_counter_handler, flags, name, cd))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+}
diff --git a/arch/mips/kernel/cevt-ds1287.c b/arch/mips/kernel/cevt-ds1287.c
new file mode 100644
index 000000000..9a47fbcd4
--- /dev/null
+++ b/arch/mips/kernel/cevt-ds1287.c
@@ -0,0 +1,121 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DS1287 clockevent driver
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/mc146818rtc.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+
+int ds1287_timer_state(void)
+{
+ return (CMOS_READ(RTC_REG_C) & RTC_PF) != 0;
+}
+
+int ds1287_set_base_clock(unsigned int hz)
+{
+ u8 rate;
+
+ switch (hz) {
+ case 128:
+ rate = 0x9;
+ break;
+ case 256:
+ rate = 0x8;
+ break;
+ case 1024:
+ rate = 0x6;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ CMOS_WRITE(RTC_REF_CLCK_32KHZ | rate, RTC_REG_A);
+
+ return 0;
+}
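+
+/*
+ * The rate values above follow the MC146818 periodic-rate formula
+ * freq = 32768 >> (rate - 1): rate 0x6 gives 32768 >> 5 = 1024 Hz,
+ * rate 0x8 gives 256 Hz and rate 0x9 gives 128 Hz.
+ */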
+
+static int ds1287_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ return -EINVAL;
+}
+
+static int ds1287_shutdown(struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+ val &= ~RTC_PIE;
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+ return 0;
+}
+
+static int ds1287_set_periodic(struct clock_event_device *evt)
+{
+ u8 val;
+
+ spin_lock(&rtc_lock);
+
+ val = CMOS_READ(RTC_REG_B);
+ val |= RTC_PIE;
+ CMOS_WRITE(val, RTC_REG_B);
+
+ spin_unlock(&rtc_lock);
+ return 0;
+}
+
+static void ds1287_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device ds1287_clockevent = {
+ .name = "ds1287",
+ .features = CLOCK_EVT_FEAT_PERIODIC,
+ .set_next_event = ds1287_set_next_event,
+ .set_state_shutdown = ds1287_shutdown,
+ .set_state_periodic = ds1287_set_periodic,
+ .tick_resume = ds1287_shutdown,
+ .event_handler = ds1287_event_handler,
+};
+
+static irqreturn_t ds1287_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &ds1287_clockevent;
+
+ /* Ack the RTC interrupt. */
+ CMOS_READ(RTC_REG_C);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+int __init ds1287_clockevent_init(int irq)
+{
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
+ struct clock_event_device *cd;
+
+ cd = &ds1287_clockevent;
+ cd->rating = 100;
+ cd->irq = irq;
+ clockevent_set_clock(cd, 32768);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->max_delta_ticks = 0x7fffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->min_delta_ticks = 0x300;
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&ds1287_clockevent);
+
+ return request_irq(irq, ds1287_interrupt, flags, "ds1287", NULL);
+}
diff --git a/arch/mips/kernel/cevt-gt641xx.c b/arch/mips/kernel/cevt-gt641xx.c
new file mode 100644
index 000000000..5b132e8c5
--- /dev/null
+++ b/arch/mips/kernel/cevt-gt641xx.c
@@ -0,0 +1,146 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * GT641xx clockevent routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/irq.h>
+
+#include <asm/gt64120.h>
+#include <asm/time.h>
+
+static DEFINE_RAW_SPINLOCK(gt641xx_timer_lock);
+static unsigned int gt641xx_base_clock;
+
+void gt641xx_set_base_clock(unsigned int clock)
+{
+ gt641xx_base_clock = clock;
+}
+
+int gt641xx_timer0_state(void)
+{
+ if (GT_READ(GT_TC0_OFS))
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+ GT_WRITE(GT_TC_CONTROL_OFS, GT_TC_CONTROL_ENTC0_MSK);
+
+ return 1;
+}
+
+static int gt641xx_timer0_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+
+ GT_WRITE(GT_TC0_OFS, delta);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+
+ return 0;
+}
+
+static int gt641xx_timer0_shutdown(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~(GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK);
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static int gt641xx_timer0_set_oneshot(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl &= ~GT_TC_CONTROL_SELTC0_MSK;
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK;
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static int gt641xx_timer0_set_periodic(struct clock_event_device *evt)
+{
+ u32 ctrl;
+
+ raw_spin_lock(&gt641xx_timer_lock);
+
+ ctrl = GT_READ(GT_TC_CONTROL_OFS);
+ ctrl |= GT_TC_CONTROL_ENTC0_MSK | GT_TC_CONTROL_SELTC0_MSK;
+ GT_WRITE(GT_TC_CONTROL_OFS, ctrl);
+
+ raw_spin_unlock(&gt641xx_timer_lock);
+ return 0;
+}
+
+static void gt641xx_timer0_event_handler(struct clock_event_device *dev)
+{
+}
+
+static struct clock_event_device gt641xx_timer0_clockevent = {
+ .name = "gt641xx-timer0",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .irq = GT641XX_TIMER0_IRQ,
+ .set_next_event = gt641xx_timer0_set_next_event,
+ .set_state_shutdown = gt641xx_timer0_shutdown,
+ .set_state_periodic = gt641xx_timer0_set_periodic,
+ .set_state_oneshot = gt641xx_timer0_set_oneshot,
+ .tick_resume = gt641xx_timer0_shutdown,
+ .event_handler = gt641xx_timer0_event_handler,
+};
+
+static irqreturn_t gt641xx_timer0_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *cd = &gt641xx_timer0_clockevent;
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static int __init gt641xx_timer0_clockevent_init(void)
+{
+ struct clock_event_device *cd;
+
+ if (!gt641xx_base_clock)
+ return 0;
+
+ GT_WRITE(GT_TC0_OFS, gt641xx_base_clock / HZ);
+
+ cd = &gt641xx_timer0_clockevent;
+ cd->rating = 200 + gt641xx_base_clock / 10000000;
+ clockevent_set_clock(cd, gt641xx_base_clock);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
+ cd->max_delta_ticks = 0x7fffffff;
+ cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ cd->min_delta_ticks = 0x300;
+ cd->cpumask = cpumask_of(0);
+
+ clockevents_register_device(&gt641xx_timer0_clockevent);
+
+ return request_irq(GT641XX_TIMER0_IRQ, gt641xx_timer0_interrupt,
+ IRQF_PERCPU | IRQF_TIMER, "gt641xx_timer0", NULL);
+}
+arch_initcall(gt641xx_timer0_clockevent_init);
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
new file mode 100644
index 000000000..32ec67c9a
--- /dev/null
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -0,0 +1,341 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/cpufreq.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/cevt-r4k.h>
+
+static int mips_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ unsigned int cnt;
+ int res;
+
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ res = ((int)(read_c0_count() - cnt) >= 0) ? -ETIME : 0;
+ return res;
+}
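+
+/*
+ * The final comparison above guards against the race where delta is so
+ * small that CP0_Count has already passed the new CP0_Compare by the
+ * time the write lands. E.g. assuming cnt was 100 and delta 2, Compare
+ * is written as 102; if Count then reads back 103, (int)(103 - 102) >= 0
+ * and -ETIME asks the clockevents core to retry with a larger delta.
+ */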
+
+/**
+ * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
+ *
+ * Running under virtualisation can introduce overhead into mips_next_event() in
+ * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
+ * potentially with an unnatural frequency, which makes a fixed min_delta_ns
+ * value inappropriate as it may be too small.
+ *
+ * It can also introduce occasional latency from the guest being descheduled.
+ *
+ * This function calculates a good minimum delta based roughly on the 75th
+ * percentile of the time taken to do the mips_next_event() sequence, in order
+ * to handle potentially higher overhead while also eliminating outliers due to
+ * unpredictable hypervisor latency (which can be handled by retries).
+ *
+ * Return: An appropriate minimum delta for the clock event device.
+ */
+static unsigned int calculate_min_delta(void)
+{
+ unsigned int cnt, i, j, k, l;
+ unsigned int buf1[4], buf2[3];
+ unsigned int min_delta;
+
+ /*
+ * Calculate the median of 5 75th percentiles of 5 samples of how long
+ * it takes to set CP0_Compare = CP0_Count + delta.
+ */
+ for (i = 0; i < 5; ++i) {
+ for (j = 0; j < 5; ++j) {
+ /*
+ * This is like the code in mips_next_event(), and
+ * directly measures the borderline "safe" delta.
+ */
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ cnt = read_c0_count() - cnt;
+
+ /* Sorted insert into buf1 */
+ for (k = 0; k < j; ++k) {
+ if (cnt < buf1[k]) {
+ l = min_t(unsigned int,
+ j, ARRAY_SIZE(buf1) - 1);
+ for (; l > k; --l)
+ buf1[l] = buf1[l - 1];
+ break;
+ }
+ }
+ if (k < ARRAY_SIZE(buf1))
+ buf1[k] = cnt;
+ }
+
+ /* Sorted insert of 75th percentile into buf2 */
+ for (k = 0; k < i && k < ARRAY_SIZE(buf2); ++k) {
+ if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+ l = min_t(unsigned int,
+ i, ARRAY_SIZE(buf2) - 1);
+ for (; l > k; --l)
+ buf2[l] = buf2[l - 1];
+ break;
+ }
+ }
+ if (k < ARRAY_SIZE(buf2))
+ buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
+ }
+
+ /* Use 2 * median of 75th percentiles */
+ min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;
+
+ /* Don't go too low */
+ if (min_delta < 0x300)
+ min_delta = 0x300;
+
+ pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
+ __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
+ return min_delta;
+}
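+
+/*
+ * As a worked example of the selection above: assuming one inner loop
+ * samples {12, 9, 30, 11, 10}, the sorted inserts leave buf1 holding
+ * the four smallest values {9, 10, 11, 12}, so buf1[3] = 12 is taken as
+ * the 75th percentile. Keeping the three smallest of the five such
+ * percentiles in buf2 then makes buf2[2] their median.
+ */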
+
+DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
+int cp0_timer_irq_installed;
+
+/*
+ * Possibly handle a performance counter interrupt.
+ * Return true if the timer interrupt should not be checked.
+ */
+static inline int handle_perf_irq(int r2)
+{
+ /*
+ * The performance counter overflow interrupt may be shared with the
+ * timer interrupt (cp0_perfcount_irq < 0). If it is and a
+ * performance counter has overflowed (perf_irq() == IRQ_HANDLED)
+ * and we can't reliably determine if a counter interrupt has also
+ * happened (!r2) then don't check for a timer interrupt.
+ */
+ return (cp0_perfcount_irq < 0) &&
+ perf_irq() == IRQ_HANDLED &&
+ !r2;
+}
+
+irqreturn_t c0_compare_interrupt(int irq, void *dev_id)
+{
+ const int r2 = cpu_has_mips_r2_r6;
+ struct clock_event_device *cd;
+ int cpu = smp_processor_id();
+
+ /*
+ * Suckage alert:
+ * Before R2 of the architecture there was no way to see if a
+ * performance counter interrupt was pending, so we have to run
+ * the performance counter interrupt handler anyway.
+ */
+ if (handle_perf_irq(r2))
+ return IRQ_HANDLED;
+
+ /*
+ * The same applies to performance counter interrupts. But with the
+ * above we now know that the reason we got here must be a timer
+ * interrupt. Being the paranoiacs we are we check anyway.
+ */
+ if (!r2 || (read_c0_cause() & CAUSEF_TI)) {
+ /* Clear Count/Compare Interrupt */
+ write_c0_compare(read_c0_compare());
+ cd = &per_cpu(mips_clockevent_device, cpu);
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+ }
+
+ return IRQ_NONE;
+}
+
+struct irqaction c0_compare_irqaction = {
+ .handler = c0_compare_interrupt,
+ /*
+ * IRQF_SHARED: The timer interrupt may be shared with other interrupts
+ * such as perf counter and FDC interrupts.
+ */
+ .flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED,
+ .name = "timer",
+};
+
+void mips_event_handler(struct clock_event_device *dev)
+{
+}
+
+/*
+ * FIXME: This doesn't hold for the relocated E9000 compare interrupt.
+ */
+static int c0_compare_int_pending(void)
+{
+ /* When cpu_has_mips_r2, this checks Cause.TI instead of Cause.IP7 */
+ return (read_c0_cause() >> cp0_compare_irq_shift) & (1ul << CAUSEB_IP);
+}
+
+/*
+ * Compare interrupt can be routed and latched outside the core,
+ * so wait up to worst case number of cycle counter ticks for timer interrupt
+ * changes to propagate to the cause register.
+ */
+#define COMPARE_INT_SEEN_TICKS 50
+
+int c0_compare_int_usable(void)
+{
+ unsigned int delta;
+ unsigned int cnt;
+
+ /*
+ * IP7 already pending? Try to clear it by acking the timer.
+ */
+ if (c0_compare_int_pending()) {
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
+ if (c0_compare_int_pending())
+ return 0;
+ }
+
+ for (delta = 0x10; delta <= 0x400000; delta <<= 1) {
+ cnt = read_c0_count();
+ cnt += delta;
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ if ((int)(read_c0_count() - cnt) < 0)
+ break;
+ /* increase delta if the timer was already expired */
+ }
+
+ while ((int)(read_c0_count() - cnt) <= 0)
+ ; /* Wait for expiry */
+
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (c0_compare_int_pending())
+ break;
+ if (!c0_compare_int_pending())
+ return 0;
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ back_to_back_c0_hazard();
+ while (read_c0_count() < (cnt + COMPARE_INT_SEEN_TICKS))
+ if (!c0_compare_int_pending())
+ break;
+ if (c0_compare_int_pending())
+ return 0;
+
+ /*
+ * Feels like a real count / compare timer.
+ */
+ return 1;
+}
+
+unsigned int __weak get_c0_compare_int(void)
+{
+ return MIPS_CPU_IRQ_BASE + cp0_compare_irq;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static unsigned long mips_ref_freq;
+
+static int r4k_cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct clock_event_device *cd;
+ unsigned long rate;
+ int cpu;
+
+ if (!mips_ref_freq)
+ mips_ref_freq = freq->old;
+
+ if (val == CPUFREQ_POSTCHANGE) {
+ rate = cpufreq_scale(mips_hpt_frequency, mips_ref_freq,
+ freq->new);
+
+ for_each_cpu(cpu, freq->policy->cpus) {
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ clockevents_update_freq(cd, rate);
+ }
+ }
+
+ return 0;
+}
+
+static struct notifier_block r4k_cpufreq_notifier = {
+ .notifier_call = r4k_cpufreq_callback,
+};
+
+static int __init r4k_register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&r4k_cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(r4k_register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
+int r4k_clockevent_init(void)
+{
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER | IRQF_SHARED;
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd;
+ unsigned int irq, min_delta;
+
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ if (!c0_compare_int_usable())
+ return -ENXIO;
+
+ /*
+ * With vectored interrupts things are getting platform specific.
+ * get_c0_compare_int is a hook to allow a platform to return the
+ * interrupt number of its liking.
+ */
+ irq = get_c0_compare_int();
+
+ cd = &per_cpu(mips_clockevent_device, cpu);
+
+ cd->name = "MIPS";
+ cd->features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_C3STOP |
+ CLOCK_EVT_FEAT_PERCPU;
+
+ min_delta = calculate_min_delta();
+
+ cd->rating = 300;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = mips_next_event;
+ cd->event_handler = mips_event_handler;
+
+ clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
+
+ if (cp0_timer_irq_installed)
+ return 0;
+
+ cp0_timer_irq_installed = 1;
+
+ if (request_irq(irq, c0_compare_interrupt, flags, "timer",
+ c0_compare_interrupt))
+ pr_err("Failed to request irq %d (timer)\n", irq);
+
+ return 0;
+}
diff --git a/arch/mips/kernel/cevt-sb1250.c b/arch/mips/kernel/cevt-sb1250.c
new file mode 100644
index 000000000..0451273fa
--- /dev/null
+++ b/arch/mips/kernel/cevt-sb1250.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/percpu.h>
+#include <linux/smp.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define IMR_IP2_VAL K_INT_MAP_I0
+#define IMR_IP3_VAL K_INT_MAP_I1
+#define IMR_IP4_VAL K_INT_MAP_I2
+
+/*
+ * The general purpose timer ticks at 1 MHz independently of
+ * the rest of the system.
+ */
+
+static int sibyte_shutdown(struct clock_event_device *evt)
+{
+ void __iomem *cfg;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(smp_processor_id(), R_SCD_TIMER_CFG));
+
+ /* Stop the timer until we actually program a shot */
+ __raw_writeq(0, cfg);
+
+ return 0;
+}
+
+static int sibyte_set_periodic(struct clock_event_device *evt)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq((V_SCD_TIMER_FREQ / HZ) - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS, cfg);
+
+ return 0;
+}
+
+static int sibyte_next_event(unsigned long delta, struct clock_event_device *cd)
+{
+ unsigned int cpu = smp_processor_id();
+ void __iomem *cfg, *init;
+
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ init = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_INIT));
+
+ __raw_writeq(0, cfg);
+ __raw_writeq(delta - 1, init);
+ __raw_writeq(M_SCD_TIMER_ENABLE, cfg);
+
+ return 0;
+}
+
+static irqreturn_t sibyte_counter_handler(int irq, void *dev_id)
+{
+ unsigned int cpu = smp_processor_id();
+ struct clock_event_device *cd = dev_id;
+ void __iomem *cfg;
+ unsigned long tmode;
+
+ if (clockevent_state_periodic(cd))
+ tmode = M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS;
+ else
+ tmode = 0;
+
+ /* ACK interrupt */
+ cfg = IOADDR(A_SCD_TIMER_REGISTER(cpu, R_SCD_TIMER_CFG));
+ ____raw_writeq(tmode, cfg);
+
+ cd->event_handler(cd);
+
+ return IRQ_HANDLED;
+}
+
+static DEFINE_PER_CPU(struct clock_event_device, sibyte_hpt_clockevent);
+static DEFINE_PER_CPU(char [18], sibyte_hpt_name);
+
+void sb1250_clockevent_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+ unsigned int irq = K_INT_TIMER_0 + cpu;
+ struct clock_event_device *cd = &per_cpu(sibyte_hpt_clockevent, cpu);
+ unsigned char *name = per_cpu(sibyte_hpt_name, cpu);
+ unsigned long flags = IRQF_PERCPU | IRQF_TIMER;
+
+ /* Only have 4 general purpose timers, and we use the last one as the hpt */
+ BUG_ON(cpu > 2);
+
+ sprintf(name, "sb1250-counter-%d", cpu);
+ cd->name = name;
+ cd->features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT;
+ clockevent_set_clock(cd, V_SCD_TIMER_FREQ);
+ cd->max_delta_ns = clockevent_delta2ns(0x7fffff, cd);
+ cd->max_delta_ticks = 0x7fffff;
+ cd->min_delta_ns = clockevent_delta2ns(2, cd);
+ cd->min_delta_ticks = 2;
+ cd->rating = 200;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(cpu);
+ cd->set_next_event = sibyte_next_event;
+ cd->set_state_shutdown = sibyte_shutdown;
+ cd->set_state_periodic = sibyte_set_periodic;
+ cd->set_state_oneshot = sibyte_shutdown;
+ clockevents_register_device(cd);
+
+ sb1250_mask_irq(cpu, irq);
+
+ /*
+ * Map the timer interrupt to IP[4] of this cpu
+ */
+ __raw_writeq(IMR_IP4_VAL,
+ IOADDR(A_IMR_REGISTER(cpu, R_IMR_INTERRUPT_MAP_BASE) +
+ (irq << 3)));
+
+ sb1250_unmask_irq(cpu, irq);
+
+ irq_set_affinity(irq, cpumask_of(cpu));
+ if (request_irq(irq, sibyte_counter_handler, flags, name, cd))
+ pr_err("Failed to request irq %d (%s)\n", irq, name);
+}
diff --git a/arch/mips/kernel/cevt-txx9.c b/arch/mips/kernel/cevt-txx9.c
new file mode 100644
index 000000000..d761ead2e
--- /dev/null
+++ b/arch/mips/kernel/cevt-txx9.c
@@ -0,0 +1,220 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Based on linux/arch/mips/kernel/cevt-r4k.c,
+ * linux/arch/mips/jmr3927/rbhma3100/setup.c
+ *
+ * Copyright 2001 MontaVista Software Inc.
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Copyright (C) 2007 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/sched_clock.h>
+#include <asm/time.h>
+#include <asm/txx9tmr.h>
+
+#define TCR_BASE (TXx9_TMTCR_CCDE | TXx9_TMTCR_CRE | TXx9_TMTCR_TMODE_ITVL)
+#define TIMER_CCD 0 /* 1/2 */
+#define TIMER_CLK(imclk) ((imclk) / (2 << TIMER_CCD))
+
+struct txx9_clocksource {
+ struct clocksource cs;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
+
+static u64 txx9_cs_read(struct clocksource *cs)
+{
+ struct txx9_clocksource *txx9_cs =
+ container_of(cs, struct txx9_clocksource, cs);
+ return __raw_readl(&txx9_cs->tmrptr->trr);
+}
+
+/* Use a width one bit smaller than the timer so every bit of that width is usable */
+#define TXX9_CLOCKSOURCE_BITS (TXX9_TIMER_BITS - 1)
+
+static struct txx9_clocksource txx9_clocksource = {
+ .cs = {
+ .name = "TXx9",
+ .rating = 200,
+ .read = txx9_cs_read,
+ .mask = CLOCKSOURCE_MASK(TXX9_CLOCKSOURCE_BITS),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+};
+
+static u64 notrace txx9_read_sched_clock(void)
+{
+ return __raw_readl(&txx9_clocksource.tmrptr->trr);
+}
+
+void __init txx9_clocksource_init(unsigned long baseaddr,
+ unsigned int imbusclk)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ clocksource_register_hz(&txx9_clocksource.cs, TIMER_CLK(imbusclk));
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ __raw_writel(1 << TXX9_CLOCKSOURCE_BITS, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ txx9_clocksource.tmrptr = tmrptr;
+
+ sched_clock_register(txx9_read_sched_clock, TXX9_CLOCKSOURCE_BITS,
+ TIMER_CLK(imbusclk));
+}
+
+struct txx9_clock_event_device {
+ struct clock_event_device cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+};
+
+static void txx9tmr_stop_and_clear(struct txx9_tmr_reg __iomem *tmrptr)
+{
+ /* stop and reset counter */
+ __raw_writel(TCR_BASE, &tmrptr->tcr);
+ /* clear pending interrupt */
+ __raw_writel(0, &tmrptr->tisr);
+}
+
+static int txx9tmr_set_state_periodic(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+
+ __raw_writel(TXx9_TMITMR_TIIE | TXx9_TMITMR_TZCE, &tmrptr->itmr);
+ /* start timer */
+ __raw_writel(((u64)(NSEC_PER_SEC / HZ) * evt->mult) >> evt->shift,
+ &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static int txx9tmr_set_state_oneshot(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TXx9_TMITMR_TIIE, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_set_state_shutdown(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_tick_resume(struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ return 0;
+}
+
+static int txx9tmr_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct txx9_clock_event_device *txx9_cd =
+ container_of(evt, struct txx9_clock_event_device, cd);
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ txx9tmr_stop_and_clear(tmrptr);
+ /* start timer */
+ __raw_writel(delta, &tmrptr->cpra);
+ __raw_writel(TCR_BASE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ return 0;
+}
+
+static struct txx9_clock_event_device txx9_clock_event_device = {
+ .cd = {
+ .name = "TXx9",
+ .features = CLOCK_EVT_FEAT_PERIODIC |
+ CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 200,
+ .set_state_shutdown = txx9tmr_set_state_shutdown,
+ .set_state_periodic = txx9tmr_set_state_periodic,
+ .set_state_oneshot = txx9tmr_set_state_oneshot,
+ .tick_resume = txx9tmr_tick_resume,
+ .set_next_event = txx9tmr_set_next_event,
+ },
+};
+
+static irqreturn_t txx9tmr_interrupt(int irq, void *dev_id)
+{
+ struct txx9_clock_event_device *txx9_cd = dev_id;
+ struct clock_event_device *cd = &txx9_cd->cd;
+ struct txx9_tmr_reg __iomem *tmrptr = txx9_cd->tmrptr;
+
+ __raw_writel(0, &tmrptr->tisr); /* ack interrupt */
+ cd->event_handler(cd);
+ return IRQ_HANDLED;
+}
+
+void __init txx9_clockevent_init(unsigned long baseaddr, int irq,
+ unsigned int imbusclk)
+{
+ struct clock_event_device *cd = &txx9_clock_event_device.cd;
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ txx9tmr_stop_and_clear(tmrptr);
+ __raw_writel(TIMER_CCD, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->itmr);
+ txx9_clock_event_device.tmrptr = tmrptr;
+
+ clockevent_set_clock(cd, TIMER_CLK(imbusclk));
+ cd->max_delta_ns =
+ clockevent_delta2ns(0xffffffff >> (32 - TXX9_TIMER_BITS), cd);
+ cd->max_delta_ticks = 0xffffffff >> (32 - TXX9_TIMER_BITS);
+ cd->min_delta_ns = clockevent_delta2ns(0xf, cd);
+ cd->min_delta_ticks = 0xf;
+ cd->irq = irq;
+ cd->cpumask = cpumask_of(0);
+ clockevents_register_device(cd);
+ if (request_irq(irq, txx9tmr_interrupt, IRQF_PERCPU | IRQF_TIMER,
+ "txx9tmr", &txx9_clock_event_device))
+ pr_err("Failed to request irq %d (txx9tmr)\n", irq);
+ printk(KERN_INFO "TXx9: clockevent device at 0x%lx, irq %d\n",
+ baseaddr, irq);
+}
+
+void __init txx9_tmr_init(unsigned long baseaddr)
+{
+ struct txx9_tmr_reg __iomem *tmrptr;
+
+ tmrptr = ioremap(baseaddr, sizeof(struct txx9_tmr_reg));
+ /* Start once to make CounterResetEnable effective */
+ __raw_writel(TXx9_TMTCR_CRE | TXx9_TMTCR_TCE, &tmrptr->tcr);
+ /* Stop and reset the counter */
+ __raw_writel(TXx9_TMTCR_CRE, &tmrptr->tcr);
+ __raw_writel(0, &tmrptr->tisr);
+ __raw_writel(0xffffffff, &tmrptr->cpra);
+ __raw_writel(0, &tmrptr->itmr);
+ __raw_writel(0, &tmrptr->ccdr);
+ __raw_writel(0, &tmrptr->pgmr);
+ iounmap(tmrptr);
+}
diff --git a/arch/mips/kernel/cmpxchg.c b/arch/mips/kernel/cmpxchg.c
new file mode 100644
index 000000000..e974a4954
--- /dev/null
+++ b/arch/mips/kernel/cmpxchg.c
@@ -0,0 +1,104 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/bitops.h>
+#include <asm/cmpxchg.h>
+
+unsigned long __xchg_small(volatile void *ptr, unsigned long val, unsigned int size)
+{
+ u32 old32, new32, load32, mask;
+ volatile u32 *ptr32;
+ unsigned int shift;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask value to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ val &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * exchange within the naturally aligned 4 byte integer that includes
+ * it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ do {
+ old32 = load32;
+ new32 = (load32 & ~mask) | (val << shift);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
+ } while (load32 != old32);
+
+ return (load32 & mask) >> shift;
+}
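+
+/*
+ * As a worked example of the shift/mask setup above: assuming a 1-byte
+ * exchange at an address with (ptr & 0x3) == 1, a little-endian CPU
+ * gets shift = 1 * 8 = 8 and mask = 0xff << 8 = 0x0000ff00, while a
+ * big-endian CPU first XORs the shift with (4 - 1), giving
+ * shift = 2 * 8 = 16 and mask = 0x00ff0000.
+ */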
+
+unsigned long __cmpxchg_small(volatile void *ptr, unsigned long old,
+ unsigned long new, unsigned int size)
+{
+ u32 mask, old32, new32, load32, load;
+ volatile u32 *ptr32;
+ unsigned int shift;
+
+ /* Check that ptr is naturally aligned */
+ WARN_ON((unsigned long)ptr & (size - 1));
+
+ /* Mask inputs to the correct size. */
+ mask = GENMASK((size * BITS_PER_BYTE) - 1, 0);
+ old &= mask;
+ new &= mask;
+
+ /*
+ * Calculate a shift & mask that correspond to the value we wish to
+ * compare & exchange within the naturally aligned 4 byte integer
+ * that includes it.
+ */
+ shift = (unsigned long)ptr & 0x3;
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
+ shift ^= sizeof(u32) - size;
+ shift *= BITS_PER_BYTE;
+ mask <<= shift;
+
+ /*
+ * Calculate a pointer to the naturally aligned 4 byte integer that
+ * includes our byte of interest, and load its value.
+ */
+ ptr32 = (volatile u32 *)((unsigned long)ptr & ~0x3);
+ load32 = *ptr32;
+
+ while (true) {
+ /*
+ * Ensure the byte we want to exchange matches the expected
+ * old value, and if not then bail.
+ */
+ load = (load32 & mask) >> shift;
+ if (load != old)
+ return load;
+
+ /*
+ * Calculate the old & new values of the naturally aligned
+ * 4 byte integer that include the byte we want to exchange.
+ * Attempt to exchange the old value for the new value, and
+ * return if we succeed.
+ */
+ old32 = (load32 & ~mask) | (old << shift);
+ new32 = (load32 & ~mask) | (new << shift);
+ load32 = arch_cmpxchg(ptr32, old32, new32);
+ if (load32 == old32)
+ return old;
+ }
+}
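+
+/*
+ * A minimal sketch of the caller-side retry idiom this helper enables,
+ * assuming a shared u8 counter at p (illustrative only):
+ *
+ *   u8 old, prev;
+ *
+ *   old = READ_ONCE(*p);
+ *   do {
+ *           prev = old;
+ *           old = cmpxchg(p, prev, prev + 1);
+ *   } while (old != prev);
+ *
+ * On MIPS the sub-word cmpxchg() above is routed through
+ * __cmpxchg_small(), which widens it to the containing u32.
+ */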
diff --git a/arch/mips/kernel/cps-vec-ns16550.S b/arch/mips/kernel/cps-vec-ns16550.S
new file mode 100644
index 000000000..30725e1df
--- /dev/null
+++ b/arch/mips/kernel/cps-vec-ns16550.S
@@ -0,0 +1,212 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <linux/serial_reg.h>
+
+#define UART_TX_OFS (UART_TX << CONFIG_MIPS_CPS_NS16550_SHIFT)
+#define UART_LSR_OFS (UART_LSR << CONFIG_MIPS_CPS_NS16550_SHIFT)
+
+#if CONFIG_MIPS_CPS_NS16550_WIDTH == 1
+# define UART_L lb
+# define UART_S sb
+#elif CONFIG_MIPS_CPS_NS16550_WIDTH == 2
+# define UART_L lh
+# define UART_S sh
+#elif CONFIG_MIPS_CPS_NS16550_WIDTH == 4
+# define UART_L lw
+# define UART_S sw
+#else
+# define UART_L lb
+# define UART_S sb
+#endif
+
+/**
+ * _mips_cps_putc() - write a character to the UART
+ * @a0: ASCII character to write
+ * @t9: UART base address
+ */
+LEAF(_mips_cps_putc)
+1: UART_L t0, UART_LSR_OFS(t9)
+ andi t0, t0, UART_LSR_TEMT
+ beqz t0, 1b
+ UART_S a0, UART_TX_OFS(t9)
+ jr ra
+ END(_mips_cps_putc)
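+
+/*
+ * Roughly equivalent C for the routine above, assuming a memory-mapped
+ * 16550 at uart_base (illustrative only; the real code must run with
+ * no stack and no cached memory):
+ *
+ *   while (!(readb(uart_base + UART_LSR_OFS) & UART_LSR_TEMT))
+ *           ;
+ *   writeb(c, uart_base + UART_TX_OFS);
+ */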
+
+/**
+ * _mips_cps_puts() - write a string to the UART
+ * @a0: pointer to NULL-terminated ASCII string
+ * @t9: UART base address
+ *
+ * Write a null-terminated ASCII string to the UART.
+ */
+NESTED(_mips_cps_puts, 0, ra)
+ move s7, ra
+ move s6, a0
+
+1: lb a0, 0(s6)
+ beqz a0, 2f
+ jal _mips_cps_putc
+ PTR_ADDIU s6, s6, 1
+ b 1b
+
+2: jr s7
+ END(_mips_cps_puts)
+
+/**
+ * _mips_cps_putx4 - write a 4b hex value to the UART
+ * @a0: the 4b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a single hexadecimal character to the UART.
+ */
+NESTED(_mips_cps_putx4, 0, ra)
+ andi a0, a0, 0xf
+ li t0, '0'
+ blt a0, 10, 1f
+ li t0, 'a'
+ addiu a0, a0, -10
+1: addu a0, a0, t0
+ b _mips_cps_putc
+ END(_mips_cps_putx4)
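+
+/*
+ * E.g. a0 = 0xb: since 11 >= 10, t0 = 'a' and a0 becomes 1, printing
+ * 'a' + 1 = 'b'; a0 = 0x7 stays below 10 and prints '0' + 7 = '7'.
+ */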
+
+/**
+ * _mips_cps_putx8 - write an 8b hex value to the UART
+ * @a0: the 8b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write an 8 bit value (ie. 2 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx8, 0, ra)
+ move s3, ra
+ move s2, a0
+ srl a0, a0, 4
+ jal _mips_cps_putx4
+ move a0, s2
+ move ra, s3
+ b _mips_cps_putx4
+ END(_mips_cps_putx8)
+
+/**
+ * _mips_cps_putx16 - write a 16b hex value to the UART
+ * @a0: the 16b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 16 bit value (ie. 4 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx16, 0, ra)
+ move s5, ra
+ move s4, a0
+ srl a0, a0, 8
+ jal _mips_cps_putx8
+ move a0, s4
+ move ra, s5
+ b _mips_cps_putx8
+ END(_mips_cps_putx16)
+
+/**
+ * _mips_cps_putx32 - write a 32b hex value to the UART
+ * @a0: the 32b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 32 bit value (ie. 8 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx32, 0, ra)
+ move s7, ra
+ move s6, a0
+ srl a0, a0, 16
+ jal _mips_cps_putx16
+ move a0, s6
+ move ra, s7
+ b _mips_cps_putx16
+ END(_mips_cps_putx32)
+
+#ifdef CONFIG_64BIT
+
+/**
+ * _mips_cps_putx64 - write a 64b hex value to the UART
+ * @a0: the 64b value to write to the UART
+ * @t9: UART base address
+ *
+ * Write a 64 bit value (ie. 16 hexadecimal characters) to the UART.
+ */
+NESTED(_mips_cps_putx64, 0, ra)
+ move sp, ra
+ move s8, a0
+ dsrl32 a0, a0, 0
+ jal _mips_cps_putx32
+ move a0, s8
+ move ra, sp
+ b _mips_cps_putx32
+ END(_mips_cps_putx64)
+
+#define _mips_cps_putxlong _mips_cps_putx64
+
+#else /* !CONFIG_64BIT */
+
+#define _mips_cps_putxlong _mips_cps_putx32
+
+#endif /* !CONFIG_64BIT */
+
+/**
+ * mips_cps_bev_dump() - dump relevant exception state to UART
+ * @a0: pointer to NULL-terminated ASCII string naming the exception
+ *
+ * Write information that may be useful in debugging an exception to the
+ * UART configured by CONFIG_MIPS_CPS_NS16550_*. As this BEV exception
+ * will only be run if something goes horribly wrong very early during
+ * the bringup of a core and it is very likely to be unsafe to perform
+ * memory accesses at that point (cache state indeterminate, EVA may not
+ * be configured, coherence may be disabled) let alone have a stack,
+ * this is all written in assembly using only registers & unmapped
+ * uncached access to the UART registers.
+ */
+LEAF(mips_cps_bev_dump)
+ move s0, ra
+ move s1, a0
+
+ li t9, CKSEG1ADDR(CONFIG_MIPS_CPS_NS16550_BASE)
+
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ PTR_LA a0, str_bev
+ jal _mips_cps_puts
+ move a0, s1
+ jal _mips_cps_puts
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+
+#define DUMP_COP0_REG(reg, name, sz, _mfc0) \
+ PTR_LA a0, 8f; \
+ jal _mips_cps_puts; \
+ _mfc0 a0, reg; \
+ jal _mips_cps_putx##sz; \
+ PTR_LA a0, str_newline; \
+ jal _mips_cps_puts; \
+ TEXT(name)
+
+ DUMP_COP0_REG(CP0_CAUSE, "Cause: 0x", 32, mfc0)
+ DUMP_COP0_REG(CP0_STATUS, "Status: 0x", 32, mfc0)
+ DUMP_COP0_REG(CP0_EBASE, "EBase: 0x", long, MFC0)
+ DUMP_COP0_REG(CP0_BADVADDR, "BadVAddr: 0x", long, MFC0)
+ DUMP_COP0_REG(CP0_BADINSTR, "BadInstr: 0x", 32, mfc0)
+
+ PTR_LA a0, str_newline
+ jal _mips_cps_puts
+ jr s0
+ END(mips_cps_bev_dump)
+
+.pushsection .data
+str_bev: .asciiz "BEV Exception: "
+str_newline: .asciiz "\r\n"
+.popsection
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
new file mode 100644
index 000000000..975343240
--- /dev/null
+++ b/arch/mips/kernel/cps-vec.S
@@ -0,0 +1,629 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/eva.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+
+#define GCR_CPC_BASE_OFS 0x0088
+#define GCR_CL_COHERENCE_OFS 0x2008
+#define GCR_CL_ID_OFS 0x2028
+
+#define CPC_CL_VC_STOP_OFS 0x2020
+#define CPC_CL_VC_RUN_OFS 0x2028
+
+.extern mips_cm_base
+
+.set noreorder
+
+#ifdef CONFIG_64BIT
+# define STATUS_BITDEPS ST0_KX
+#else
+# define STATUS_BITDEPS 0
+#endif
+
+#ifdef CONFIG_MIPS_CPS_NS16550
+
+#define DUMP_EXCEP(name) \
+ PTR_LA a0, 8f; \
+ jal mips_cps_bev_dump; \
+ nop; \
+ TEXT(name)
+
+#else /* !CONFIG_MIPS_CPS_NS16550 */
+
+#define DUMP_EXCEP(name)
+
+#endif /* !CONFIG_MIPS_CPS_NS16550 */
+
+ /*
+ * Set dest to non-zero if the core supports the MT ASE, else zero. If
+ * MT is not supported then branch to nomt.
+ */
+ .macro has_mt dest, nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ andi \dest, \dest, MIPS_CONF3_MT
+ beqz \dest, \nomt
+ nop
+ .endm
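+
+ /*
+ * The bgez tests above branch on the sign bit (bit 31, the M bit)
+ * of each Config register: if it reads clear there is no further
+ * Config register, so the MT ASE cannot be present and control
+ * transfers to nomt.
+ */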
+
+ /*
+ * Set dest to non-zero if the core supports MIPSr6 multithreading
+ * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
+ * branch to nomt.
+ */
+ .macro has_vp dest, nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 4
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 5
+ andi \dest, \dest, MIPS_CONF5_VP
+ beqz \dest, \nomt
+ nop
+ .endm
+
+ /* Calculate an uncached address for the CM GCRs */
+ .macro cmgcrb dest
+ .set push
+ .set noat
+ MFC0 $1, CP0_CMGCRBASE
+ PTR_SLL $1, $1, 4
+ PTR_LI \dest, UNCAC_BASE
+ PTR_ADDU \dest, \dest, $1
+ .set pop
+ .endm
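+
+ /*
+ * E.g. assuming CP0.CMGCRBase reads 0x01fbf800, shifting it left
+ * by 4 gives the physical base 0x1fbf8000, so the macro returns
+ * the uncached virtual address UNCAC_BASE + 0x1fbf8000.
+ */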
+
+.balign 0x1000
+
+LEAF(mips_cps_core_entry)
+ /*
+ * These first 4 bytes will be patched by cps_smp_setup to load the
+ * CCA to use into register s0.
+ */
+ .word 0
+
+ /* Check whether we're here due to an NMI */
+ mfc0 k0, CP0_STATUS
+ and k0, k0, ST0_NMI
+ beqz k0, not_nmi
+ nop
+
+ /* This is an NMI */
+ PTR_LA k0, nmi_handler
+ jr k0
+ nop
+
+not_nmi:
+ /* Setup Cause */
+ li t0, CAUSEF_IV
+ mtc0 t0, CP0_CAUSE
+
+ /* Setup Status */
+ li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
+ mtc0 t0, CP0_STATUS
+
+ /* Skip cache & coherence setup if we're already coherent */
+ cmgcrb v1
+ lw s7, GCR_CL_COHERENCE_OFS(v1)
+ bnez s7, 1f
+ nop
+
+ /* Initialize the L1 caches */
+ jal mips_cps_cache_init
+ nop
+
+ /* Enter the coherent domain */
+ li t0, 0xff
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
+
+ /* Set Kseg0 CCA to that in s0 */
+1: mfc0 t0, CP0_CONFIG
+ ori t0, 0x7
+ xori t0, 0x7
+ or t0, t0, s0
+ mtc0 t0, CP0_CONFIG
+ ehb
+
+ /* Jump to kseg0 */
+ PTR_LA t0, 1f
+ jr t0
+ nop
+
+ /*
+ * We're up, cached & coherent. Perform any EVA initialization necessary
+ * before we access memory.
+ */
+1: eva_init
+
+ /* Retrieve boot configuration pointers */
+ jal mips_cps_get_bootcfg
+ nop
+
+ /* Skip core-level init if we started up coherent */
+ bnez s7, 1f
+ nop
+
+ /* Perform any further required core-level initialisation */
+ jal mips_cps_core_init
+ nop
+
+ /*
+ * Boot any other VPEs within this core that should be online, and
+ * deactivate this VPE if it should be offline.
+ */
+ move a1, t9
+ jal mips_cps_boot_vpes
+ move a0, v0
+
+ /* Off we go! */
+1: PTR_L t1, VPEBOOTCFG_PC(v1)
+ PTR_L gp, VPEBOOTCFG_GP(v1)
+ PTR_L sp, VPEBOOTCFG_SP(v1)
+ jr t1
+ nop
+ END(mips_cps_core_entry)
+
+.org 0x200
+LEAF(excep_tlbfill)
+ DUMP_EXCEP("TLB Fill")
+ b .
+ nop
+ END(excep_tlbfill)
+
+.org 0x280
+LEAF(excep_xtlbfill)
+ DUMP_EXCEP("XTLB Fill")
+ b .
+ nop
+ END(excep_xtlbfill)
+
+.org 0x300
+LEAF(excep_cache)
+ DUMP_EXCEP("Cache")
+ b .
+ nop
+ END(excep_cache)
+
+.org 0x380
+LEAF(excep_genex)
+ DUMP_EXCEP("General")
+ b .
+ nop
+ END(excep_genex)
+
+.org 0x400
+LEAF(excep_intex)
+ DUMP_EXCEP("Interrupt")
+ b .
+ nop
+ END(excep_intex)
+
+.org 0x480
+LEAF(excep_ejtag)
+ PTR_LA k0, ejtag_debug_handler
+ jr k0
+ nop
+ END(excep_ejtag)
+
+LEAF(mips_cps_core_init)
+#ifdef CONFIG_MIPS_MT_SMP
+ /* Check that the core implements the MT ASE */
+ has_mt t0, 3f
+
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set mt
+
+ /* Only allow 1 TC per VPE to execute... */
+ dmt
+
+ /* ...and for the moment only 1 VPE */
+ dvpe
+ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+
+ /* Enter VPE configuration state */
+1: mfc0 t0, CP0_MVPCONTROL
+ ori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+ /* Retrieve the number of VPEs within the core */
+ mfc0 t0, CP0_MVPCONF0
+ srl t0, t0, MVPCONF0_PVPE_SHIFT
+ andi t0, t0, (MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT)
+ addiu ta3, t0, 1
+
+ /* If there's only 1, we're done */
+ beqz t0, 2f
+ nop
+
+ /* Loop through each VPE within this core */
+ li ta1, 1
+
+1: /* Operate on the appropriate TC */
+ mtc0 ta1, CP0_VPECONTROL
+ ehb
+
+ /* Bind TC to VPE (1:1 TC:VPE mapping) */
+ mttc0 ta1, CP0_TCBIND
+
+ /* Set exclusive TC, non-active, master */
+ li t0, VPECONF0_MVP
+ sll t1, ta1, VPECONF0_XTC_SHIFT
+ or t0, t0, t1
+ mttc0 t0, CP0_VPECONF0
+
+ /* Set TC non-active, non-allocatable */
+ mttc0 zero, CP0_TCSTATUS
+
+ /* Set TC halted */
+ li t0, TCHALT_H
+ mttc0 t0, CP0_TCHALT
+
+ /* Next VPE */
+ addiu ta1, ta1, 1
+ slt t0, ta1, ta3
+ bnez t0, 1b
+ nop
+
+ /* Leave VPE configuration state */
+2: mfc0 t0, CP0_MVPCONTROL
+ xori t0, t0, MVPCONTROL_VPC
+ mtc0 t0, CP0_MVPCONTROL
+
+3: .set pop
+#endif
+ jr ra
+ nop
+ END(mips_cps_core_init)
+
+/**
+ * mips_cps_get_bootcfg() - retrieve boot configuration pointers
+ *
+ * Returns: pointer to struct core_boot_config in v0, pointer to
+ * struct vpe_boot_config in v1, VPE ID in t9
+ */
+LEAF(mips_cps_get_bootcfg)
+ /* Calculate a pointer to this core's struct core_boot_config */
+ cmgcrb t0
+ lw t0, GCR_CL_ID_OFS(t0)
+ li t1, COREBOOTCFG_SIZE
+ mul t0, t0, t1
+ PTR_LA t1, mips_cps_core_bootcfg
+ PTR_L t1, 0(t1)
+ PTR_ADDU v0, t0, t1
+
+ /* Calculate this VPE's ID. If the core doesn't support MT, use 0 */
+ li t9, 0
+#if defined(CONFIG_CPU_MIPSR6)
+ has_vp ta2, 1f
+
+ /*
+ * Assume non-contiguous numbering. Perhaps some day we'll need
+ * to handle contiguous VP numbering, but no such systems yet
+ * exist.
+ */
+ mfc0 t9, CP0_GLOBALNUMBER
+ andi t9, t9, MIPS_GLOBALNUMBER_VP
+#elif defined(CONFIG_MIPS_MT_SMP)
+ has_mt ta2, 1f
+
+ /* Find the number of VPEs present in the core */
+ mfc0 t1, CP0_MVPCONF0
+ srl t1, t1, MVPCONF0_PVPE_SHIFT
+ andi t1, t1, MVPCONF0_PVPE >> MVPCONF0_PVPE_SHIFT
+ addiu t1, t1, 1
+
+ /* Calculate a mask for the VPE ID from EBase.CPUNum */
+ clz t1, t1
+ li t2, 31
+ subu t1, t2, t1
+ li t2, 1
+ sll t1, t2, t1
+ addiu t1, t1, -1
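+ /*
+ * e.g. with 2 VPEs: clz(2) = 30, so t1 = 31 - 30 = 1 and the mask
+ * becomes (1 << 1) - 1 = 0x1, enough for VPE IDs 0 and 1
+ */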
+
+ /* Retrieve the VPE ID from EBase.CPUNum */
+ mfc0 t9, $15, 1
+ and t9, t9, t1
+#endif
+
+1: /* Calculate a pointer to this VPE's struct vpe_boot_config */
+ li t1, VPEBOOTCFG_SIZE
+ mul v1, t9, t1
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(v0)
+ PTR_ADDU v1, v1, ta3
+
+ jr ra
+ nop
+ END(mips_cps_get_bootcfg)
+
+LEAF(mips_cps_boot_vpes)
+ lw ta2, COREBOOTCFG_VPEMASK(a0)
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
+
+#if defined(CONFIG_CPU_MIPSR6)
+
+ has_vp t0, 5f
+
+ /* Find base address of CPC */
+ cmgcrb t3
+ PTR_L t1, GCR_CPC_BASE_OFS(t3)
+ PTR_LI t2, ~0x7fff
+ and t1, t1, t2
+ PTR_LI t2, UNCAC_BASE
+ PTR_ADD t1, t1, t2
+
+ /* Start any other VPs that ought to be running */
+ PTR_S ta2, CPC_CL_VC_RUN_OFS(t1)
+
+ /* Ensure this VP stops running if it shouldn't be */
+ not ta2
+ PTR_S ta2, CPC_CL_VC_STOP_OFS(t1)
+ ehb
+
+#elif defined(CONFIG_MIPS_MT)
+
+ /* If the core doesn't support MT then return */
+ has_mt t0, 5f
+
+ /* Enter VPE configuration state */
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set mt
+ dvpe
+ .set pop
+
+ PTR_LA t1, 1f
+ jr.hb t1
+ nop
+1: mfc0 t1, CP0_MVPCONTROL
+ ori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+
+ /* Loop through each VPE */
+ move t8, ta2
+ li ta1, 0
+
+ /* Check whether the VPE should be running. If not, skip it */
+1: andi t0, ta2, 1
+ beqz t0, 2f
+ nop
+
+ /* Operate on the appropriate TC */
+ mfc0 t0, CP0_VPECONTROL
+ ori t0, t0, VPECONTROL_TARGTC
+ xori t0, t0, VPECONTROL_TARGTC
+ or t0, t0, ta1
+ mtc0 t0, CP0_VPECONTROL
+ ehb
+
+ .set push
+ .set MIPS_ISA_LEVEL_RAW
+ .set mt
+
+ /* Skip the VPE if its TC is not halted */
+ mftc0 t0, CP0_TCHALT
+ beqz t0, 2f
+ nop
+
+ /* Calculate a pointer to the VPE's struct vpe_boot_config */
+ li t0, VPEBOOTCFG_SIZE
+ mul t0, t0, ta1
+ addu t0, t0, ta3
+
+ /* Set the TC restart PC */
+ lw t1, VPEBOOTCFG_PC(t0)
+ mttc0 t1, CP0_TCRESTART
+
+ /* Set the TC stack pointer */
+ lw t1, VPEBOOTCFG_SP(t0)
+ mttgpr t1, sp
+
+ /* Set the TC global pointer */
+ lw t1, VPEBOOTCFG_GP(t0)
+ mttgpr t1, gp
+
+ /* Copy config from this VPE */
+ mfc0 t0, CP0_CONFIG
+ mttc0 t0, CP0_CONFIG
+
+ /*
+ * Copy the EVA config from this VPE if the CPU supports it.
+ * CONFIG3 must exist to be running MT startup - just read it.
+ */
+ mfc0 t0, CP0_CONFIG, 3
+ and t0, t0, MIPS_CONF3_SC
+ beqz t0, 3f
+ nop
+ mfc0 t0, CP0_SEGCTL0
+ mttc0 t0, CP0_SEGCTL0
+ mfc0 t0, CP0_SEGCTL1
+ mttc0 t0, CP0_SEGCTL1
+ mfc0 t0, CP0_SEGCTL2
+ mttc0 t0, CP0_SEGCTL2
+3:
+ /* Ensure no software interrupts are pending */
+ mttc0 zero, CP0_CAUSE
+ mttc0 zero, CP0_STATUS
+
+ /* Set TC active, not interrupt exempt */
+ mftc0 t0, CP0_TCSTATUS
+ li t1, ~TCSTATUS_IXMT
+ and t0, t0, t1
+ ori t0, t0, TCSTATUS_A
+ mttc0 t0, CP0_TCSTATUS
+
+ /* Clear the TC halt bit */
+ mttc0 zero, CP0_TCHALT
+
+ /* Set VPE active */
+ mftc0 t0, CP0_VPECONF0
+ ori t0, t0, VPECONF0_VPA
+ mttc0 t0, CP0_VPECONF0
+
+ /* Next VPE */
+2: srl ta2, ta2, 1
+ addiu ta1, ta1, 1
+ bnez ta2, 1b
+ nop
+
+ /* Leave VPE configuration state */
+ mfc0 t1, CP0_MVPCONTROL
+ xori t1, t1, MVPCONTROL_VPC
+ mtc0 t1, CP0_MVPCONTROL
+ ehb
+ evpe
+
+ .set pop
+
+ /* Check whether this VPE is meant to be running */
+ li t0, 1
+ sll t0, t0, a1
+ and t0, t0, t8
+ bnez t0, 2f
+ nop
+
+ /* This VPE should be offline, halt the TC */
+ li t0, TCHALT_H
+ mtc0 t0, CP0_TCHALT
+ PTR_LA t0, 1f
+1: jr.hb t0
+ nop
+
+2:
+
+#endif /* CONFIG_CPU_MIPSR6 / CONFIG_MIPS_MT */
+
+ /* Return */
+5: jr ra
+ nop
+ END(mips_cps_boot_vpes)
+
+LEAF(mips_cps_cache_init)
+ /*
+ * Clear the bits used to index the caches. Note that the architecture
+ * dictates that writing to any of the TagLo or TagHi registers
+ * (selects 0 or 2) should be valid for all MIPS32 CPUs, even those
+ * for which said writes are unnecessary.
+ */
+ mtc0 zero, CP0_TAGLO, 0
+ mtc0 zero, CP0_TAGHI, 0
+ mtc0 zero, CP0_TAGLO, 2
+ mtc0 zero, CP0_TAGHI, 2
+ ehb
+
+ /* Primary cache configuration is indicated by Config1 */
+ mfc0 v0, CP0_CONFIG, 1
+
+ /* Detect I-cache line size */
+ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+ beqz t0, icache_done
+ li t1, 2
+ sllv t0, t1, t0
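+ /* t0 = 2 << IL, e.g. IL = 4 gives 32-byte lines; IL = 0 means no I-cache */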
+
+ /* Detect I-cache size */
+ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addiu t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
+1: cache Index_Store_Tag_I, 0(a0)
+ PTR_ADD a0, a0, t0
+ bne a0, a1, 1b
+ nop
+icache_done:
+
+ /* Detect D-cache line size */
+ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+ beqz t0, dcache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect D-cache size */
+ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addiu t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
+1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+ PTR_ADD a0, a0, t0
+dcache_done:
+
+ jr ra
+ nop
+ END(mips_cps_cache_init)
+
+#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
+
+ /* Calculate a pointer to this CPU's struct mips_static_suspend_state */
+ .macro psstate dest
+ .set push
+ .set noat
+ lw $1, TI_CPU(gp)
+ sll $1, $1, LONGLOG
+ PTR_LA \dest, __per_cpu_offset
+ addu $1, $1, \dest
+ lw $1, 0($1)
+ PTR_LA \dest, cps_cpu_state
+ addu \dest, \dest, $1
+ .set pop
+ .endm
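+
+ /*
+ * The address computed above should be equivalent to
+ * per_cpu(cps_cpu_state, cpu) in C: the per-CPU offset is loaded from
+ * __per_cpu_offset[] using the CPU number held in thread_info and
+ * added to &cps_cpu_state.
+ */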
+
+LEAF(mips_cps_pm_save)
+ /* Save CPU state */
+ SUSPEND_SAVE_REGS
+ psstate t1
+ SUSPEND_SAVE_STATIC
+ jr v0
+ nop
+ END(mips_cps_pm_save)
+
+LEAF(mips_cps_pm_restore)
+ /* Restore CPU state */
+ psstate t1
+ RESUME_RESTORE_STATIC
+ RESUME_RESTORE_REGS_RETURN
+ END(mips_cps_pm_restore)
+
+#endif /* CONFIG_MIPS_CPS_PM && CONFIG_CPU_PM */
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
new file mode 100644
index 000000000..482af80b8
--- /dev/null
+++ b/arch/mips/kernel/cpu-probe.c
@@ -0,0 +1,2043 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+
+#include <asm/bugs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/msa.h>
+#include <asm/watch.h>
+#include <asm/elf.h>
+#include <asm/pgtable-bits.h>
+#include <asm/spram.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+
+#include "fpu-probe.h"
+
+#include <asm/mach-loongson64/cpucfg-emul.h>
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+static inline unsigned long cpu_get_msa_id(void)
+{
+ unsigned long status, msa_id;
+
+ status = read_c0_status();
+ __enable_fpu(FPU_64BIT);
+ enable_msa();
+ msa_id = read_msa_ir();
+ disable_msa();
+ write_c0_status(status);
+ return msa_id;
+}
+
+static int mips_dsp_disabled;
+
+static int __init dsp_disable(char *s)
+{
+ cpu_data[0].ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+ mips_dsp_disabled = 1;
+
+ return 1;
+}
+
+__setup("nodsp", dsp_disable);
+
+static int mips_htw_disabled;
+
+static int __init htw_disable(char *s)
+{
+ mips_htw_disabled = 1;
+ cpu_data[0].options &= ~MIPS_CPU_HTW;
+ write_c0_pwctl(read_c0_pwctl() &
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT));
+
+ return 1;
+}
+
+__setup("nohtw", htw_disable);
+
+static int mips_ftlb_disabled;
+static int mips_has_ftlb_configured;
+
+enum ftlb_flags {
+ FTLB_EN = 1 << 0,
+ FTLB_SET_PROB = 1 << 1,
+};
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags);
+
+static int __init ftlb_disable(char *s)
+{
+ unsigned int config4, mmuextdef;
+
+ /*
+ * If the core hasn't done any FTLB configuration, there is nothing
+ * for us to do here.
+ */
+ if (!mips_has_ftlb_configured)
+ return 1;
+
+ /* Disable it in the boot cpu */
+ if (set_ftlb_enable(&cpu_data[0], 0)) {
+ pr_warn("Can't turn FTLB off\n");
+ return 1;
+ }
+
+ config4 = read_c0_config4();
+
+ /* Check that FTLB has been disabled */
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ /* MMUSIZEEXT (VTLB on, FTLB off) is what we expect once the FTLB is disabled */
+ if (mmuextdef == MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT) {
+ /* This should never happen */
+ pr_warn("FTLB could not be disabled!\n");
+ return 1;
+ }
+
+ mips_ftlb_disabled = 1;
+ mips_has_ftlb_configured = 0;
+
+ /*
+ * noftlb is mainly used for debug purposes so print
+ * an informative message instead of using pr_debug()
+ */
+ pr_info("FTLB has been disabled\n");
+
+ /*
+ * Some of these bits are duplicated in decode_config4().
+ * MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT is the only possible case
+ * once the FTLB has been disabled, so undo what decode_config4() did.
+ */
+ cpu_data[0].tlbsize -= cpu_data[0].tlbsizeftlbways *
+ cpu_data[0].tlbsizeftlbsets;
+ cpu_data[0].tlbsizeftlbsets = 0;
+ cpu_data[0].tlbsizeftlbways = 0;
+
+ return 1;
+}
+
+__setup("noftlb", ftlb_disable);
+
+/*
+ * Check if the CPU has per-TC performance counters
+ */
+static inline void cpu_set_mt_per_tc_perf(struct cpuinfo_mips *c)
+{
+ if (read_c0_config7() & MTI_CONF7_PTC)
+ c->options |= MIPS_CPU_MT_PER_TC_PERF_COUNTERS;
+}
+
+static inline void check_errata(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ switch (current_cpu_type()) {
+ case CPU_34K:
+ /*
+ * Erratum "RPS May Cause Incorrect Instruction Execution"
+ * This code only handles VPE0, any SMP/RTOS code
+ * making use of VPE1 will be responsible for that VPE.
+ */
+ if ((c->processor_id & PRID_REV_MASK) <= PRID_REV_34K_V1_0_2)
+ write_c0_config7(read_c0_config7() | MIPS_CONF7_RPS);
+ break;
+ default:
+ break;
+ }
+}
+
+void __init check_bugs32(void)
+{
+ check_errata();
+}
+
+/*
+ * Probe whether the cpu has a config register by toggling the alternate
+ * cache bit and checking whether it has any effect.
+ * It's used by cpu_probe to distinguish between the R3000A and the R3081.
+ */
+static inline int cpu_has_confreg(void)
+{
+#ifdef CONFIG_CPU_R3000
+ extern unsigned long r3k_cache_size(unsigned long);
+ unsigned long size1, size2;
+ unsigned long cfg = read_c0_conf();
+
+ size1 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg ^ R30XX_CONF_AC);
+ size2 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg);
+ return size1 != size2;
+#else
+ return 0;
+#endif
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+static inline void set_elf_base_platform(const char *plat)
+{
+ if (__elf_base_platform == NULL)
+ __elf_base_platform = plat;
+}
+
+static inline void cpu_probe_vmbits(struct cpuinfo_mips *c)
+{
+#ifdef __NEED_VMBITS_PROBE
+ write_c0_entryhi(0x3fffffffffffe000ULL);
+ back_to_back_c0_hazard();
+ c->vmbits = fls64(read_c0_entryhi() & 0x3fffffffffffe000ULL);
+#endif
+}
+
+static void set_isa(struct cpuinfo_mips *c, unsigned int isa)
+{
+ switch (isa) {
+ case MIPS_CPU_ISA_M64R5:
+ c->isa_level |= MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5;
+ set_elf_base_platform("mips64r5");
+ fallthrough;
+ case MIPS_CPU_ISA_M64R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2;
+ set_elf_base_platform("mips64r2");
+ fallthrough;
+ case MIPS_CPU_ISA_M64R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1;
+ set_elf_base_platform("mips64");
+ fallthrough;
+ case MIPS_CPU_ISA_V:
+ c->isa_level |= MIPS_CPU_ISA_V;
+ set_elf_base_platform("mips5");
+ fallthrough;
+ case MIPS_CPU_ISA_IV:
+ c->isa_level |= MIPS_CPU_ISA_IV;
+ set_elf_base_platform("mips4");
+ fallthrough;
+ case MIPS_CPU_ISA_III:
+ c->isa_level |= MIPS_CPU_ISA_II | MIPS_CPU_ISA_III;
+ set_elf_base_platform("mips3");
+ break;
+
+ /* R6 incompatible with everything else */
+ case MIPS_CPU_ISA_M64R6:
+ c->isa_level |= MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6;
+ set_elf_base_platform("mips64r6");
+ fallthrough;
+ case MIPS_CPU_ISA_M32R6:
+ c->isa_level |= MIPS_CPU_ISA_M32R6;
+ set_elf_base_platform("mips32r6");
+ /* Break here so we don't add incompatible ISAs */
+ break;
+ case MIPS_CPU_ISA_M32R5:
+ c->isa_level |= MIPS_CPU_ISA_M32R5;
+ set_elf_base_platform("mips32r5");
+ fallthrough;
+ case MIPS_CPU_ISA_M32R2:
+ c->isa_level |= MIPS_CPU_ISA_M32R2;
+ set_elf_base_platform("mips32r2");
+ fallthrough;
+ case MIPS_CPU_ISA_M32R1:
+ c->isa_level |= MIPS_CPU_ISA_M32R1;
+ set_elf_base_platform("mips32");
+ fallthrough;
+ case MIPS_CPU_ISA_II:
+ c->isa_level |= MIPS_CPU_ISA_II;
+ set_elf_base_platform("mips2");
+ break;
+ }
+}
+
+static char unknown_isa[] = KERN_ERR \
+ "Unsupported ISA type, c0.config0: %d.";
+
+static unsigned int calculate_ftlb_probability(struct cpuinfo_mips *c)
+{
+ unsigned int probability = c->tlbsize / c->tlbsizevtlb;
+
+ /*
+ * 0 = All TLBWR instructions go to FTLB
+ * 1 = 15:1: For every 16 TLBWR instructions, 15 go to the
+ * FTLB and 1 goes to the VTLB.
+ * 2 = 7:1: As above with 7:1 ratio.
+ * 3 = 3:1: As above with 3:1 ratio.
+ *
+ * Use the linear midpoint as the probability threshold.
+ */
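+ /*
+ * Worked example (hypothetical sizes): a 64-entry VTLB with a
+ * 512-entry FTLB gives c->tlbsize = 576, so probability is
+ * 576 / 64 = 9 and we return 2, i.e. a 7:1 FTLB:VTLB ratio.
+ */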
+ if (probability >= 12)
+ return 1;
+ else if (probability >= 6)
+ return 2;
+ else
+ /*
+ * Otherwise the FTLB is less than 4 times the size of the
+ * VTLB; a 3:1 ratio can still be useful though.
+ */
+ return 3;
+}
+
+static int set_ftlb_enable(struct cpuinfo_mips *c, enum ftlb_flags flags)
+{
+ unsigned int config;
+
+ /* It's implementation dependent how the FTLB can be enabled */
+ switch (c->cputype) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_P6600:
+ /* proAptiv & related cores use Config6 to enable the FTLB */
+ config = read_c0_config6();
+
+ if (flags & FTLB_EN)
+ config |= MTI_CONF6_FTLBEN;
+ else
+ config &= ~MTI_CONF6_FTLBEN;
+
+ if (flags & FTLB_SET_PROB) {
+ config &= ~(3 << MTI_CONF6_FTLBP_SHIFT);
+ config |= calculate_ftlb_probability(c)
+ << MTI_CONF6_FTLBP_SHIFT;
+ }
+
+ write_c0_config6(config);
+ back_to_back_c0_hazard();
+ break;
+ case CPU_I6400:
+ case CPU_I6500:
+ /* There's no way to disable the FTLB */
+ if (!(flags & FTLB_EN))
+ return 1;
+ return 0;
+ case CPU_LOONGSON64:
+ /* Flush ITLB, DTLB, VTLB and FTLB */
+ write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
+ LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
+ /* Loongson-3 cores use Config6 to enable the FTLB */
+ config = read_c0_config6();
+ if (flags & FTLB_EN)
+ /* Enable FTLB */
+ write_c0_config6(config & ~LOONGSON_CONF6_FTLBDIS);
+ else
+ /* Disable FTLB */
+ write_c0_config6(config | LOONGSON_CONF6_FTLBDIS);
+ break;
+ default:
+ return 1;
+ }
+
+ return 0;
+}
+
+static int mm_config(struct cpuinfo_mips *c)
+{
+ unsigned int config0, update, mm;
+
+ config0 = read_c0_config();
+ mm = config0 & MIPS_CONF_MM;
+
+ /*
+ * It's implementation dependent what type of write-merge is supported
+ * and whether it can be enabled/disabled. If it is settable, let's
+ * allow merging by default. Some platforms might not support
+ * write-through caching; in that case just ignore the
+ * CP0.Config.MM bit field value.
+ */
+ switch (c->cputype) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_P5600:
+ case CPU_P6600:
+ c->options |= MIPS_CPU_MM_FULL;
+ update = MIPS_CONF_MM_FULL;
+ break;
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ mm = 0;
+ fallthrough;
+ default:
+ update = 0;
+ break;
+ }
+
+ if (update) {
+ config0 = (config0 & ~MIPS_CONF_MM) | update;
+ write_c0_config(config0);
+ } else if (mm == MIPS_CONF_MM_SYSAD) {
+ c->options |= MIPS_CPU_MM_SYSAD;
+ } else if (mm == MIPS_CONF_MM_FULL) {
+ c->options |= MIPS_CPU_MM_FULL;
+ }
+
+ return 0;
+}
+
+static inline unsigned int decode_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+ int isa, mt;
+
+ config0 = read_c0_config();
+
+ /*
+ * Look for Standard TLB or Dual VTLB and FTLB
+ */
+ mt = config0 & MIPS_CONF_MT;
+ if (mt == MIPS_CONF_MT_TLB)
+ c->options |= MIPS_CPU_TLB;
+ else if (mt == MIPS_CONF_MT_FTLB)
+ c->options |= MIPS_CPU_TLB | MIPS_CPU_FTLB;
+
+ isa = (config0 & MIPS_CONF_AT) >> 13;
+ switch (isa) {
+ case 0:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M32R2);
+ break;
+ case 2:
+ set_isa(c, MIPS_CPU_ISA_M32R6);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ case 2:
+ switch ((config0 & MIPS_CONF_AR) >> 10) {
+ case 0:
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ break;
+ case 1:
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ case 2:
+ set_isa(c, MIPS_CPU_ISA_M64R6);
+ break;
+ default:
+ goto unknown;
+ }
+ break;
+ default:
+ goto unknown;
+ }
+
+ return config0 & MIPS_CONF_M;
+
+unknown:
+ panic(unknown_isa, config0);
+}
+
+static inline unsigned int decode_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1;
+
+ config1 = read_c0_config1();
+
+ if (config1 & MIPS_CONF1_MD)
+ c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_PC)
+ c->options |= MIPS_CPU_PERF;
+ if (config1 & MIPS_CONF1_WR)
+ c->options |= MIPS_CPU_WATCH;
+ if (config1 & MIPS_CONF1_CA)
+ c->ases |= MIPS_ASE_MIPS16;
+ if (config1 & MIPS_CONF1_EP)
+ c->options |= MIPS_CPU_EJTAG;
+ if (config1 & MIPS_CONF1_FP) {
+ c->options |= MIPS_CPU_FPU;
+ c->options |= MIPS_CPU_32FPR;
+ }
+ if (cpu_has_tlb) {
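+ /* Config1.MMUSize (bits 30:25) holds the number of TLB entries - 1 */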
+ c->tlbsize = ((config1 & MIPS_CONF1_TLBS) >> 25) + 1;
+ c->tlbsizevtlb = c->tlbsize;
+ c->tlbsizeftlbsets = 0;
+ }
+
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ config2 = read_c0_config2();
+
+ if (config2 & MIPS_CONF2_SL)
+ c->scache.flags &= ~MIPS_CACHE_NOT_PRESENT;
+
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3;
+
+ config3 = read_c0_config3();
+
+ if (config3 & MIPS_CONF3_SM) {
+ c->ases |= MIPS_ASE_SMARTMIPS;
+ c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC;
+ }
+ if (config3 & MIPS_CONF3_RXI)
+ c->options |= MIPS_CPU_RIXI;
+ if (config3 & MIPS_CONF3_CTXTC)
+ c->options |= MIPS_CPU_CTXTC;
+ if (config3 & MIPS_CONF3_DSP)
+ c->ases |= MIPS_ASE_DSP;
+ if (config3 & MIPS_CONF3_DSP2P) {
+ c->ases |= MIPS_ASE_DSP2P;
+ if (cpu_has_mips_r6)
+ c->ases |= MIPS_ASE_DSP3;
+ }
+ if (config3 & MIPS_CONF3_VINT)
+ c->options |= MIPS_CPU_VINT;
+ if (config3 & MIPS_CONF3_VEIC)
+ c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_LPA)
+ c->options |= MIPS_CPU_LPA;
+ if (config3 & MIPS_CONF3_MT)
+ c->ases |= MIPS_ASE_MIPSMT;
+ if (config3 & MIPS_CONF3_ULRI)
+ c->options |= MIPS_CPU_ULRI;
+ if (config3 & MIPS_CONF3_ISA)
+ c->options |= MIPS_CPU_MICROMIPS;
+ if (config3 & MIPS_CONF3_VZ)
+ c->ases |= MIPS_ASE_VZ;
+ if (config3 & MIPS_CONF3_SC)
+ c->options |= MIPS_CPU_SEGMENTS;
+ if (config3 & MIPS_CONF3_BI)
+ c->options |= MIPS_CPU_BADINSTR;
+ if (config3 & MIPS_CONF3_BP)
+ c->options |= MIPS_CPU_BADINSTRP;
+ if (config3 & MIPS_CONF3_MSA)
+ c->ases |= MIPS_ASE_MSA;
+ if (config3 & MIPS_CONF3_PW) {
+ c->htw_seq = 0;
+ c->options |= MIPS_CPU_HTW;
+ }
+ if (config3 & MIPS_CONF3_CDMM)
+ c->options |= MIPS_CPU_CDMM;
+ if (config3 & MIPS_CONF3_SP)
+ c->options |= MIPS_CPU_SP;
+
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+ unsigned int newcf4;
+ unsigned int mmuextdef;
+ unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
+ unsigned long asid_mask;
+
+ config4 = read_c0_config4();
+
+ if (cpu_has_tlb) {
+ if (((config4 & MIPS_CONF4_IE) >> 29) == 2)
+ c->options |= MIPS_CPU_TLBINV;
+
+ /*
+ * R6 has dropped the MMUExtDef field from config4.
+ * On R6 the fields always describe the FTLB, and only if it is
+ * present according to Config.MT.
+ */
+ if (!cpu_has_mips_r6)
+ mmuextdef = config4 & MIPS_CONF4_MMUEXTDEF;
+ else if (cpu_has_ftlb)
+ mmuextdef = MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT;
+ else
+ mmuextdef = 0;
+
+ switch (mmuextdef) {
+ case MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT:
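+ /*
+ * Config4.MMUSizeExt extends Config1.MMUSize with
+ * higher-order bits, so each unit adds another
+ * 0x40 (64) TLB entries
+ */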
+ c->tlbsize += (config4 & MIPS_CONF4_MMUSIZEEXT) * 0x40;
+ c->tlbsizevtlb = c->tlbsize;
+ break;
+ case MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT:
+ c->tlbsizevtlb +=
+ ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
+ MIPS_CONF4_VTLBSIZEEXT_SHIFT) * 0x40;
+ c->tlbsize = c->tlbsizevtlb;
+ ftlb_page = MIPS_CONF4_VFTLBPAGESIZE;
+ fallthrough;
+ case MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT:
+ if (mips_ftlb_disabled)
+ break;
+ newcf4 = (config4 & ~ftlb_page) |
+ (page_size_ftlb(mmuextdef) <<
+ MIPS_CONF4_FTLBPAGESIZE_SHIFT);
+ write_c0_config4(newcf4);
+ back_to_back_c0_hazard();
+ config4 = read_c0_config4();
+ if (config4 != newcf4) {
+ pr_err("PAGE_SIZE 0x%lx is not supported by FTLB (config4=0x%x)\n",
+ PAGE_SIZE, config4);
+ /* Switch FTLB off */
+ set_ftlb_enable(c, 0);
+ mips_ftlb_disabled = 1;
+ break;
+ }
+ c->tlbsizeftlbsets = 1 <<
+ ((config4 & MIPS_CONF4_FTLBSETS) >>
+ MIPS_CONF4_FTLBSETS_SHIFT);
+ c->tlbsizeftlbways = ((config4 & MIPS_CONF4_FTLBWAYS) >>
+ MIPS_CONF4_FTLBWAYS_SHIFT) + 2;
+ c->tlbsize += c->tlbsizeftlbways * c->tlbsizeftlbsets;
+ mips_has_ftlb_configured = 1;
+ break;
+ }
+ }
+
+ c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+ >> MIPS_CONF4_KSCREXIST_SHIFT;
+
+ asid_mask = MIPS_ENTRYHI_ASID;
+ if (config4 & MIPS_CONF4_AE)
+ asid_mask |= MIPS_ENTRYHI_ASIDX;
+ set_cpu_asid_mask(c, asid_mask);
+
+ /*
+ * Warn if the computed ASID mask doesn't match the mask the kernel
+ * is built for. This may indicate either a serious problem or an
+ * easy optimisation opportunity, but either way should be addressed.
+ */
+ WARN_ON(asid_mask != cpu_asid_mask(c));
+
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5, max_mmid_width;
+ unsigned long asid_mask;
+
+ config5 = read_c0_config5();
+ config5 &= ~(MIPS_CONF5_UFR | MIPS_CONF5_UFE);
+
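+ /*
+ * If cpu_has_mmid isn't a compile-time constant, optimistically set
+ * Config5.MI and probe below whether the bit stuck; if it is
+ * constant, simply honour the configured value.
+ */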
+ if (cpu_has_mips_r6) {
+ if (!__builtin_constant_p(cpu_has_mmid) || cpu_has_mmid)
+ config5 |= MIPS_CONF5_MI;
+ else
+ config5 &= ~MIPS_CONF5_MI;
+ }
+
+ write_c0_config5(config5);
+
+ if (config5 & MIPS_CONF5_EVA)
+ c->options |= MIPS_CPU_EVA;
+ if (config5 & MIPS_CONF5_MRP)
+ c->options |= MIPS_CPU_MAAR;
+ if (config5 & MIPS_CONF5_LLB)
+ c->options |= MIPS_CPU_RW_LLB;
+ if (config5 & MIPS_CONF5_MVH)
+ c->options |= MIPS_CPU_MVH;
+ if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
+ c->options |= MIPS_CPU_VP;
+ if (config5 & MIPS_CONF5_CA2)
+ c->ases |= MIPS_ASE_MIPS16E2;
+
+ if (config5 & MIPS_CONF5_CRCP)
+ elf_hwcap |= HWCAP_MIPS_CRC32;
+
+ if (cpu_has_mips_r6) {
+ /* Ensure the write to config5 above takes effect */
+ back_to_back_c0_hazard();
+
+ /* Check whether we successfully enabled MMID support */
+ config5 = read_c0_config5();
+ if (config5 & MIPS_CONF5_MI)
+ c->options |= MIPS_CPU_MMID;
+
+ /*
+ * Warn if we've hardcoded cpu_has_mmid to a value unsuitable
+ * for the CPU we're running on, or if CPUs in an SMP system
+ * have inconsistent MMID support.
+ */
+ WARN_ON(!!cpu_has_mmid != !!(config5 & MIPS_CONF5_MI));
+
+ if (cpu_has_mmid) {
+ write_c0_memorymapid(~0ul);
+ back_to_back_c0_hazard();
+ asid_mask = read_c0_memorymapid();
+
+ /*
+ * We maintain a bitmap to track MMID allocation, and
+ * need a sensible upper bound on the size of that
+ * bitmap. The initial CPU with MMID support (I6500)
+ * supports 16 bit MMIDs, which gives us an 8KiB
+ * bitmap. The architecture recommends that hardware
+ * support 32 bit MMIDs, which would give us a 512MiB
+ * bitmap - that's too big in most cases.
+ *
+ * Cap MMID width at 16 bits for now & we can revisit
+ * this if & when hardware supports anything wider.
+ */
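+ /* bitmap bytes = (1 << MMID width) / 8: 2^16/8 = 8 KiB, 2^32/8 = 512 MiB */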
+ max_mmid_width = 16;
+ if (asid_mask > GENMASK(max_mmid_width - 1, 0)) {
+ pr_info("Capping MMID width at %d bits",
+ max_mmid_width);
+ asid_mask = GENMASK(max_mmid_width - 1, 0);
+ }
+
+ set_cpu_asid_mask(c, asid_mask);
+ }
+ }
+
+ return config5 & MIPS_CONF_M;
+}
+
+static void decode_configs(struct cpuinfo_mips *c)
+{
+ int ok;
+
+ /* MIPS32 or MIPS64 compliant CPU. */
+ c->options = MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE | MIPS_CPU_COUNTER |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC | MIPS_CPU_MCHECK;
+
+ c->scache.flags = MIPS_CACHE_NOT_PRESENT;
+
+ /* Enable FTLB if present and not disabled */
+ set_ftlb_enable(c, mips_ftlb_disabled ? 0 : FTLB_EN);
+
+ ok = decode_config0(c); /* Read Config registers. */
+ BUG_ON(!ok); /* Arch spec violation! */
+ if (ok)
+ ok = decode_config1(c);
+ if (ok)
+ ok = decode_config2(c);
+ if (ok)
+ ok = decode_config3(c);
+ if (ok)
+ ok = decode_config4(c);
+ if (ok)
+ ok = decode_config5(c);
+
+ /* Probe the EBase.WG bit */
+ if (cpu_has_mips_r2_r6) {
+ u64 ebase;
+ unsigned int status;
+
+ /* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */
+ ebase = cpu_has_mips64r6 ? read_c0_ebase_64()
+ : (s32)read_c0_ebase();
+ if (ebase & MIPS_EBASE_WG) {
+ /* WG bit already set, we can avoid the clumsy probe */
+ c->options |= MIPS_CPU_EBASE_WG;
+ } else {
+ /* It's UNDEFINED to change EBase while BEV=0 */
+ status = read_c0_status();
+ write_c0_status(status | ST0_BEV);
+ irq_enable_hazard();
+ /*
+ * On pre-r6 cores, this may well clobber the upper bits
+ * of EBase. This is hard to avoid without potentially
+ * hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit.
+ */
+ if (cpu_has_mips64r6)
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+ else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+ back_to_back_c0_hazard();
+ /* Restore BEV */
+ write_c0_status(status);
+ if (read_c0_ebase() & MIPS_EBASE_WG) {
+ c->options |= MIPS_CPU_EBASE_WG;
+ write_c0_ebase(ebase);
+ }
+ }
+ }
+
+ /* configure the FTLB write probability */
+ set_ftlb_enable(c, (mips_ftlb_disabled ? 0 : FTLB_EN) | FTLB_SET_PROB);
+
+ mips_probe_watch_registers(c);
+
+#ifndef CONFIG_MIPS_CPS
+ if (cpu_has_mips_r2_r6) {
+ unsigned int core;
+
+ core = get_ebase_cpunum();
+ if (cpu_has_mipsmt)
+ core >>= fls(core_nvpes()) - 1;
+ cpu_set_core(c, core);
+ }
+#endif
+}
+
+/*
+ * Probe for certain guest capabilities by writing config bits and reading back.
+ * Finally write back the original value.
+ */
+#define probe_gc0_config(name, maxconf, bits) \
+do { \
+ unsigned int tmp; \
+ tmp = read_gc0_##name(); \
+ write_gc0_##name(tmp | (bits)); \
+ back_to_back_c0_hazard(); \
+ maxconf = read_gc0_##name(); \
+ write_gc0_##name(tmp); \
+} while (0)
+
+/*
+ * Probe for dynamic guest capabilities by changing certain config bits and
+ * reading back to see if they change. Finally write back the original value.
+ */
+#define probe_gc0_config_dyn(name, maxconf, dynconf, bits) \
+do { \
+ maxconf = read_gc0_##name(); \
+ write_gc0_##name(maxconf ^ (bits)); \
+ back_to_back_c0_hazard(); \
+ dynconf = maxconf ^ read_gc0_##name(); \
+ write_gc0_##name(maxconf); \
+ maxconf |= dynconf; \
+} while (0)
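+
+/*
+ * After the macro runs, dynconf holds the bits that actually toggled
+ * (i.e. are dynamically switchable) and maxconf the union of the bits
+ * currently set and those that could be set.
+ */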
+
+static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+
+ probe_gc0_config(config, config0, MIPS_CONF_M);
+
+ if (config0 & MIPS_CONF_M)
+ c->guest.conf |= BIT(1);
+ return config0 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1, config1_dyn;
+
+ probe_gc0_config_dyn(config1, config1, config1_dyn,
+ MIPS_CONF_M | MIPS_CONF1_PC | MIPS_CONF1_WR |
+ MIPS_CONF1_FP);
+
+ if (config1 & MIPS_CONF1_FP)
+ c->guest.options |= MIPS_CPU_FPU;
+ if (config1_dyn & MIPS_CONF1_FP)
+ c->guest.options_dyn |= MIPS_CPU_FPU;
+
+ if (config1 & MIPS_CONF1_WR)
+ c->guest.options |= MIPS_CPU_WATCH;
+ if (config1_dyn & MIPS_CONF1_WR)
+ c->guest.options_dyn |= MIPS_CPU_WATCH;
+
+ if (config1 & MIPS_CONF1_PC)
+ c->guest.options |= MIPS_CPU_PERF;
+ if (config1_dyn & MIPS_CONF1_PC)
+ c->guest.options_dyn |= MIPS_CPU_PERF;
+
+ if (config1 & MIPS_CONF_M)
+ c->guest.conf |= BIT(2);
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ probe_gc0_config(config2, config2, MIPS_CONF_M);
+
+ if (config2 & MIPS_CONF_M)
+ c->guest.conf |= BIT(3);
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3, config3_dyn;
+
+ probe_gc0_config_dyn(config3, config3, config3_dyn,
+ MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_ULRI |
+ MIPS_CONF3_CTXTC);
+
+ if (config3 & MIPS_CONF3_CTXTC)
+ c->guest.options |= MIPS_CPU_CTXTC;
+ if (config3_dyn & MIPS_CONF3_CTXTC)
+ c->guest.options_dyn |= MIPS_CPU_CTXTC;
+
+ if (config3 & MIPS_CONF3_PW)
+ c->guest.options |= MIPS_CPU_HTW;
+
+ if (config3 & MIPS_CONF3_ULRI)
+ c->guest.options |= MIPS_CPU_ULRI;
+
+ if (config3 & MIPS_CONF3_SC)
+ c->guest.options |= MIPS_CPU_SEGMENTS;
+
+ if (config3 & MIPS_CONF3_BI)
+ c->guest.options |= MIPS_CPU_BADINSTR;
+ if (config3 & MIPS_CONF3_BP)
+ c->guest.options |= MIPS_CPU_BADINSTRP;
+
+ if (config3 & MIPS_CONF3_MSA)
+ c->guest.ases |= MIPS_ASE_MSA;
+ if (config3_dyn & MIPS_CONF3_MSA)
+ c->guest.ases_dyn |= MIPS_ASE_MSA;
+
+ if (config3 & MIPS_CONF_M)
+ c->guest.conf |= BIT(4);
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+
+ probe_gc0_config(config4, config4,
+ MIPS_CONF_M | MIPS_CONF4_KSCREXIST);
+
+ c->guest.kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+ >> MIPS_CONF4_KSCREXIST_SHIFT;
+
+ if (config4 & MIPS_CONF_M)
+ c->guest.conf |= BIT(5);
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5, config5_dyn;
+
+ probe_gc0_config_dyn(config5, config5, config5_dyn,
+ MIPS_CONF_M | MIPS_CONF5_MVH | MIPS_CONF5_MRP);
+
+ if (config5 & MIPS_CONF5_MRP)
+ c->guest.options |= MIPS_CPU_MAAR;
+ if (config5_dyn & MIPS_CONF5_MRP)
+ c->guest.options_dyn |= MIPS_CPU_MAAR;
+
+ if (config5 & MIPS_CONF5_LLB)
+ c->guest.options |= MIPS_CPU_RW_LLB;
+
+ if (config5 & MIPS_CONF5_MVH)
+ c->guest.options |= MIPS_CPU_MVH;
+
+ if (config5 & MIPS_CONF_M)
+ c->guest.conf |= BIT(6);
+ return config5 & MIPS_CONF_M;
+}
+
+static inline void decode_guest_configs(struct cpuinfo_mips *c)
+{
+ unsigned int ok;
+
+ ok = decode_guest_config0(c);
+ if (ok)
+ ok = decode_guest_config1(c);
+ if (ok)
+ ok = decode_guest_config2(c);
+ if (ok)
+ ok = decode_guest_config3(c);
+ if (ok)
+ ok = decode_guest_config4(c);
+ if (ok)
+ decode_guest_config5(c);
+}
+
+static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c)
+{
+ unsigned int guestctl0, temp;
+
+ guestctl0 = read_c0_guestctl0();
+
+ if (guestctl0 & MIPS_GCTL0_G0E)
+ c->options |= MIPS_CPU_GUESTCTL0EXT;
+ if (guestctl0 & MIPS_GCTL0_G1)
+ c->options |= MIPS_CPU_GUESTCTL1;
+ if (guestctl0 & MIPS_GCTL0_G2)
+ c->options |= MIPS_CPU_GUESTCTL2;
+ if (!(guestctl0 & MIPS_GCTL0_RAD)) {
+ c->options |= MIPS_CPU_GUESTID;
+
+ /*
+ * Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0
+ * first, otherwise all data accesses will be fully virtualised
+ * as if they were performed in guest mode.
+ */
+ write_c0_guestctl1(0);
+ tlbw_use_hazard();
+
+ write_c0_guestctl0(guestctl0 | MIPS_GCTL0_DRG);
+ back_to_back_c0_hazard();
+ temp = read_c0_guestctl0();
+
+ if (temp & MIPS_GCTL0_DRG) {
+ write_c0_guestctl0(guestctl0);
+ c->options |= MIPS_CPU_DRG;
+ }
+ }
+}
+
+static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c)
+{
+ if (cpu_has_guestid) {
+ /* determine the number of bits of GuestID available */
+ write_c0_guestctl1(MIPS_GCTL1_ID);
+ back_to_back_c0_hazard();
+ c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID)
+ >> MIPS_GCTL1_ID_SHIFT;
+ write_c0_guestctl1(0);
+ }
+}
+
+static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c)
+{
+ /* determine the number of bits of GTOffset available */
+ write_c0_gtoffset(0xffffffff);
+ back_to_back_c0_hazard();
+ c->gtoffset_mask = read_c0_gtoffset();
+ write_c0_gtoffset(0);
+}
+
+static inline void cpu_probe_vz(struct cpuinfo_mips *c)
+{
+ cpu_probe_guestctl0(c);
+ if (cpu_has_guestctl1)
+ cpu_probe_guestctl1(c);
+
+ cpu_probe_gtoffset(c);
+
+ decode_guest_configs(c);
+}
+
+#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
+ | MIPS_CPU_COUNTER)
+
+static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_R2000:
+ c->cputype = CPU_R2000;
+ __cpu_name[cpu] = "R2000";
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_R3000:
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
+ if (cpu_has_confreg()) {
+ c->cputype = CPU_R3081E;
+ __cpu_name[cpu] = "R3081";
+ } else {
+ c->cputype = CPU_R3000A;
+ __cpu_name[cpu] = "R3000A";
+ }
+ } else {
+ c->cputype = CPU_R3000;
+ __cpu_name[cpu] = "R3000";
+ }
+ c->fpu_msk31 |= FPU_CSR_CONDX | FPU_CSR_FS;
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_R4000:
+ if (read_c0_config() & CONF_SC) {
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
+ c->cputype = CPU_R4400PC;
+ __cpu_name[cpu] = "R4400PC";
+ } else {
+ c->cputype = CPU_R4000PC;
+ __cpu_name[cpu] = "R4000PC";
+ }
+ } else {
+ int cca = read_c0_config() & CONF_CM_CMASK;
+ int mc;
+
+ /*
+ * SC and MC versions can't be reliably told apart,
+ * but only the latter support coherent caching
+ * modes so assume the firmware has set the KSEG0
+ * coherency attribute reasonably (if uncached, we
+ * assume SC).
+ */
+ switch (cca) {
+ case CONF_CM_CACHABLE_CE:
+ case CONF_CM_CACHABLE_COW:
+ case CONF_CM_CACHABLE_CUW:
+ mc = 1;
+ break;
+ default:
+ mc = 0;
+ break;
+ }
+ if ((c->processor_id & PRID_REV_MASK) >=
+ PRID_REV_R4400) {
+ c->cputype = mc ? CPU_R4400MC : CPU_R4400SC;
+ __cpu_name[cpu] = mc ? "R4400MC" : "R4400SC";
+ } else {
+ c->cputype = mc ? CPU_R4000MC : CPU_R4000SC;
+ __cpu_name[cpu] = mc ? "R4000MC" : "R4000SC";
+ }
+ }
+
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_WATCH | MIPS_CPU_VCE |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_R4300:
+ c->cputype = CPU_R4300;
+ __cpu_name[cpu] = "R4300";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 32;
+ break;
+ case PRID_IMP_R4600:
+ c->cputype = CPU_R4600;
+ __cpu_name[cpu] = "R4600";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ #if 0
+ case PRID_IMP_R4650:
+ /*
+ * This processor doesn't have an MMU, so it's not
+ * "real easy" to run Linux on it. It is left purely
+ * for documentation. Commented out because it shares
+ * its c0_prid ID number with the TX3900.
+ */
+ c->cputype = CPU_R4650;
+ __cpu_name[cpu] = "R4650";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ #endif
+ case PRID_IMP_R4700:
+ c->cputype = CPU_R4700;
+ __cpu_name[cpu] = "R4700";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_TX49:
+ c->cputype = CPU_TX49XX;
+ __cpu_name[cpu] = "R49XX";
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ c->options = R4K_OPTS | MIPS_CPU_LLSC;
+ if (!(c->processor_id & 0x08))
+ c->options |= MIPS_CPU_FPU | MIPS_CPU_32FPR;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_R5000:
+ c->cputype = CPU_R5000;
+ __cpu_name[cpu] = "R5000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_R5500:
+ c->cputype = CPU_R5500;
+ __cpu_name[cpu] = "R5500";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_WATCH | MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_NEVADA:
+ c->cputype = CPU_NEVADA;
+ __cpu_name[cpu] = "Nevada";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_DIVEC | MIPS_CPU_LLSC;
+ c->tlbsize = 48;
+ break;
+ case PRID_IMP_RM7000:
+ c->cputype = CPU_RM7000;
+ __cpu_name[cpu] = "RM7000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = R4K_OPTS | MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_LLSC;
+ /*
+ * Undocumented RM7000: Bit 29 in the info register of
+ * the RM7000 v2.0 indicates if the TLB has 48 or 64
+ * entries.
+ *
+ * Bit 29: 1 => 64-entry JTLB
+ * 0 => 48-entry JTLB
+ */
+ c->tlbsize = (read_c0_info() & (1 << 29)) ? 64 : 48;
+ break;
+ case PRID_IMP_R10000:
+ c->cputype = CPU_R10000;
+ __cpu_name[cpu] = "R10000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ break;
+ case PRID_IMP_R12000:
+ c->cputype = CPU_R12000;
+ __cpu_name[cpu] = "R12000";
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST);
+ break;
+ case PRID_IMP_R14000:
+ if (((c->processor_id >> 4) & 0x0f) > 2) {
+ c->cputype = CPU_R16000;
+ __cpu_name[cpu] = "R16000";
+ } else {
+ c->cputype = CPU_R14000;
+ __cpu_name[cpu] = "R14000";
+ }
+ set_isa(c, MIPS_CPU_ISA_IV);
+ c->options = MIPS_CPU_TLB | MIPS_CPU_4K_CACHE | MIPS_CPU_4KEX |
+ MIPS_CPU_FPU | MIPS_CPU_32FPR |
+ MIPS_CPU_COUNTER | MIPS_CPU_WATCH |
+ MIPS_CPU_LLSC;
+ c->tlbsize = 64;
+ write_c0_r10k_diag(read_c0_r10k_diag() | R10K_DIAG_E_GHIST);
+ break;
+ case PRID_IMP_LOONGSON_64C: /* Loongson-2/3 */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2E:
+ c->cputype = CPU_LOONGSON2EF;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2e");
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ break;
+ case PRID_REV_LOONGSON2F:
+ c->cputype = CPU_LOONGSON2EF;
+ __cpu_name[cpu] = "ICT Loongson-2";
+ set_elf_platform(cpu, "loongson2f");
+ set_isa(c, MIPS_CPU_ISA_III);
+ c->fpu_msk31 |= FPU_CSR_CONDX;
+ break;
+ case PRID_REV_LOONGSON3A_R1:
+ c->cputype = CPU_LOONGSON64;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT);
+ break;
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
+ c->cputype = CPU_LOONGSON64;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3b");
+ set_isa(c, MIPS_CPU_ISA_M64R1);
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT);
+ break;
+ }
+
+ c->options = R4K_OPTS |
+ MIPS_CPU_FPU | MIPS_CPU_LLSC |
+ MIPS_CPU_32FPR;
+ c->tlbsize = 64;
+ set_cpu_asid_mask(c, MIPS_ENTRYHI_ASID);
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ break;
+ case PRID_IMP_LOONGSON_32: /* Loongson-1 */
+ decode_configs(c);
+
+ c->cputype = CPU_LOONGSON32;
+
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON1B:
+ __cpu_name[cpu] = "Loongson 1B";
+ break;
+ }
+
+ break;
+ }
+}
+
+static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_QEMU_GENERIC:
+ c->writecombine = _CACHE_UNCACHED;
+ c->cputype = CPU_QEMU_GENERIC;
+ __cpu_name[cpu] = "MIPS GENERIC QEMU";
+ break;
+ case PRID_IMP_4KC:
+ c->cputype = CPU_4KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 4Kc";
+ break;
+ case PRID_IMP_4KEC:
+ case PRID_IMP_4KECR2:
+ c->cputype = CPU_4KEC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 4KEc";
+ break;
+ case PRID_IMP_4KSC:
+ case PRID_IMP_4KSD:
+ c->cputype = CPU_4KSC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 4KSc";
+ break;
+ case PRID_IMP_5KC:
+ c->cputype = CPU_5KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 5Kc";
+ break;
+ case PRID_IMP_5KE:
+ c->cputype = CPU_5KE;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 5KE";
+ break;
+ case PRID_IMP_20KC:
+ c->cputype = CPU_20KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 20Kc";
+ break;
+ case PRID_IMP_24K:
+ c->cputype = CPU_24K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 24Kc";
+ break;
+ case PRID_IMP_24KE:
+ c->cputype = CPU_24K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 24KEc";
+ break;
+ case PRID_IMP_25KF:
+ c->cputype = CPU_25KF;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 25Kc";
+ break;
+ case PRID_IMP_34K:
+ c->cputype = CPU_34K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 34Kc";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_74K:
+ c->cputype = CPU_74K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 74Kc";
+ break;
+ case PRID_IMP_M14KC:
+ c->cputype = CPU_M14KC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS M14Kc";
+ break;
+ case PRID_IMP_M14KEC:
+ c->cputype = CPU_M14KEC;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS M14KEc";
+ break;
+ case PRID_IMP_1004K:
+ c->cputype = CPU_1004K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 1004Kc";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_1074K:
+ c->cputype = CPU_1074K;
+ c->writecombine = _CACHE_UNCACHED;
+ __cpu_name[cpu] = "MIPS 1074Kc";
+ break;
+ case PRID_IMP_INTERAPTIV_UP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_INTERAPTIV_MP:
+ c->cputype = CPU_INTERAPTIV;
+ __cpu_name[cpu] = "MIPS interAptiv (multi)";
+ cpu_set_mt_per_tc_perf(c);
+ break;
+ case PRID_IMP_PROAPTIV_UP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv";
+ break;
+ case PRID_IMP_PROAPTIV_MP:
+ c->cputype = CPU_PROAPTIV;
+ __cpu_name[cpu] = "MIPS proAptiv (multi)";
+ break;
+ case PRID_IMP_P5600:
+ c->cputype = CPU_P5600;
+ __cpu_name[cpu] = "MIPS P5600";
+ break;
+ case PRID_IMP_P6600:
+ c->cputype = CPU_P6600;
+ __cpu_name[cpu] = "MIPS P6600";
+ break;
+ case PRID_IMP_I6400:
+ c->cputype = CPU_I6400;
+ __cpu_name[cpu] = "MIPS I6400";
+ break;
+ case PRID_IMP_I6500:
+ c->cputype = CPU_I6500;
+ __cpu_name[cpu] = "MIPS I6500";
+ break;
+ case PRID_IMP_M5150:
+ c->cputype = CPU_M5150;
+ __cpu_name[cpu] = "MIPS M5150";
+ break;
+ case PRID_IMP_M6250:
+ c->cputype = CPU_M6250;
+ __cpu_name[cpu] = "MIPS M6250";
+ break;
+ }
+
+ decode_configs(c);
+
+ spram_config();
+
+ mm_config(c);
+
+ switch (__get_cpu_type(c->cputype)) {
+ case CPU_M5150:
+ case CPU_P5600:
+ set_isa(c, MIPS_CPU_ISA_M32R5);
+ break;
+ case CPU_I6500:
+ c->options |= MIPS_CPU_SHARED_FTLB_ENTRIES;
+ fallthrough;
+ case CPU_I6400:
+ c->options |= MIPS_CPU_SHARED_FTLB_RAM;
+ fallthrough;
+ default:
+ break;
+ }
+
+ /*
+ * Recent MIPS cores use the implementation-dependent ExcCode 16 for
+ * cache/FTLB parity exceptions.
+ */
+ switch (__get_cpu_type(c->cputype)) {
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_P6600:
+ case CPU_I6400:
+ case CPU_I6500:
+ c->options |= MIPS_CPU_FTLBPAREX;
+ break;
+ }
+}
+
+static inline void cpu_probe_alchemy(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_AU1_REV1:
+ case PRID_IMP_AU1_REV2:
+ c->cputype = CPU_ALCHEMY;
+ switch ((c->processor_id >> 24) & 0xff) {
+ case 0:
+ __cpu_name[cpu] = "Au1000";
+ break;
+ case 1:
+ __cpu_name[cpu] = "Au1500";
+ break;
+ case 2:
+ __cpu_name[cpu] = "Au1100";
+ break;
+ case 3:
+ __cpu_name[cpu] = "Au1550";
+ break;
+ case 4:
+ __cpu_name[cpu] = "Au1200";
+ if ((c->processor_id & PRID_REV_MASK) == 2)
+ __cpu_name[cpu] = "Au1250";
+ break;
+ case 5:
+ __cpu_name[cpu] = "Au1210";
+ break;
+ default:
+ __cpu_name[cpu] = "Au1xxx";
+ break;
+ }
+ break;
+ case PRID_IMP_NETLOGIC_AU13XX:
+ c->cputype = CPU_ALCHEMY;
+ __cpu_name[cpu] = "Au1300";
+ break;
+ }
+}
+
+static inline void cpu_probe_sibyte(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_SB1:
+ c->cputype = CPU_SB1;
+ __cpu_name[cpu] = "SiByte SB1";
+ /* FPU in pass1 is known to have issues. */
+ if ((c->processor_id & PRID_REV_MASK) < 0x02)
+ c->options &= ~(MIPS_CPU_FPU | MIPS_CPU_32FPR);
+ break;
+ case PRID_IMP_SB1A:
+ c->cputype = CPU_SB1A;
+ __cpu_name[cpu] = "SiByte SB1A";
+ break;
+ }
+}
+
+static inline void cpu_probe_sandcraft(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_SR71000:
+ c->cputype = CPU_SR71000;
+ __cpu_name[cpu] = "Sandcraft SR71000";
+ c->scache.ways = 8;
+ c->tlbsize = 64;
+ break;
+ }
+}
+
+static inline void cpu_probe_nxp(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_PR4450:
+ c->cputype = CPU_PR4450;
+ __cpu_name[cpu] = "Philips PR4450";
+ set_isa(c, MIPS_CPU_ISA_M32R1);
+ break;
+ }
+}
+
+static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_BMIPS32_REV4:
+ case PRID_IMP_BMIPS32_REV8:
+ c->cputype = CPU_BMIPS32;
+ __cpu_name[cpu] = "Broadcom BMIPS32";
+ set_elf_platform(cpu, "bmips32");
+ break;
+ case PRID_IMP_BMIPS3300:
+ case PRID_IMP_BMIPS3300_ALT:
+ case PRID_IMP_BMIPS3300_BUG:
+ c->cputype = CPU_BMIPS3300;
+ __cpu_name[cpu] = "Broadcom BMIPS3300";
+ set_elf_platform(cpu, "bmips3300");
+ reserve_exception_space(0x400, VECTORSPACING * 64);
+ break;
+ case PRID_IMP_BMIPS43XX: {
+ int rev = c->processor_id & PRID_REV_MASK;
+
+ if (rev >= PRID_REV_BMIPS4380_LO &&
+ rev <= PRID_REV_BMIPS4380_HI) {
+ c->cputype = CPU_BMIPS4380;
+ __cpu_name[cpu] = "Broadcom BMIPS4380";
+ set_elf_platform(cpu, "bmips4380");
+ c->options |= MIPS_CPU_RIXI;
+ reserve_exception_space(0x400, VECTORSPACING * 64);
+ } else {
+ c->cputype = CPU_BMIPS4350;
+ __cpu_name[cpu] = "Broadcom BMIPS4350";
+ set_elf_platform(cpu, "bmips4350");
+ }
+ break;
+ }
+ case PRID_IMP_BMIPS5000:
+ case PRID_IMP_BMIPS5200:
+ c->cputype = CPU_BMIPS5000;
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200)
+ __cpu_name[cpu] = "Broadcom BMIPS5200";
+ else
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
+ set_elf_platform(cpu, "bmips5000");
+ c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
+ reserve_exception_space(0x1000, VECTORSPACING * 64);
+ break;
+ }
+}
+
+static inline void cpu_probe_cavium(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_CAVIUM_CN38XX:
+ case PRID_IMP_CAVIUM_CN31XX:
+ case PRID_IMP_CAVIUM_CN30XX:
+ c->cputype = CPU_CAVIUM_OCTEON;
+ __cpu_name[cpu] = "Cavium Octeon";
+ goto platform;
+ case PRID_IMP_CAVIUM_CN58XX:
+ case PRID_IMP_CAVIUM_CN56XX:
+ case PRID_IMP_CAVIUM_CN50XX:
+ case PRID_IMP_CAVIUM_CN52XX:
+ c->cputype = CPU_CAVIUM_OCTEON_PLUS;
+ __cpu_name[cpu] = "Cavium Octeon+";
+platform:
+ set_elf_platform(cpu, "octeon");
+ break;
+ case PRID_IMP_CAVIUM_CN61XX:
+ case PRID_IMP_CAVIUM_CN63XX:
+ case PRID_IMP_CAVIUM_CN66XX:
+ case PRID_IMP_CAVIUM_CN68XX:
+ case PRID_IMP_CAVIUM_CNF71XX:
+ c->cputype = CPU_CAVIUM_OCTEON2;
+ __cpu_name[cpu] = "Cavium Octeon II";
+ set_elf_platform(cpu, "octeon2");
+ break;
+ case PRID_IMP_CAVIUM_CN70XX:
+ case PRID_IMP_CAVIUM_CN73XX:
+ case PRID_IMP_CAVIUM_CNF75XX:
+ case PRID_IMP_CAVIUM_CN78XX:
+ c->cputype = CPU_CAVIUM_OCTEON3;
+ __cpu_name[cpu] = "Cavium Octeon III";
+ set_elf_platform(cpu, "octeon3");
+ break;
+ default:
+ printk(KERN_INFO "Unknown Octeon chip!\n");
+ c->cputype = CPU_UNKNOWN;
+ break;
+ }
+}
+
+#ifdef CONFIG_CPU_LOONGSON64
+#include <loongson_regs.h>
+
+static inline void decode_cpucfg(struct cpuinfo_mips *c)
+{
+ u32 cfg1 = read_cpucfg(LOONGSON_CFG1);
+ u32 cfg2 = read_cpucfg(LOONGSON_CFG2);
+ u32 cfg3 = read_cpucfg(LOONGSON_CFG3);
+
+ if (cfg1 & LOONGSON_CFG1_MMI)
+ c->ases |= MIPS_ASE_LOONGSON_MMI;
+
+ if (cfg2 & LOONGSON_CFG2_LEXT1)
+ c->ases |= MIPS_ASE_LOONGSON_EXT;
+
+ if (cfg2 & LOONGSON_CFG2_LEXT2)
+ c->ases |= MIPS_ASE_LOONGSON_EXT2;
+
+ if (cfg2 & LOONGSON_CFG2_LSPW) {
+ c->options |= MIPS_CPU_LDPTE;
+ c->guest.options |= MIPS_CPU_LDPTE;
+ }
+
+ if (cfg3 & LOONGSON_CFG3_LCAMP)
+ c->ases |= MIPS_ASE_LOONGSON_CAM;
+}
+
+static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ c->cputype = CPU_LOONGSON64;
+
+ /* All Loongson processors covered here define ExcCode 16 as GSExc. */
+ decode_configs(c);
+ c->options |= MIPS_CPU_GSEXCEX;
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_LOONGSON_64R: /* Loongson-64 Reduced */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON2K_R1_0:
+ case PRID_REV_LOONGSON2K_R1_1:
+ case PRID_REV_LOONGSON2K_R1_2:
+ case PRID_REV_LOONGSON2K_R1_3:
+ __cpu_name[cpu] = "Loongson-2K";
+ set_elf_platform(cpu, "gs264e");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ }
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_EXT |
+ MIPS_ASE_LOONGSON_EXT2);
+ break;
+ case PRID_IMP_LOONGSON_64C: /* Loongson-3 Classic */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON3A_R2_0:
+ case PRID_REV_LOONGSON3A_R2_1:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ case PRID_REV_LOONGSON3A_R3_0:
+ case PRID_REV_LOONGSON3A_R3_1:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ }
+ /*
+ * Loongson-3 Classic did not implement the MIPS standard TLBINV,
+ * but did implement TLBINVF and EHINV. As we currently use only
+ * these two features, enable MIPS_CPU_TLBINV as well.
+ *
+ * Also, some early Loongson-3A2000 parts had the wrong TLB type
+ * in their Config register; we correct it here.
+ */
+ c->options |= MIPS_CPU_FTLB | MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->ases |= (MIPS_ASE_LOONGSON_MMI | MIPS_ASE_LOONGSON_CAM |
+ MIPS_ASE_LOONGSON_EXT | MIPS_ASE_LOONGSON_EXT2);
+ c->ases &= ~MIPS_ASE_VZ; /* VZ of Loongson-3A2000/3000 is incomplete */
+ break;
+ case PRID_IMP_LOONGSON_64G:
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ decode_cpucfg(c);
+ break;
+ default:
+ panic("Unknown Loongson Processor ID!");
+ break;
+ }
+}
+#else
+static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu) { }
+#endif
+
+static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ decode_configs(c);
+
+ /*
+ * XBurst lacks a config2 register, so the config3 decode was skipped
+ * in decode_configs().
+ */
+ decode_config3(c);
+
+ /* XBurst does not implement the CP0 counter. */
+ c->options &= ~MIPS_CPU_COUNTER;
+ BUG_ON(__builtin_constant_p(cpu_has_counter) && cpu_has_counter);
+
+ /* XBurst has a virtually tagged icache */
+ c->icache.flags |= MIPS_CACHE_VTAG;
+
+ switch (c->processor_id & PRID_IMP_MASK) {
+
+ /* XBurst®1 with MXU1.0/MXU1.1 SIMD ISA */
+ case PRID_IMP_XBURST_REV1:
+
+ /*
+ * The XBurst core by default attempts to avoid branch target
+ * buffer lookups by detecting & special casing loops. This
+ * feature causes BogoMIPS and lpj to be miscalculated.
+ * Set cp0 config7 bit 4 to disable this feature.
+ */
+ set_c0_config7(MIPS_CONF7_BTB_LOOP_EN);
+
+ switch (c->processor_id & PRID_COMP_MASK) {
+
+ /*
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 report
+ * themselves as MIPS32r2 compatible in their config0 register,
+ * but they don't actually support this ISA.
+ */
+ case PRID_COMP_INGENIC_D0:
+ c->isa_level &= ~MIPS_CPU_ISA_M32R2;
+
+ /* FPU is not properly detected on JZ4760(B). */
+ if (c->processor_id == 0x2ed0024f)
+ c->options |= MIPS_CPU_FPU;
+
+ fallthrough;
+
+ /*
+ * XBurst CPUs with a processor ID of PRID_COMP_INGENIC_D0 or
+ * PRID_COMP_INGENIC_D1 have an abandoned huge page TLB mode.
+ * This mode is not compatible with the MIPS standard: it causes
+ * a TLB miss and an infinite loop (line 21 in tlb-funcs.S) when
+ * starting the init process. HPTLB mode is the default after
+ * chip reset, so write 0xa9000000 to cp0 register 5 sel 4 to
+ * switch back to VTLB mode and prevent getting stuck.
+ */
+ case PRID_COMP_INGENIC_D1:
+ write_c0_page_ctrl(XBURST_PAGECTRL_HPTLB_DIS);
+ break;
+
+ default:
+ break;
+ }
+ fallthrough;
+
+ /* XBurst®1 with MXU2.0 SIMD ISA */
+ case PRID_IMP_XBURST_REV2:
+ /* Ingenic uses the WA bit to achieve write-combine memory writes */
+ c->writecombine = _CACHE_CACHABLE_WA;
+ c->cputype = CPU_XBURST;
+ __cpu_name[cpu] = "Ingenic XBurst";
+ break;
+
+ /* XBurst®2 with MXU2.1 SIMD ISA */
+ case PRID_IMP_XBURST2:
+ c->cputype = CPU_XBURST;
+ __cpu_name[cpu] = "Ingenic XBurst II";
+ break;
+
+ default:
+ panic("Unknown Ingenic Processor ID!");
+ break;
+ }
+}
+
+#ifdef CONFIG_64BIT
+/* For use by uaccess.h */
+u64 __ua_limit;
+EXPORT_SYMBOL(__ua_limit);
+#endif
+
+const char *__cpu_name[NR_CPUS];
+const char *__elf_platform;
+const char *__elf_base_platform;
+
+void cpu_probe(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
+
+ /*
+ * Set a default ELF platform; the cpu probe may later
+ * overwrite it with a more precise value
+ */
+ set_elf_platform(cpu, "mips");
+
+ c->processor_id = PRID_IMP_UNKNOWN;
+ c->fpu_id = FPIR_IMP_NONE;
+ c->cputype = CPU_UNKNOWN;
+ c->writecombine = _CACHE_UNCACHED;
+
+ c->fpu_csr31 = FPU_CSR_RN;
+ c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+
+ c->processor_id = read_c0_prid();
+ switch (c->processor_id & PRID_COMP_MASK) {
+ case PRID_COMP_LEGACY:
+ cpu_probe_legacy(c, cpu);
+ break;
+ case PRID_COMP_MIPS:
+ cpu_probe_mips(c, cpu);
+ break;
+ case PRID_COMP_ALCHEMY:
+ case PRID_COMP_NETLOGIC:
+ cpu_probe_alchemy(c, cpu);
+ break;
+ case PRID_COMP_SIBYTE:
+ cpu_probe_sibyte(c, cpu);
+ break;
+ case PRID_COMP_BROADCOM:
+ cpu_probe_broadcom(c, cpu);
+ break;
+ case PRID_COMP_SANDCRAFT:
+ cpu_probe_sandcraft(c, cpu);
+ break;
+ case PRID_COMP_NXP:
+ cpu_probe_nxp(c, cpu);
+ break;
+ case PRID_COMP_CAVIUM:
+ cpu_probe_cavium(c, cpu);
+ break;
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
+ case PRID_COMP_INGENIC_13:
+ case PRID_COMP_INGENIC_D0:
+ case PRID_COMP_INGENIC_D1:
+ case PRID_COMP_INGENIC_E1:
+ cpu_probe_ingenic(c, cpu);
+ break;
+ }
+
+ BUG_ON(!__cpu_name[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ /*
+ * Platform code can force the cpu type to optimize code
+	 * generation. In that case be sure the cpu type is correctly
+	 * set up manually, otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
+ if (cpu_has_rixi) {
+ /* Enable the RIXI exceptions */
+ set_c0_pagegrain(PG_IEC);
+ back_to_back_c0_hazard();
+ /* Verify the IEC bit is set */
+ if (read_c0_pagegrain() & PG_IEC)
+ c->options |= MIPS_CPU_RIXIEX;
+ }
+
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (mips_dsp_disabled)
+ c->ases &= ~(MIPS_ASE_DSP | MIPS_ASE_DSP2P);
+
+ if (mips_htw_disabled) {
+ c->options &= ~MIPS_CPU_HTW;
+ write_c0_pwctl(read_c0_pwctl() &
+ ~(1 << MIPS_PWCTL_PWEN_SHIFT));
+ }
+
+ if (c->options & MIPS_CPU_FPU)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
+
+ if (cpu_has_mips_r2_r6) {
+ c->srsets = ((read_c0_srsctl() >> 26) & 0x0f) + 1;
+ /* R2 has Performance Counter Interrupt indicator */
+ c->options |= MIPS_CPU_PCI;
+ }
+ else
+ c->srsets = 1;
+
+ if (cpu_has_mips_r6)
+ elf_hwcap |= HWCAP_MIPS_R6;
+
+ if (cpu_has_msa) {
+ c->msa_id = cpu_get_msa_id();
+ WARN(c->msa_id & MSA_IR_WRPF,
+ "Vector register partitioning unimplemented!");
+ elf_hwcap |= HWCAP_MIPS_MSA;
+ }
+
+ if (cpu_has_mips16)
+ elf_hwcap |= HWCAP_MIPS_MIPS16;
+
+ if (cpu_has_mdmx)
+ elf_hwcap |= HWCAP_MIPS_MDMX;
+
+ if (cpu_has_mips3d)
+ elf_hwcap |= HWCAP_MIPS_MIPS3D;
+
+ if (cpu_has_smartmips)
+ elf_hwcap |= HWCAP_MIPS_SMARTMIPS;
+
+ if (cpu_has_dsp)
+ elf_hwcap |= HWCAP_MIPS_DSP;
+
+ if (cpu_has_dsp2)
+ elf_hwcap |= HWCAP_MIPS_DSP2;
+
+ if (cpu_has_dsp3)
+ elf_hwcap |= HWCAP_MIPS_DSP3;
+
+ if (cpu_has_mips16e2)
+ elf_hwcap |= HWCAP_MIPS_MIPS16E2;
+
+ if (cpu_has_loongson_mmi)
+ elf_hwcap |= HWCAP_LOONGSON_MMI;
+
+ if (cpu_has_loongson_ext)
+ elf_hwcap |= HWCAP_LOONGSON_EXT;
+
+ if (cpu_has_loongson_ext2)
+ elf_hwcap |= HWCAP_LOONGSON_EXT2;
+
+ if (cpu_has_vz)
+ cpu_probe_vz(c);
+
+ cpu_probe_vmbits(c);
+
+	/*
+	 * Synthesize CPUCFG data if running on Loongson processors;
+	 * no-op otherwise.
+	 *
+	 * This looks at previously probed features, so keep this at the bottom.
+	 */
+ loongson3_cpucfg_synthesize_data(c);
+
+#ifdef CONFIG_64BIT
+ if (cpu == 0)
+ __ua_limit = ~((1ull << cpu_vmbits) - 1);
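+	/*
+	 * Example (illustrative): with cpu_vmbits == 40, __ua_limit
+	 * becomes 0xffffff0000000000, so uaccess range checks reduce to
+	 * a single compare against it.
+	 */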
+#endif
+
+ reserve_exception_space(0, 0x1000);
+}
+
+void cpu_report(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
+ if (c->options & MIPS_CPU_FPU)
+ printk(KERN_INFO "FPU revision is: %08x\n", c->fpu_id);
+ if (cpu_has_msa)
+ pr_info("MSA revision is: %08x\n", c->msa_id);
+}
+
+void cpu_set_cluster(struct cpuinfo_mips *cpuinfo, unsigned int cluster)
+{
+	/* Ensure the cluster number fits in the field */
+ WARN_ON(cluster > (MIPS_GLOBALNUMBER_CLUSTER >>
+ MIPS_GLOBALNUMBER_CLUSTER_SHF));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CLUSTER;
+ cpuinfo->globalnumber |= cluster << MIPS_GLOBALNUMBER_CLUSTER_SHF;
+}
+
+void cpu_set_core(struct cpuinfo_mips *cpuinfo, unsigned int core)
+{
+ /* Ensure the core number fits in the field */
+ WARN_ON(core > (MIPS_GLOBALNUMBER_CORE >> MIPS_GLOBALNUMBER_CORE_SHF));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_CORE;
+ cpuinfo->globalnumber |= core << MIPS_GLOBALNUMBER_CORE_SHF;
+}
+
+void cpu_set_vpe_id(struct cpuinfo_mips *cpuinfo, unsigned int vpe)
+{
+ /* Ensure the VP(E) ID fits in the field */
+ WARN_ON(vpe > (MIPS_GLOBALNUMBER_VP >> MIPS_GLOBALNUMBER_VP_SHF));
+
+ /* Ensure we're not using VP(E)s without support */
+ WARN_ON(vpe && !IS_ENABLED(CONFIG_MIPS_MT_SMP) &&
+ !IS_ENABLED(CONFIG_CPU_MIPSR6));
+
+ cpuinfo->globalnumber &= ~MIPS_GLOBALNUMBER_VP;
+ cpuinfo->globalnumber |= vpe << MIPS_GLOBALNUMBER_VP_SHF;
+}
diff --git a/arch/mips/kernel/cpu-r3k-probe.c b/arch/mips/kernel/cpu-r3k-probe.c
new file mode 100644
index 000000000..be93469c0
--- /dev/null
+++ b/arch/mips/kernel/cpu-r3k-probe.c
@@ -0,0 +1,152 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/stddef.h>
+#include <linux/export.h>
+
+#include <asm/bugs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/elf.h>
+#include <asm/traps.h>
+
+#include "fpu-probe.h"
+
+/* Hardware capabilities */
+unsigned int elf_hwcap __read_mostly;
+EXPORT_SYMBOL_GPL(elf_hwcap);
+
+void __init check_bugs32(void)
+{
+}
+
+/*
+ * Probe whether the CPU has a config register by toggling the alternate
+ * cache bit and seeing whether it matters.
+ * It's used by cpu_probe() to distinguish the R3000A from the R3081.
+ */
+static inline int cpu_has_confreg(void)
+{
+#ifdef CONFIG_CPU_R3000
+ extern unsigned long r3k_cache_size(unsigned long);
+ unsigned long size1, size2;
+ unsigned long cfg = read_c0_conf();
+
+ size1 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg ^ R30XX_CONF_AC);
+ size2 = r3k_cache_size(ST0_ISC);
+ write_c0_conf(cfg);
+ return size1 != size2;
+#else
+ return 0;
+#endif
+}
+
+static inline void set_elf_platform(int cpu, const char *plat)
+{
+ if (cpu == 0)
+ __elf_platform = plat;
+}
+
+const char *__cpu_name[NR_CPUS];
+const char *__elf_platform;
+const char *__elf_base_platform;
+
+void cpu_probe(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int cpu = smp_processor_id();
+
+ /*
+	 * Set a default elf platform; cpu probing may later
+	 * overwrite it with a more precise value.
+ */
+ set_elf_platform(cpu, "mips");
+
+ c->processor_id = PRID_IMP_UNKNOWN;
+ c->fpu_id = FPIR_IMP_NONE;
+ c->cputype = CPU_UNKNOWN;
+ c->writecombine = _CACHE_UNCACHED;
+
+ c->fpu_csr31 = FPU_CSR_RN;
+ c->fpu_msk31 = FPU_CSR_RSVD | FPU_CSR_ABS2008 | FPU_CSR_NAN2008 |
+ FPU_CSR_CONDX | FPU_CSR_FS;
+
+ c->srsets = 1;
+
+ c->processor_id = read_c0_prid();
+ switch (c->processor_id & (PRID_COMP_MASK | PRID_IMP_MASK)) {
+ case PRID_COMP_LEGACY | PRID_IMP_R2000:
+ c->cputype = CPU_R2000;
+ __cpu_name[cpu] = "R2000";
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ case PRID_COMP_LEGACY | PRID_IMP_R3000:
+ if ((c->processor_id & PRID_REV_MASK) == PRID_REV_R3000A) {
+ if (cpu_has_confreg()) {
+ c->cputype = CPU_R3081E;
+ __cpu_name[cpu] = "R3081";
+ } else {
+ c->cputype = CPU_R3000A;
+ __cpu_name[cpu] = "R3000A";
+ }
+ } else {
+ c->cputype = CPU_R3000;
+ __cpu_name[cpu] = "R3000";
+ }
+ c->options = MIPS_CPU_TLB | MIPS_CPU_3K_CACHE |
+ MIPS_CPU_NOFPUEX;
+ if (__cpu_has_fpu())
+ c->options |= MIPS_CPU_FPU;
+ c->tlbsize = 64;
+ break;
+ }
+
+ BUG_ON(!__cpu_name[cpu]);
+ BUG_ON(c->cputype == CPU_UNKNOWN);
+
+ /*
+ * Platform code can force the cpu type to optimize code
+	 * generation. In that case be sure the cpu type is correctly
+	 * set up manually, otherwise it could trigger some nasty bugs.
+ */
+ BUG_ON(current_cpu_type() != c->cputype);
+
+ if (mips_fpu_disabled)
+ c->options &= ~MIPS_CPU_FPU;
+
+ if (c->options & MIPS_CPU_FPU)
+ cpu_set_fpu_opts(c);
+ else
+ cpu_set_nofpu_opts(c);
+
+ reserve_exception_space(0, 0x400);
+}
+
+void cpu_report(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ pr_info("CPU%d revision is: %08x (%s)\n",
+ smp_processor_id(), c->processor_id, cpu_name_string());
+ if (c->options & MIPS_CPU_FPU)
+ pr_info("FPU revision is: %08x\n", c->fpu_id);
+}
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
new file mode 100644
index 000000000..81845ba04
--- /dev/null
+++ b/arch/mips/kernel/crash.c
@@ -0,0 +1,103 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/reboot.h>
+#include <linux/kexec.h>
+#include <linux/memblock.h>
+#include <linux/crash_dump.h>
+#include <linux/delay.h>
+#include <linux/irq.h>
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+
+/* This keeps track of which CPU is the crashing one. */
+static int crashing_cpu = -1;
+static cpumask_t cpus_in_crash = CPU_MASK_NONE;
+
+#ifdef CONFIG_SMP
+static void crash_shutdown_secondary(void *passed_regs)
+{
+ struct pt_regs *regs = passed_regs;
+ int cpu = smp_processor_id();
+
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ if (!cpumask_test_cpu(cpu, &cpus_in_crash))
+ crash_save_cpu(regs, cpu);
+ cpumask_set_cpu(cpu, &cpus_in_crash);
+
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+
+ /* NOTREACHED */
+}
+
+static void crash_kexec_prepare_cpus(void)
+{
+ static int cpus_stopped;
+ unsigned int msecs;
+ unsigned int ncpus;
+
+ if (cpus_stopped)
+ return;
+
+	ncpus = num_online_cpus() - 1; /* Excluding the panic cpu */
+
+ smp_call_function(crash_shutdown_secondary, NULL, 0);
+ smp_wmb();
+
+	/*
+	 * The crash CPU sends an IPI and waits up to 10 seconds for
+	 * the other CPUs to respond.
+	 */
+ pr_emerg("Sending IPI to other cpus...\n");
+ msecs = 10000;
+ while ((cpumask_weight(&cpus_in_crash) < ncpus) && (--msecs > 0)) {
+ cpu_relax();
+ mdelay(1);
+ }
+
+ cpus_stopped = 1;
+}
+
+/* Override the weak function in kernel/panic.c */
+void crash_smp_send_stop(void)
+{
+ if (_crash_smp_send_stop)
+ _crash_smp_send_stop();
+
+ crash_kexec_prepare_cpus();
+}
+
+#else /* !defined(CONFIG_SMP) */
+static void crash_kexec_prepare_cpus(void) {}
+#endif /* !defined(CONFIG_SMP) */
+
+void default_machine_crash_shutdown(struct pt_regs *regs)
+{
+ local_irq_disable();
+ crashing_cpu = smp_processor_id();
+ crash_save_cpu(regs, crashing_cpu);
+ crash_kexec_prepare_cpus();
+ cpumask_set_cpu(crashing_cpu, &cpus_in_crash);
+}
diff --git a/arch/mips/kernel/crash_dump.c b/arch/mips/kernel/crash_dump.c
new file mode 100644
index 000000000..6e50f4902
--- /dev/null
+++ b/arch/mips/kernel/crash_dump.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/highmem.h>
+#include <linux/crash_dump.h>
+#include <linux/uio.h>
+
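+/*
+ * copy_oldmem_page() - copy one page of the crashed kernel's memory for
+ * /proc/vmcore. The pfn is mapped with kmap_local_pfn() so this also
+ * works for highmem pages.
+ */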
+ssize_t copy_oldmem_page(struct iov_iter *iter, unsigned long pfn,
+ size_t csize, unsigned long offset)
+{
+ void *vaddr;
+
+ if (!csize)
+ return 0;
+
+ vaddr = kmap_local_pfn(pfn);
+ csize = copy_to_iter(vaddr + offset, csize, iter);
+ kunmap_local(vaddr);
+
+ return csize;
+}
diff --git a/arch/mips/kernel/csrc-bcm1480.c b/arch/mips/kernel/csrc-bcm1480.c
new file mode 100644
index 000000000..6c18a138f
--- /dev/null
+++ b/arch/mips/kernel/csrc-bcm1480.c
@@ -0,0 +1,48 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000,2001,2004 Broadcom Corporation
+ */
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/bcm1480_regs.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/bcm1480_int.h>
+#include <asm/sibyte/bcm1480_scd.h>
+
+#include <asm/sibyte/sb1250.h>
+
+static u64 bcm1480_hpt_read(struct clocksource *cs)
+{
+ return (u64) __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
+struct clocksource bcm1480_clocksource = {
+ .name = "zbbus-cycles",
+ .rating = 200,
+ .read = bcm1480_hpt_read,
+ .mask = CLOCKSOURCE_MASK(64),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace sb1480_read_sched_clock(void)
+{
+ return __raw_readq(IOADDR(A_SCD_ZBBUS_CYCLE_COUNT));
+}
+
+void __init sb1480_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1480_clocksource;
+ unsigned int plldiv;
+ unsigned long zbbus;
+
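+	/*
+	 * The ZBbus clock is (plldiv / 2) * 50 MHz, i.e. programmable in
+	 * 25 MHz steps; for example (illustrative) plldiv == 12 gives
+	 * 300 MHz.
+	 */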
+ plldiv = G_BCM1480_SYS_PLL_DIV(__raw_readq(IOADDR(A_SCD_SYSTEM_CFG)));
+ zbbus = ((plldiv >> 1) * 50000000) + ((plldiv & 1) * 25000000);
+ clocksource_register_hz(cs, zbbus);
+
+ sched_clock_register(sb1480_read_sched_clock, 64, zbbus);
+}
diff --git a/arch/mips/kernel/csrc-ioasic.c b/arch/mips/kernel/csrc-ioasic.c
new file mode 100644
index 000000000..bad740ad3
--- /dev/null
+++ b/arch/mips/kernel/csrc-ioasic.c
@@ -0,0 +1,65 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * DEC I/O ASIC's counter clocksource
+ *
+ * Copyright (C) 2008 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+#include <linux/init.h>
+
+#include <asm/ds1287.h>
+#include <asm/time.h>
+#include <asm/dec/ioasic.h>
+#include <asm/dec/ioasic_addrs.h>
+
+static u64 dec_ioasic_hpt_read(struct clocksource *cs)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+static struct clocksource clocksource_dec = {
+ .name = "dec-ioasic",
+ .read = dec_ioasic_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace dec_ioasic_read_sched_clock(void)
+{
+ return ioasic_read(IO_REG_FCTR);
+}
+
+int __init dec_ioasic_clocksource_init(void)
+{
+ unsigned int freq;
+ u32 start, end;
+ int i = HZ / 8;
+
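+	/*
+	 * Synchronize to a DS1287 timer tick, then count free-running
+	 * counter cycles across HZ/8 further ticks (1/8 of a second),
+	 * so freq = (end - start) * 8.
+	 */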
+ ds1287_timer_state();
+ while (!ds1287_timer_state())
+ ;
+
+ start = dec_ioasic_hpt_read(&clocksource_dec);
+
+ while (i--)
+ while (!ds1287_timer_state())
+ ;
+
+ end = dec_ioasic_hpt_read(&clocksource_dec);
+
+ freq = (end - start) * 8;
+
+ /* An early revision of the I/O ASIC didn't have the counter. */
+ if (!freq)
+ return -ENXIO;
+
+ printk(KERN_INFO "I/O ASIC clock frequency %dHz\n", freq);
+
+ clocksource_dec.rating = 200 + freq / 10000000;
+ clocksource_register_hz(&clocksource_dec, freq);
+
+ sched_clock_register(dec_ioasic_read_sched_clock, 32, freq);
+
+ return 0;
+}
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
new file mode 100644
index 000000000..edc4afc08
--- /dev/null
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2007 by Ralf Baechle
+ */
+#include <linux/clocksource.h>
+#include <linux/cpufreq.h>
+#include <linux/init.h>
+#include <linux/sched_clock.h>
+
+#include <asm/time.h>
+
+static u64 c0_hpt_read(struct clocksource *cs)
+{
+ return read_c0_count();
+}
+
+static struct clocksource clocksource_mips = {
+ .name = "MIPS",
+ .read = c0_hpt_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
+{
+ return read_c0_count();
+}
+
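+/*
+ * Read the CP0 Count register via RDHWR $2, the same mechanism the
+ * VDSO uses from user mode once HWREna has been configured by
+ * configure_hwrena().
+ */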
+static inline unsigned int rdhwr_count(void)
+{
+ unsigned int count;
+
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set mips32r2\n"
+ " rdhwr %0, $2\n"
+ " .set pop\n"
+ : "=r" (count));
+
+ return count;
+}
+
+static bool rdhwr_count_usable(void)
+{
+ unsigned int prev, curr, i;
+
+ /*
+ * Older QEMUs have a broken implementation of RDHWR for the CP0 count
+ * which always returns a constant value. Try to identify this and don't
+ * use it in the VDSO if it is broken. This workaround can be removed
+ * once the fix has been in QEMU stable for a reasonable amount of time.
+ */
+ for (i = 0, prev = rdhwr_count(); i < 100; i++) {
+ curr = rdhwr_count();
+
+ if (curr != prev)
+ return true;
+
+ prev = curr;
+ }
+
+ pr_warn("Not using R4K clocksource in VDSO due to broken RDHWR\n");
+ return false;
+}
+
+#ifdef CONFIG_CPU_FREQ
+
+static bool __read_mostly r4k_clock_unstable;
+
+static void r4k_clocksource_unstable(char *reason)
+{
+ if (r4k_clock_unstable)
+ return;
+
+ r4k_clock_unstable = true;
+
+ pr_info("R4K timer is unstable due to %s\n", reason);
+
+ clocksource_mark_unstable(&clocksource_mips);
+}
+
+static int r4k_cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ if (val == CPUFREQ_POSTCHANGE)
+ r4k_clocksource_unstable("CPU frequency change");
+
+ return 0;
+}
+
+static struct notifier_block r4k_cpufreq_notifier = {
+ .notifier_call = r4k_cpufreq_callback,
+};
+
+static int __init r4k_register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&r4k_cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(r4k_register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
+int __init init_r4k_clocksource(void)
+{
+ if (!cpu_has_counter || !mips_hpt_frequency)
+ return -ENXIO;
+
+ /* Calculate a somewhat reasonable rating value */
+ clocksource_mips.rating = 200 + mips_hpt_frequency / 10000000;
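+	/* e.g. (illustrative) a 100 MHz Count register yields a rating of 210 */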
+
+ /*
+ * R2 onwards makes the count accessible to user mode so it can be used
+ * by the VDSO (HWREna is configured by configure_hwrena()).
+ */
+ if (cpu_has_mips_r2_r6 && rdhwr_count_usable())
+ clocksource_mips.vdso_clock_mode = VDSO_CLOCKMODE_R4K;
+
+ clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+
+#ifndef CONFIG_CPU_FREQ
+ sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
+
+ return 0;
+}
diff --git a/arch/mips/kernel/csrc-sb1250.c b/arch/mips/kernel/csrc-sb1250.c
new file mode 100644
index 000000000..fa2fa3e10
--- /dev/null
+++ b/arch/mips/kernel/csrc-sb1250.c
@@ -0,0 +1,71 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2000, 2001 Broadcom Corporation
+ */
+#include <linux/clocksource.h>
+#include <linux/sched_clock.h>
+
+#include <asm/addrspace.h>
+#include <asm/io.h>
+#include <asm/time.h>
+
+#include <asm/sibyte/sb1250.h>
+#include <asm/sibyte/sb1250_regs.h>
+#include <asm/sibyte/sb1250_int.h>
+#include <asm/sibyte/sb1250_scd.h>
+
+#define SB1250_HPT_NUM 3
+#define SB1250_HPT_VALUE M_SCD_TIMER_CNT /* max value */
+
+/*
+ * The HPT is free running from SB1250_HPT_VALUE down to 0 then starts over
+ * again.
+ */
+static inline u64 sb1250_hpt_get_cycles(void)
+{
+ unsigned int count;
+ void __iomem *addr;
+
+ addr = IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM, R_SCD_TIMER_CNT));
+ count = G_SCD_TIMER_CNT(__raw_readq(addr));
+
+ return SB1250_HPT_VALUE - count;
+}
+
+static u64 sb1250_hpt_read(struct clocksource *cs)
+{
+ return sb1250_hpt_get_cycles();
+}
+
+struct clocksource bcm1250_clocksource = {
+ .name = "bcm1250-counter-3",
+ .rating = 200,
+ .read = sb1250_hpt_read,
+ .mask = CLOCKSOURCE_MASK(23),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static u64 notrace sb1250_read_sched_clock(void)
+{
+ return sb1250_hpt_get_cycles();
+}
+
+void __init sb1250_clocksource_init(void)
+{
+ struct clocksource *cs = &bcm1250_clocksource;
+
+	/* Set up the HPT using timer #3 but do not enable its irq */
+ __raw_writeq(0,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+ __raw_writeq(SB1250_HPT_VALUE,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_INIT)));
+ __raw_writeq(M_SCD_TIMER_ENABLE | M_SCD_TIMER_MODE_CONTINUOUS,
+ IOADDR(A_SCD_TIMER_REGISTER(SB1250_HPT_NUM,
+ R_SCD_TIMER_CFG)));
+
+ clocksource_register_hz(cs, V_SCD_TIMER_FREQ);
+
+ sched_clock_register(sb1250_read_sched_clock, 23, V_SCD_TIMER_FREQ);
+}
diff --git a/arch/mips/kernel/early_printk.c b/arch/mips/kernel/early_printk.c
new file mode 100644
index 000000000..4a1647ddf
--- /dev/null
+++ b/arch/mips/kernel/early_printk.c
@@ -0,0 +1,41 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2002, 2003, 06, 07 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * written by Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/printk.h>
+#include <linux/init.h>
+
+#include <asm/setup.h>
+
+static void early_console_write(struct console *con, const char *s, unsigned n)
+{
+ while (n-- && *s) {
+ if (*s == '\n')
+ prom_putchar('\r');
+ prom_putchar(*s);
+ s++;
+ }
+}
+
+static struct console early_console_prom = {
+ .name = "early",
+ .write = early_console_write,
+ .flags = CON_PRINTBUFFER | CON_BOOT,
+ .index = -1
+};
+
+void __init setup_early_printk(void)
+{
+ if (early_console)
+ return;
+ early_console = &early_console_prom;
+
+ register_console(&early_console_prom);
+}
diff --git a/arch/mips/kernel/early_printk_8250.c b/arch/mips/kernel/early_printk_8250.c
new file mode 100644
index 000000000..567c6ec0c
--- /dev/null
+++ b/arch/mips/kernel/early_printk_8250.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * 8250/16550-type serial ports prom_putchar()
+ *
+ * Copyright (C) 2010 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/io.h>
+#include <linux/serial_core.h>
+#include <linux/serial_reg.h>
+#include <asm/setup.h>
+
+static void __iomem *serial8250_base;
+static unsigned int serial8250_reg_shift;
+static unsigned int serial8250_tx_timeout;
+
+void setup_8250_early_printk_port(unsigned long base, unsigned int reg_shift,
+ unsigned int timeout)
+{
+ serial8250_base = (void __iomem *)base;
+ serial8250_reg_shift = reg_shift;
+ serial8250_tx_timeout = timeout;
+}
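+
+/*
+ * A platform's early setup code would typically call this before any
+ * console output is needed, e.g. (illustrative values only):
+ *
+ *	setup_8250_early_printk_port(CKSEG1ADDR(0x1f000900), 2, 1024);
+ */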
+
+static inline u8 serial_in(int offset)
+{
+ return readb(serial8250_base + (offset << serial8250_reg_shift));
+}
+
+static inline void serial_out(int offset, char value)
+{
+ writeb(value, serial8250_base + (offset << serial8250_reg_shift));
+}
+
+void prom_putchar(char c)
+{
+ unsigned int timeout;
+ int status, bits;
+
+ if (!serial8250_base)
+ return;
+
+ timeout = serial8250_tx_timeout;
+ bits = UART_LSR_TEMT | UART_LSR_THRE;
+
+ do {
+ status = serial_in(UART_LSR);
+
+ if (--timeout == 0)
+ break;
+ } while ((status & bits) != bits);
+
+ if (timeout)
+ serial_out(UART_TX, c);
+}
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
new file mode 100644
index 000000000..7aa2c2360
--- /dev/null
+++ b/arch/mips/kernel/elf.c
@@ -0,0 +1,343 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-info.h>
+#include <asm/fpu.h>
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
+bool mips_use_nan_legacy;
+bool mips_use_nan_2008;
+
+/* FPU modes */
+enum {
+ FP_FRE,
+ FP_FR0,
+ FP_FR1,
+};
+
+/**
+ * struct mode_req - ABI FPU mode requirements
+ * @single: The program being loaded needs an FPU but it will only issue
+ * single precision instructions meaning that it can execute in
+ * either FR0 or FR1.
+ * @soft: The soft(-float) requirement means that the program being
+ *		loaded has no FPU dependency at all (i.e. it contains no
+ * FPU instructions).
+ * @fr1: The program being loaded depends on FPU being in FR=1 mode.
+ * @frdefault: The program being loaded depends on the default FPU mode.
+ * That is FR0 for O32 and FR1 for N32/N64.
+ * @fre: The program being loaded depends on FPU with FRE=1. This mode is
+ * a bridge which uses FR=1 whilst still being able to maintain
+ * full compatibility with pre-existing code using the O32 FP32
+ * ABI.
+ *
+ * More information about the FP ABIs can be found here:
+ *
+ * https://dmz-portal.mips.com/wiki/MIPS_O32_ABI_-_FR0_and_FR1_Interlinking#10.4.1._Basic_mode_set-up
+ *
+ */
+struct mode_req {
+ bool single;
+ bool soft;
+ bool fr1;
+ bool frdefault;
+ bool fre;
+};
+
+static const struct mode_req fpu_reqs[] = {
+ [MIPS_ABI_FP_ANY] = { true, true, true, true, true },
+ [MIPS_ABI_FP_DOUBLE] = { false, false, false, true, true },
+ [MIPS_ABI_FP_SINGLE] = { true, false, false, false, false },
+ [MIPS_ABI_FP_SOFT] = { false, true, false, false, false },
+ [MIPS_ABI_FP_OLD_64] = { false, false, false, false, false },
+ [MIPS_ABI_FP_XX] = { false, false, true, true, true },
+ [MIPS_ABI_FP_64] = { false, false, true, false, false },
+ [MIPS_ABI_FP_64A] = { false, false, true, false, true }
+};
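+
+/*
+ * Reading the table above: e.g. MIPS_ABI_FP_XX objects can run with
+ * FR=1, the default FR mode or FRE, but cannot be satisfied by
+ * soft-float or by a single-precision-only configuration.
+ */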
+
+/*
+ * Mode requirements when .MIPS.abiflags is not present in the ELF.
+ * Not present means that everything is acceptable except FR1.
+ */
+static struct mode_req none_req = { true, true, false, true, true };
+
+int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
+ bool is_interp, struct arch_elf_state *state)
+{
+ union {
+ struct elf32_hdr e32;
+ struct elf64_hdr e64;
+ } *ehdr = _ehdr;
+ struct elf32_phdr *phdr32 = _phdr;
+ struct elf64_phdr *phdr64 = _phdr;
+ struct mips_elf_abiflags_v0 abiflags;
+ bool elf32;
+ u32 flags;
+ int ret;
+ loff_t pos;
+
+ elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+ flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
+ /* Let's see if this is an O32 ELF */
+ if (elf32) {
+ if (flags & EF_MIPS_FP64) {
+ /*
+ * Set MIPS_ABI_FP_OLD_64 for EF_MIPS_FP64. We will override it
+ * later if needed
+ */
+ if (is_interp)
+ state->interp_fp_abi = MIPS_ABI_FP_OLD_64;
+ else
+ state->fp_abi = MIPS_ABI_FP_OLD_64;
+ }
+ if (phdr32->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+
+ if (phdr32->p_filesz < sizeof(abiflags))
+ return -EINVAL;
+ pos = phdr32->p_offset;
+ } else {
+ if (phdr64->p_type != PT_MIPS_ABIFLAGS)
+ return 0;
+ if (phdr64->p_filesz < sizeof(abiflags))
+ return -EINVAL;
+ pos = phdr64->p_offset;
+ }
+
+ ret = kernel_read(elf, &abiflags, sizeof(abiflags), &pos);
+ if (ret < 0)
+ return ret;
+ if (ret != sizeof(abiflags))
+ return -EIO;
+
+ /* Record the required FP ABIs for use by mips_check_elf */
+ if (is_interp)
+ state->interp_fp_abi = abiflags.fp_abi;
+ else
+ state->fp_abi = abiflags.fp_abi;
+
+ return 0;
+}
+
+int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
+ struct arch_elf_state *state)
+{
+ union {
+ struct elf32_hdr e32;
+ struct elf64_hdr e64;
+ } *ehdr = _ehdr;
+ union {
+ struct elf32_hdr e32;
+ struct elf64_hdr e64;
+ } *iehdr = _interp_ehdr;
+ struct mode_req prog_req, interp_req;
+ int fp_abi, interp_fp_abi, abi0, abi1, max_abi;
+ bool elf32;
+ u32 flags;
+
+ elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+ flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
+
+ /*
+ * Determine the NaN personality, reject the binary if not allowed.
+ * Also ensure that any interpreter matches the executable.
+ */
+ if (flags & EF_MIPS_NAN2008) {
+ if (mips_use_nan_2008)
+ state->nan_2008 = 1;
+ else
+ return -ENOEXEC;
+ } else {
+ if (mips_use_nan_legacy)
+ state->nan_2008 = 0;
+ else
+ return -ENOEXEC;
+ }
+ if (has_interpreter) {
+ bool ielf32;
+ u32 iflags;
+
+ ielf32 = iehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
+ iflags = ielf32 ? iehdr->e32.e_flags : iehdr->e64.e_flags;
+
+ if ((flags ^ iflags) & EF_MIPS_NAN2008)
+ return -ELIBBAD;
+ }
+
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return 0;
+
+ fp_abi = state->fp_abi;
+
+ if (has_interpreter) {
+ interp_fp_abi = state->interp_fp_abi;
+
+ abi0 = min(fp_abi, interp_fp_abi);
+ abi1 = max(fp_abi, interp_fp_abi);
+ } else {
+ abi0 = abi1 = fp_abi;
+ }
+
+ if (elf32 && !(flags & EF_MIPS_ABI2)) {
+ /* Default to a mode capable of running code expecting FR=0 */
+ state->overall_fp_mode = cpu_has_mips_r6 ? FP_FRE : FP_FR0;
+
+ /* Allow all ABIs we know about */
+ max_abi = MIPS_ABI_FP_64A;
+ } else {
+ /* MIPS64 code always uses FR=1, thus the default is easy */
+ state->overall_fp_mode = FP_FR1;
+
+ /* Disallow access to the various FPXX & FP64 ABIs */
+ max_abi = MIPS_ABI_FP_SOFT;
+ }
+
+ if ((abi0 > max_abi && abi0 != MIPS_ABI_FP_UNKNOWN) ||
+ (abi1 > max_abi && abi1 != MIPS_ABI_FP_UNKNOWN))
+ return -ELIBBAD;
+
+ /* It's time to determine the FPU mode requirements */
+ prog_req = (abi0 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi0];
+ interp_req = (abi1 == MIPS_ABI_FP_UNKNOWN) ? none_req : fpu_reqs[abi1];
+
+ /*
+ * Check whether the program's and interp's ABIs have a matching FPU
+ * mode requirement.
+ */
+ prog_req.single = interp_req.single && prog_req.single;
+ prog_req.soft = interp_req.soft && prog_req.soft;
+ prog_req.fr1 = interp_req.fr1 && prog_req.fr1;
+ prog_req.frdefault = interp_req.frdefault && prog_req.frdefault;
+ prog_req.fre = interp_req.fre && prog_req.fre;
+
+ /*
+ * Determine the desired FPU mode
+ *
+ * Decision making:
+ *
+	 * - We want FP_FRE if FRE=1 and both FR=1 and FR=0 are false. This
+ * means that we have a combination of program and interpreter
+ * that inherently require the hybrid FP mode.
+	 * - If FR1 and FRDEFAULT are true, that means we hit the any-abi or
+ * fpxx case. This is because, in any-ABI (or no-ABI) we have no FPU
+ * instructions so we don't care about the mode. We will simply use
+ * the one preferred by the hardware. In fpxx case, that ABI can
+ * handle both FR=1 and FR=0, so, again, we simply choose the one
+ * preferred by the hardware. Next, if we only use single-precision
+ * FPU instructions, and the default ABI FPU mode is not good
+	 * (i.e. single + any ABI combination), we again set the FPU mode
+	 * to the one preferred by the hardware. Next, if we know that the code
+ * will only use single-precision instructions, shown by single being
+ * true but frdefault being false, then we again set the FPU mode to
+ * the one that is preferred by the hardware.
+ * - We want FP_FR1 if that's the only matching mode and the default one
+ * is not good.
+	 * - Return with -ELIBBAD if we can't find a matching FPU mode.
+ */
+ if (prog_req.fre && !prog_req.frdefault && !prog_req.fr1)
+ state->overall_fp_mode = FP_FRE;
+ else if ((prog_req.fr1 && prog_req.frdefault) ||
+ (prog_req.single && !prog_req.frdefault))
+ /* Make sure 64-bit MIPS III/IV/64R1 will not pick FR1 */
+ state->overall_fp_mode = ((raw_current_cpu_data.fpu_id & MIPS_FPIR_F64) &&
+ cpu_has_mips_r2_r6) ?
+ FP_FR1 : FP_FR0;
+ else if (prog_req.fr1)
+ state->overall_fp_mode = FP_FR1;
+ else if (!prog_req.fre && !prog_req.frdefault &&
+ !prog_req.fr1 && !prog_req.single && !prog_req.soft)
+ return -ELIBBAD;
+
+ return 0;
+}
+
+static inline void set_thread_fp_mode(int hybrid, int regs32)
+{
+ if (hybrid)
+ set_thread_flag(TIF_HYBRID_FPREGS);
+ else
+ clear_thread_flag(TIF_HYBRID_FPREGS);
+ if (regs32)
+ set_thread_flag(TIF_32BIT_FPREGS);
+ else
+ clear_thread_flag(TIF_32BIT_FPREGS);
+}
+
+void mips_set_personality_fp(struct arch_elf_state *state)
+{
+ /*
+ * This function is only ever called for O32 ELFs so we should
+ * not be worried about N32/N64 binaries.
+ */
+
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return;
+
+ switch (state->overall_fp_mode) {
+ case FP_FRE:
+ set_thread_fp_mode(1, 0);
+ break;
+ case FP_FR0:
+ set_thread_fp_mode(0, 1);
+ break;
+ case FP_FR1:
+ set_thread_fp_mode(0, 0);
+ break;
+ default:
+ BUG();
+ }
+}
+
+/*
+ * Select the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * in FCSR according to the ELF NaN personality.
+ */
+void mips_set_personality_nan(struct arch_elf_state *state)
+{
+ struct cpuinfo_mips *c = &boot_cpu_data;
+ struct task_struct *t = current;
+
+	/*
+	 * Do this early so t->thread.fpu.fcr31 won't be clobbered in case
+	 * we are preempted before the lose_fpu(0) in start_thread.
+	 */
+ lose_fpu(0);
+
+ t->thread.fpu.fcr31 = c->fpu_csr31;
+ switch (state->nan_2008) {
+ case 0:
+ break;
+ case 1:
+ if (!(c->fpu_msk31 & FPU_CSR_NAN2008))
+ t->thread.fpu.fcr31 |= FPU_CSR_NAN2008;
+ if (!(c->fpu_msk31 & FPU_CSR_ABS2008))
+ t->thread.fpu.fcr31 |= FPU_CSR_ABS2008;
+ break;
+ default:
+ BUG();
+ }
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+int mips_elf_read_implies_exec(void *elf_ex, int exstack)
+{
+ /*
+ * Set READ_IMPLIES_EXEC only on non-NX systems that
+ * do not request a specific state via PT_GNU_STACK.
+ */
+ return (!cpu_has_rixi && exstack == EXSTACK_DEFAULT);
+}
+EXPORT_SYMBOL(mips_elf_read_implies_exec);
diff --git a/arch/mips/kernel/entry.S b/arch/mips/kernel/entry.S
new file mode 100644
index 000000000..891393626
--- /dev/null
+++ b/arch/mips/kernel/entry.S
@@ -0,0 +1,185 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/compiler.h>
+#include <asm/irqflags.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/isadep.h>
+#include <asm/thread_info.h>
+
+#ifndef CONFIG_PREEMPTION
+#define resume_kernel restore_all
+#else
+#define __ret_from_irq ret_from_exception
+#endif
+
+ .text
+ .align 5
+#ifndef CONFIG_PREEMPTION
+FEXPORT(ret_from_exception)
+ local_irq_disable # preempt stop
+ b __ret_from_irq
+#endif
+FEXPORT(ret_from_irq)
+ LONG_S s0, TI_REGS($28)
+FEXPORT(__ret_from_irq)
+/*
+ * We can be coming here from a syscall done in the kernel space,
+ * e.g. a failed kernel_execve().
+ */
+resume_userspace_check:
+ LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
+ andi t0, t0, KU_USER
+ beqz t0, resume_kernel
+
+resume_userspace:
+	local_irq_disable		# make sure we don't miss an
+ # interrupt setting need_resched
+ # between sampling and return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ andi t0, a2, _TIF_WORK_MASK # (ignoring syscall_trace)
+ bnez t0, work_pending
+ j restore_all
+
+#ifdef CONFIG_PREEMPTION
+resume_kernel:
+ local_irq_disable
+ lw t0, TI_PRE_COUNT($28)
+ bnez t0, restore_all
+ LONG_L t0, TI_FLAGS($28)
+ andi t1, t0, _TIF_NEED_RESCHED
+ beqz t1, restore_all
+ LONG_L t0, PT_STATUS(sp) # Interrupts off?
+ andi t0, 1
+ beqz t0, restore_all
+ PTR_LA ra, restore_all
+ j preempt_schedule_irq
+#endif
+
+FEXPORT(ret_from_kernel_thread)
+ jal schedule_tail # a0 = struct task_struct *prev
+ move a0, s1
+ jal s0
+ j syscall_exit
+
+FEXPORT(ret_from_fork)
+ jal schedule_tail # a0 = struct task_struct *prev
+
+FEXPORT(syscall_exit)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
+ local_irq_disable # make sure need_resched and
+					# signals don't change between
+ # sampling and return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ li t0, _TIF_ALLWORK_MASK
+ and t0, a2, t0
+ bnez t0, syscall_exit_work
+
+restore_all: # restore full frame
+ .set noat
+ RESTORE_TEMP
+ RESTORE_AT
+ RESTORE_STATIC
+restore_partial: # restore partial frame
+#ifdef CONFIG_TRACE_IRQFLAGS
+ SAVE_STATIC
+ SAVE_AT
+ SAVE_TEMP
+ LONG_L v0, PT_STATUS(sp)
+#if defined(CONFIG_CPU_R3000)
+ and v0, ST0_IEP
+#else
+ and v0, ST0_IE
+#endif
+ beqz v0, 1f
+ jal trace_hardirqs_on
+ b 2f
+1: jal trace_hardirqs_off
+2:
+ RESTORE_TEMP
+ RESTORE_AT
+ RESTORE_STATIC
+#endif
+ RESTORE_SOME
+ RESTORE_SP_AND_RET
+ .set at
+
+work_pending:
+ andi t0, a2, _TIF_NEED_RESCHED # a2 is preloaded with TI_FLAGS
+ beqz t0, work_notifysig
+work_resched:
+ TRACE_IRQS_OFF
+ jal schedule
+
+ local_irq_disable # make sure need_resched and
+					# signals don't change between
+ # sampling and return
+ LONG_L a2, TI_FLAGS($28)
+ andi t0, a2, _TIF_WORK_MASK # is there any work to be done
+ # other than syscall tracing?
+ beqz t0, restore_all
+ andi t0, a2, _TIF_NEED_RESCHED
+ bnez t0, work_resched
+
+work_notifysig: # deal with pending signals and
+ # notify-resume requests
+ move a0, sp
+ li a1, 0
+ jal do_notify_resume # a2 already loaded
+ j resume_userspace_check
+
+FEXPORT(syscall_exit_partial)
+#ifdef CONFIG_DEBUG_RSEQ
+ move a0, sp
+ jal rseq_syscall
+#endif
+ local_irq_disable # make sure need_resched doesn't
+						# change between sampling and return
+ LONG_L a2, TI_FLAGS($28) # current->work
+ li t0, _TIF_ALLWORK_MASK
+ and t0, a2
+ beqz t0, restore_partial
+ SAVE_STATIC
+syscall_exit_work:
+ LONG_L t0, PT_STATUS(sp) # returning to kernel mode?
+ andi t0, t0, KU_USER
+ beqz t0, resume_kernel
+ li t0, _TIF_WORK_SYSCALL_EXIT
+ and t0, a2 # a2 is preloaded with TI_FLAGS
+ beqz t0, work_pending # trace bit set?
+ local_irq_enable # could let syscall_trace_leave()
+ # call schedule() instead
+ TRACE_IRQS_ON
+ move a0, sp
+ jal syscall_trace_leave
+ b resume_userspace
+
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5) || \
+ defined(CONFIG_CPU_MIPSR6) || defined(CONFIG_MIPS_MT)
+
+/*
+ * MIPS32R2 Instruction Hazard Barrier - must be called as a subroutine.
+ *
+ * For C code use the inline version named instruction_hazard().
+ */
+LEAF(mips_ihb)
+ .set MIPS_ISA_LEVEL_RAW
+ jr.hb ra
+ nop
+ END(mips_ihb)
+
+#endif /* CONFIG_CPU_MIPSR2 - CONFIG_CPU_MIPSR6 or CONFIG_MIPS_MT */
diff --git a/arch/mips/kernel/fpu-probe.c b/arch/mips/kernel/fpu-probe.c
new file mode 100644
index 000000000..e689d6a83
--- /dev/null
+++ b/arch/mips/kernel/fpu-probe.c
@@ -0,0 +1,321 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Processor capabilities determination functions.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+
+#include <asm/bugs.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/elf.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+
+#include "fpu-probe.h"
+
+/*
+ * Get the FPU Implementation/Revision.
+ */
+static inline unsigned long cpu_get_fpu_id(void)
+{
+ unsigned long tmp, fpu_id;
+
+ tmp = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+ fpu_id = read_32bit_cp1_register(CP1_REVISION);
+ write_c0_status(tmp);
+ return fpu_id;
+}
+
+/*
+ * Check if the CPU has an external FPU.
+ */
+int __cpu_has_fpu(void)
+{
+ return (cpu_get_fpu_id() & FPIR_IMP_MASK) != FPIR_IMP_NONE;
+}
+
+/*
+ * Determine the FCSR mask for FPU hardware.
+ */
+static inline void cpu_set_fpu_fcsr_mask(struct cpuinfo_mips *c)
+{
+ unsigned long sr, mask, fcsr, fcsr0, fcsr1;
+
+ fcsr = c->fpu_csr31;
+ mask = FPU_CSR_ALL_X | FPU_CSR_ALL_E | FPU_CSR_ALL_S | FPU_CSR_RM;
+
+ sr = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+
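+	/*
+	 * Write the bits outside the exception/rounding mask first as
+	 * all-zeros and then as all-ones; any bit that reads back the
+	 * same value both times is read-only and is recorded in
+	 * fpu_msk31 below.
+	 */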
+ fcsr0 = fcsr & mask;
+ write_32bit_cp1_register(CP1_STATUS, fcsr0);
+ fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+ fcsr1 = fcsr | ~mask;
+ write_32bit_cp1_register(CP1_STATUS, fcsr1);
+ fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+ write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+ write_c0_status(sr);
+
+ c->fpu_msk31 = ~(fcsr0 ^ fcsr1) & ~mask;
+}
+
+/*
+ * Determine the IEEE 754 NaN encodings and ABS.fmt/NEG.fmt execution modes
+ * supported by FPU hardware.
+ */
+static void cpu_set_fpu_2008(struct cpuinfo_mips *c)
+{
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ unsigned long sr, fir, fcsr, fcsr0, fcsr1;
+
+ sr = read_c0_status();
+ __enable_fpu(FPU_AS_IS);
+
+ fir = read_32bit_cp1_register(CP1_REVISION);
+ if (fir & MIPS_FPIR_HAS2008) {
+ fcsr = read_32bit_cp1_register(CP1_STATUS);
+
+			/*
+			 * A MAC2008 toolchain never landed in the real
+			 * world, so we only test whether MAC2008 can be
+			 * disabled and don't try to enable it.
+			 */
+ fcsr0 = fcsr & ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008 |
+ FPU_CSR_MAC2008);
+ write_32bit_cp1_register(CP1_STATUS, fcsr0);
+ fcsr0 = read_32bit_cp1_register(CP1_STATUS);
+
+ fcsr1 = fcsr | FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ write_32bit_cp1_register(CP1_STATUS, fcsr1);
+ fcsr1 = read_32bit_cp1_register(CP1_STATUS);
+
+ write_32bit_cp1_register(CP1_STATUS, fcsr);
+
+ if (c->isa_level & (MIPS_CPU_ISA_M32R2 |
+ MIPS_CPU_ISA_M64R2)) {
+ /*
+ * The bit for MAC2008 might be reused by R6
+ * in future, so we only test for R2-R5.
+ */
+ if (fcsr0 & FPU_CSR_MAC2008)
+ c->options |= MIPS_CPU_MAC_2008_ONLY;
+ }
+
+ if (!(fcsr0 & FPU_CSR_NAN2008))
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ if (fcsr1 & FPU_CSR_NAN2008)
+ c->options |= MIPS_CPU_NAN_2008;
+
+ if ((fcsr0 ^ fcsr1) & FPU_CSR_ABS2008)
+ c->fpu_msk31 &= ~FPU_CSR_ABS2008;
+ else
+ c->fpu_csr31 |= fcsr & FPU_CSR_ABS2008;
+
+ if ((fcsr0 ^ fcsr1) & FPU_CSR_NAN2008)
+ c->fpu_msk31 &= ~FPU_CSR_NAN2008;
+ else
+ c->fpu_csr31 |= fcsr & FPU_CSR_NAN2008;
+ } else {
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ }
+
+ write_c0_status(sr);
+ } else {
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ }
+}
+
+/*
+ * IEEE 754 conformance mode to use. Affects the NaN encoding and the
+ * ABS.fmt/NEG.fmt execution mode.
+ */
+static enum { STRICT, LEGACY, STD2008, RELAXED } ieee754 = STRICT;
+
+/*
+ * Set the IEEE 754 NaN encodings and the ABS.fmt/NEG.fmt execution modes
+ * to be supported by the FPU emulator according to the IEEE 754 conformance
+ * mode selected. Note that "relaxed" straps the emulator so that it
+ * allows 2008-NaN binaries even for legacy processors.
+ */
+static void cpu_set_nofpu_2008(struct cpuinfo_mips *c)
+{
+ c->options &= ~(MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY);
+ c->fpu_csr31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+ c->fpu_msk31 &= ~(FPU_CSR_ABS2008 | FPU_CSR_NAN2008);
+
+ switch (ieee754) {
+ case STRICT:
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+ } else {
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ }
+ break;
+ case LEGACY:
+ c->options |= MIPS_CPU_NAN_LEGACY;
+ c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ break;
+ case STD2008:
+ c->options |= MIPS_CPU_NAN_2008;
+ c->fpu_csr31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ c->fpu_msk31 |= FPU_CSR_ABS2008 | FPU_CSR_NAN2008;
+ break;
+ case RELAXED:
+ c->options |= MIPS_CPU_NAN_2008 | MIPS_CPU_NAN_LEGACY;
+ break;
+ }
+}
+
+/*
+ * Override the IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode
+ * according to the "ieee754=" parameter.
+ */
+static void cpu_set_nan_2008(struct cpuinfo_mips *c)
+{
+ switch (ieee754) {
+ case STRICT:
+ mips_use_nan_legacy = !!cpu_has_nan_legacy;
+ mips_use_nan_2008 = !!cpu_has_nan_2008;
+ break;
+ case LEGACY:
+ mips_use_nan_legacy = !!cpu_has_nan_legacy;
+ mips_use_nan_2008 = !cpu_has_nan_legacy;
+ break;
+ case STD2008:
+ mips_use_nan_legacy = !cpu_has_nan_2008;
+ mips_use_nan_2008 = !!cpu_has_nan_2008;
+ break;
+ case RELAXED:
+ mips_use_nan_legacy = true;
+ mips_use_nan_2008 = true;
+ break;
+ }
+}
+
+/*
+ * IEEE 754 NaN encoding and ABS.fmt/NEG.fmt execution mode override
+ * settings:
+ *
+ * strict: accept binaries that request a NaN encoding supported by the FPU
+ * legacy: only accept legacy-NaN binaries
+ * 2008: only accept 2008-NaN binaries
+ * relaxed: accept any binaries regardless of whether supported by the FPU
+ */
+static int __init ieee754_setup(char *s)
+{
+ if (!s)
+ return -1;
+ else if (!strcmp(s, "strict"))
+ ieee754 = STRICT;
+ else if (!strcmp(s, "legacy"))
+ ieee754 = LEGACY;
+ else if (!strcmp(s, "2008"))
+ ieee754 = STD2008;
+ else if (!strcmp(s, "relaxed"))
+ ieee754 = RELAXED;
+ else
+ return -1;
+
+ if (!(boot_cpu_data.options & MIPS_CPU_FPU))
+ cpu_set_nofpu_2008(&boot_cpu_data);
+ cpu_set_nan_2008(&boot_cpu_data);
+
+ return 0;
+}
+
+early_param("ieee754", ieee754_setup);
+
+/*
+ * Set the FIR feature flags for the FPU emulator.
+ */
+static void cpu_set_nofpu_id(struct cpuinfo_mips *c)
+{
+ u32 value;
+
+ value = 0;
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
+ value |= MIPS_FPIR_D | MIPS_FPIR_S;
+ if (c->isa_level & (MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6))
+ value |= MIPS_FPIR_F64 | MIPS_FPIR_L | MIPS_FPIR_W;
+ if (c->options & MIPS_CPU_NAN_2008)
+ value |= MIPS_FPIR_HAS2008;
+ c->fpu_id = value;
+}
+
+/* Determined FPU emulator mask to use for the boot CPU with "nofpu". */
+static unsigned int mips_nofpu_msk31;
+
+/*
+ * Set options for FPU hardware.
+ */
+void cpu_set_fpu_opts(struct cpuinfo_mips *c)
+{
+ c->fpu_id = cpu_get_fpu_id();
+ mips_nofpu_msk31 = c->fpu_msk31;
+
+ if (c->isa_level & (MIPS_CPU_ISA_M32R1 | MIPS_CPU_ISA_M64R1 |
+ MIPS_CPU_ISA_M32R2 | MIPS_CPU_ISA_M64R2 |
+ MIPS_CPU_ISA_M32R5 | MIPS_CPU_ISA_M64R5 |
+ MIPS_CPU_ISA_M32R6 | MIPS_CPU_ISA_M64R6)) {
+ if (c->fpu_id & MIPS_FPIR_3D)
+ c->ases |= MIPS_ASE_MIPS3D;
+ if (c->fpu_id & MIPS_FPIR_UFRP)
+ c->options |= MIPS_CPU_UFR;
+ if (c->fpu_id & MIPS_FPIR_FREP)
+ c->options |= MIPS_CPU_FRE;
+ }
+
+ cpu_set_fpu_fcsr_mask(c);
+ cpu_set_fpu_2008(c);
+ cpu_set_nan_2008(c);
+}
+
+/*
+ * Set options for the FPU emulator.
+ */
+void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
+{
+ c->options &= ~MIPS_CPU_FPU;
+ c->fpu_msk31 = mips_nofpu_msk31;
+
+ cpu_set_nofpu_2008(c);
+ cpu_set_nan_2008(c);
+ cpu_set_nofpu_id(c);
+}
+
+int mips_fpu_disabled;
+
+static int __init fpu_disable(char *s)
+{
+ cpu_set_nofpu_opts(&boot_cpu_data);
+ mips_fpu_disabled = 1;
+
+ return 1;
+}
+
+__setup("nofpu", fpu_disable);
+
diff --git a/arch/mips/kernel/fpu-probe.h b/arch/mips/kernel/fpu-probe.h
new file mode 100644
index 000000000..951ce5089
--- /dev/null
+++ b/arch/mips/kernel/fpu-probe.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+
+#include <linux/kernel.h>
+
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+extern int mips_fpu_disabled;
+
+int __cpu_has_fpu(void);
+void cpu_set_fpu_opts(struct cpuinfo_mips *c);
+void cpu_set_nofpu_opts(struct cpuinfo_mips *c);
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+#define mips_fpu_disabled 1
+
+static inline unsigned long cpu_get_fpu_id(void)
+{
+ return FPIR_IMP_NONE;
+}
+
+static inline int __cpu_has_fpu(void)
+{
+ return 0;
+}
+
+static inline void cpu_set_fpu_opts(struct cpuinfo_mips *c)
+{
+ /* no-op */
+}
+
+static inline void cpu_set_nofpu_opts(struct cpuinfo_mips *c)
+{
+ /* no-op */
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
diff --git a/arch/mips/kernel/ftrace.c b/arch/mips/kernel/ftrace.c
new file mode 100644
index 000000000..8c401e423
--- /dev/null
+++ b/arch/mips/kernel/ftrace.c
@@ -0,0 +1,402 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Code for replacing ftrace calls with jumps.
+ *
+ * Copyright (C) 2007-2008 Steven Rostedt <srostedt@redhat.com>
+ * Copyright (C) 2009, 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ *
+ * Thanks go to Steven Rostedt for writing the original x86 version.
+ */
+
+#include <linux/uaccess.h>
+#include <linux/init.h>
+#include <linux/ftrace.h>
+#include <linux/syscalls.h>
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>
+#include <asm/uasm.h>
+#include <asm/unistd.h>
+
+#include <asm-generic/sections.h>
+
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+#define MCOUNT_OFFSET_INSNS 5
+#else
+#define MCOUNT_OFFSET_INSNS 4
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+/* Arch override because MIPS doesn't need to run this from stop_machine() */
+void arch_ftrace_update_code(int command)
+{
+ ftrace_modify_all_code(command);
+}
+
+#define JAL 0x0c000000 /* jump & link: ip --> ra, jump to target */
+#define ADDR_MASK 0x03ffffff /* op_code|addr : 31...26|25 ....0 */
+#define JUMP_RANGE_MASK ((1UL << 28) - 1)
+
+#define INSN_NOP 0x00000000 /* nop */
+#define INSN_JAL(addr) \
+ ((unsigned int)(JAL | (((addr) >> 2) & ADDR_MASK)))
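+
+/*
+ * Example (illustrative): INSN_JAL(0x80001000) encodes to 0x0c000400;
+ * the word-aligned target fills the low 26 bits and the upper bits of
+ * the PC supply the rest, hence JUMP_RANGE_MASK above.
+ */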
+
+static unsigned int insn_jal_ftrace_caller __read_mostly;
+static unsigned int insn_la_mcount[2] __read_mostly;
+static unsigned int insn_j_ftrace_graph_caller __maybe_unused __read_mostly;
+
+static inline void ftrace_dyn_arch_init_insns(void)
+{
+ u32 *buf;
+ unsigned int v1;
+
+ /* la v1, _mcount */
+ v1 = 3;
+ buf = (u32 *)&insn_la_mcount[0];
+ UASM_i_LA(&buf, v1, MCOUNT_ADDR);
+
+	/* jal (ftrace_caller + 8), jump over the first two instructions */
+ buf = (u32 *)&insn_jal_ftrace_caller;
+ uasm_i_jal(&buf, (FTRACE_ADDR + 8) & JUMP_RANGE_MASK);
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /* j ftrace_graph_caller */
+ buf = (u32 *)&insn_j_ftrace_graph_caller;
+ uasm_i_j(&buf, (unsigned long)ftrace_graph_caller & JUMP_RANGE_MASK);
+#endif
+}
+
+static int ftrace_modify_code(unsigned long ip, unsigned int new_code)
+{
+ int faulted;
+
+ /* *(unsigned int *)ip = new_code; */
+ safe_store_code(new_code, ip, faulted);
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+
+#ifndef CONFIG_64BIT
+static int ftrace_modify_code_2(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip -= 4;
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
+
+static int ftrace_modify_code_2r(unsigned long ip, unsigned int new_code1,
+ unsigned int new_code2)
+{
+ int faulted;
+
+ ip += 4;
+ safe_store_code(new_code2, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ ip -= 4;
+ safe_store_code(new_code1, ip, faulted);
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ flush_icache_range(ip, ip + 8);
+
+ return 0;
+}
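+
+/*
+ * Note the store order: ftrace_modify_code_2() patches the first word
+ * before the second and is used when NOPing a call site, while the _2r
+ * variant writes the second word first so that, when re-enabling a
+ * site, the delay-slot instruction is in place before the branch that
+ * uses it goes live.
+ */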
+#endif
+
+/*
+ * The details about the calling site of mcount on MIPS
+ *
+ * 1. For kernel:
+ *
+ * move at, ra
+ * jal _mcount --> nop
+ * sub sp, sp, 8 --> nop (CONFIG_32BIT)
+ *
+ * 2. For modules:
+ *
+ * 2.1 For KBUILD_MCOUNT_RA_ADDRESS and CONFIG_32BIT
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000005)
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
+ * move at, ra
+ * move $12, ra_address
+ * jalr v1
+ * sub sp, sp, 8
+ * 1: offset = 5 instructions
+ * 2.2 For the Other situations
+ *
+ * lui v1, hi_16bit_of_mcount --> b 1f (0x10000004)
+ * addiu v1, v1, low_16bit_of_mcount --> nop (CONFIG_32BIT)
+ * move at, ra
+ * jalr v1
+ * nop | move $12, ra_address | sub sp, sp, 8
+ * 1: offset = 4 instructions
+ */
+
+#define INSN_B_1F (0x10000000 | MCOUNT_OFFSET_INSNS)
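+
+/*
+ * Example: with MCOUNT_OFFSET_INSNS == 4 this is 0x10000004, the
+ * "b 1f" encoding shown in the module case above (the branch offset is
+ * in words, skipping the remainder of the mcount call sequence).
+ */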
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ /*
+ * If ip is in kernel space, no long call, otherwise, long call is
+ * needed.
+ */
+ new = core_kernel_text(ip) ? INSN_NOP : INSN_B_1F;
+#ifdef CONFIG_64BIT
+ return ftrace_modify_code(ip, new);
+#else
+ /*
+ * On 32 bit MIPS platforms, gcc adds a stack adjust
+ * instruction in the delay slot after the branch to
+ * mcount and expects mcount to restore the sp on return.
+ * This is based on a legacy API and does nothing but
+ * waste instructions so it's being removed at runtime.
+ */
+ return ftrace_modify_code_2(ip, new, INSN_NOP);
+#endif
+}
+
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ unsigned int new;
+ unsigned long ip = rec->ip;
+
+ new = core_kernel_text(ip) ? insn_jal_ftrace_caller : insn_la_mcount[0];
+
+#ifdef CONFIG_64BIT
+ return ftrace_modify_code(ip, new);
+#else
+ return ftrace_modify_code_2r(ip, new, core_kernel_text(ip) ?
+ INSN_NOP : insn_la_mcount[1]);
+#endif
+}
+
+#define FTRACE_CALL_IP ((unsigned long)(&ftrace_call))
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned int new;
+
+ new = INSN_JAL((unsigned long)func);
+
+ return ftrace_modify_code(FTRACE_CALL_IP, new);
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ /* Encode the instructions when booting */
+ ftrace_dyn_arch_init_insns();
+
+ /* Remove "b ftrace_stub" to ensure ftrace_caller() is executed */
+ ftrace_modify_code(MCOUNT_ADDR, INSN_NOP);
+
+ return 0;
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+extern void ftrace_graph_call(void);
+#define FTRACE_GRAPH_CALL_IP ((unsigned long)(&ftrace_graph_call))
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP,
+ insn_j_ftrace_graph_caller);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ return ftrace_modify_code(FTRACE_GRAPH_CALL_IP, INSN_NOP);
+}
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+
+#define S_RA_SP (0xafbf << 16) /* s{d,w} ra, offset(sp) */
+#define S_R_SP (0xafb0 << 16) /* s{d,w} R, offset(sp) */
+#define OFFSET_MASK 0xffff /* stack offset range: 0 ~ PT_SIZE */
+
+unsigned long ftrace_get_parent_ra_addr(unsigned long self_ra, unsigned long
+ old_parent_ra, unsigned long parent_ra_addr, unsigned long fp)
+{
+ unsigned long sp, ip, tmp;
+ unsigned int code;
+ int faulted;
+
+	/*
+	 * For a module, move ip from the return address to just after the
+	 * instruction "lui v1, hi_16bit_of_mcount" (offset is 24); for the
+	 * kernel, move to just after "move ra, at" (offset is 16).
+	 */
+ ip = self_ra - (core_kernel_text(self_ra) ? 16 : 24);
+
+ /*
+	 * Search backwards through the code until we find either a
+	 * non-store instruction or the "s{d,w} ra, offset(sp)" instruction.
+ */
+ do {
+ /* get the code at "ip": code = *(unsigned int *)ip; */
+ safe_load_code(code, ip, faulted);
+
+ if (unlikely(faulted))
+ return 0;
+ /*
+ * If we hit the non-store instruction before finding where the
+ * ra is stored, then this is a leaf function and it does not
+ * store the ra on the stack
+ */
+ if ((code & S_R_SP) != S_R_SP)
+ return parent_ra_addr;
+
+ /* Move to the next instruction */
+ ip -= 4;
+ } while ((code & S_RA_SP) != S_RA_SP);
+
+ sp = fp + (code & OFFSET_MASK);
+
+ /* tmp = *(unsigned long *)sp; */
+ safe_load_stack(tmp, sp, faulted);
+ if (unlikely(faulted))
+ return 0;
+
+ if (tmp == old_parent_ra)
+ return sp;
+ return 0;
+}
+
+#endif /* !KBUILD_MCOUNT_RA_ADDRESS */
+
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent_ra_addr, unsigned long self_ra,
+ unsigned long fp)
+{
+ unsigned long old_parent_ra;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+ int faulted, insns;
+
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * "parent_ra_addr" is the stack address where the return address of
+ * the caller of _mcount is saved.
+ *
+	 * If gcc < 4.5, a leaf function does not save the return address
+	 * on the stack, so we "emulate" one in _mcount's stack space
+	 * and hijack it directly.
+	 * For a non-leaf function, it does save the return address to its own
+	 * stack space, so we cannot hijack it directly, but need to find the
+	 * real stack address, which is done by ftrace_get_parent_ra_addr().
+ *
+ * If gcc >= 4.5, with the new -mmcount-ra-address option, for a
+ * non-leaf function, the location of the return address will be saved
+ * to $12 for us.
+ * For a leaf function, it just puts a zero into $12, so we handle
+ * it in ftrace_graph_caller() of mcount.S.
+ */
+
+ /* old_parent_ra = *parent_ra_addr; */
+ safe_load_stack(old_parent_ra, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+#ifndef KBUILD_MCOUNT_RA_ADDRESS
+ parent_ra_addr = (unsigned long *)ftrace_get_parent_ra_addr(self_ra,
+ old_parent_ra, (unsigned long)parent_ra_addr, fp);
+ /*
+	 * If getting the stack address of the non-leaf function's ra
+	 * failed, stop the function graph tracer and return.
+ */
+ if (parent_ra_addr == NULL)
+ goto out;
+#endif
+ /* *parent_ra_addr = return_hooker; */
+ safe_store_stack(return_hooker, parent_ra_addr, faulted);
+ if (unlikely(faulted))
+ goto out;
+
+ /*
+ * Get the recorded ip of the current mcount calling site in the
+ * __mcount_loc section, which will be used to filter the function
+ * entries configured through the tracing/set_graph_function interface.
+ */
+
+ insns = core_kernel_text(self_ra) ? 2 : MCOUNT_OFFSET_INSNS + 1;
+ self_ra -= (MCOUNT_INSN_SIZE * insns);
+
+ if (function_graph_enter(old_parent_ra, self_ra, fp, NULL))
+ *parent_ra_addr = old_parent_ra;
+ return;
+out:
+ ftrace_graph_stop();
+ WARN_ON(1);
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_FTRACE_SYSCALLS
+
+#ifdef CONFIG_32BIT
+unsigned long __init arch_syscall_addr(int nr)
+{
+ return (unsigned long)sys_call_table[nr - __NR_O32_Linux];
+}
+#endif
+
+#ifdef CONFIG_64BIT
+
+unsigned long __init arch_syscall_addr(int nr)
+{
+#ifdef CONFIG_MIPS32_N32
+ if (nr >= __NR_N32_Linux && nr < __NR_N32_Linux + __NR_N32_Linux_syscalls)
+ return (unsigned long)sysn32_call_table[nr - __NR_N32_Linux];
+#endif
+ if (nr >= __NR_64_Linux && nr < __NR_64_Linux + __NR_64_Linux_syscalls)
+ return (unsigned long)sys_call_table[nr - __NR_64_Linux];
+#ifdef CONFIG_MIPS32_O32
+ if (nr >= __NR_O32_Linux && nr < __NR_O32_Linux + __NR_O32_Linux_syscalls)
+ return (unsigned long)sys32_call_table[nr - __NR_O32_Linux];
+#endif
+
+ return (unsigned long) &sys_ni_syscall;
+}
+#endif
+
+#endif /* CONFIG_FTRACE_SYSCALLS */
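
For reference, the arch_syscall_addr() variants above are pure range checks
against the per-ABI syscall number bases (__NR_O32_Linux = 4000,
__NR_64_Linux = 5000, __NR_N32_Linux = 6000). A minimal user-space sketch of
the same dispatch; the shared table size is a hypothetical stand-in for the
real __NR_*_Linux_syscalls counts:

    #include <stdio.h>

    #define NR_O32_BASE 4000    /* __NR_O32_Linux */
    #define NR_64_BASE  5000    /* __NR_64_Linux */
    #define NR_N32_BASE 6000    /* __NR_N32_Linux */
    #define NR_COUNT    330     /* hypothetical per-ABI syscall count */

    static const char *syscall_abi(int nr)
    {
        if (nr >= NR_N32_BASE && nr < NR_N32_BASE + NR_COUNT)
            return "n32";
        if (nr >= NR_64_BASE && nr < NR_64_BASE + NR_COUNT)
            return "n64";
        if (nr >= NR_O32_BASE && nr < NR_O32_BASE + NR_COUNT)
            return "o32";
        return "none";          /* falls back like sys_ni_syscall */
    }

    int main(void)
    {
        printf("5001 -> %s, 6003 -> %s, 42 -> %s\n",
               syscall_abi(5001), syscall_abi(6003), syscall_abi(42));
        return 0;
    }
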
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
new file mode 100644
index 000000000..3425df601
--- /dev/null
+++ b/arch/mips/kernel/genex.S
@@ -0,0 +1,681 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 2000, 2001, 2003 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2002, 2007 Maciej W. Rozycki
+ * Copyright (C) 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/irqflags.h>
+#include <asm/regdef.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/sync.h>
+#include <asm/thread_info.h>
+
+ __INIT
+
+/*
+ * General exception vector for all other CPUs.
+ *
+ * Be careful when changing this, it has to be at most 128 bytes
+ * to fit into space reserved for the exception handler.
+ */
+NESTED(except_vec3_generic, 0, sp)
+ .set push
+ .set noat
+ mfc0 k1, CP0_CAUSE
+ andi k1, k1, 0x7c
+#ifdef CONFIG_64BIT
+ dsll k1, k1, 1
+#endif
+ PTR_L k0, exception_handlers(k1)
+ jr k0
+ .set pop
+ END(except_vec3_generic)
+
+/*
+ * General exception handler for CPUs with virtual coherency exception.
+ *
+ * Be careful when changing this, it has to be at most 256 (as a special
+ * exception) bytes to fit into space reserved for the exception handler.
+ */
+NESTED(except_vec3_r4000, 0, sp)
+ .set push
+ .set arch=r4000
+ .set noat
+ mfc0 k1, CP0_CAUSE
+ li k0, 31<<2
+ andi k1, k1, 0x7c
+ .set push
+ .set noreorder
+ .set nomacro
+ beq k1, k0, handle_vced
+ li k0, 14<<2
+ beq k1, k0, handle_vcei
+#ifdef CONFIG_64BIT
+ dsll k1, k1, 1
+#endif
+ .set pop
+ PTR_L k0, exception_handlers(k1)
+ jr k0
+
+ /*
+ * Worst case: we may now have two dirty primary cache lines for the same
+ * physical address. We can safely invalidate the line pointed to by
+ * c0_badvaddr because after return from this exception handler the
+ * load / store will be re-executed.
+ */
+handle_vced:
+ MFC0 k0, CP0_BADVADDR
+ li k1, -4 # Is this ...
+ and k0, k1 # ... really needed?
+ mtc0 zero, CP0_TAGLO
+ cache Index_Store_Tag_D, (k0)
+ cache Hit_Writeback_Inv_SD, (k0)
+#ifdef CONFIG_PROC_FS
+ PTR_LA k0, vced_count
+ lw k1, (k0)
+ addiu k1, 1
+ sw k1, (k0)
+#endif
+ eret
+
+handle_vcei:
+ MFC0 k0, CP0_BADVADDR
+ cache Hit_Writeback_Inv_SD, (k0) # also cleans pi
+#ifdef CONFIG_PROC_FS
+ PTR_LA k0, vcei_count
+ lw k1, (k0)
+ addiu k1, 1
+ sw k1, (k0)
+#endif
+ eret
+ .set pop
+ END(except_vec3_r4000)
+
+ __FINIT
+
+ .align 5 /* 32 byte rollback region */
+LEAF(__r4k_wait)
+ .set push
+ .set noreorder
+ /* start of rollback region */
+ LONG_L t0, TI_FLAGS($28)
+ nop
+ andi t0, _TIF_NEED_RESCHED
+ bnez t0, 1f
+ nop
+ nop
+ nop
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+ nop
+ nop
+ nop
+#endif
+ .set MIPS_ISA_ARCH_LEVEL_RAW
+ wait
+ /* end of rollback region (the region size must be a power of two) */
+1:
+ jr ra
+ nop
+ .set pop
+ END(__r4k_wait)
+
+ .macro BUILD_ROLLBACK_PROLOGUE handler
+ FEXPORT(rollback_\handler)
+ .set push
+ .set noat
+ MFC0 k0, CP0_EPC
+ PTR_LA k1, __r4k_wait
+ ori k0, 0x1f /* 32 byte rollback region */
+ xori k0, 0x1f
+ bne k0, k1, \handler
+ MTC0 k0, CP0_EPC
+ .set pop
+ .endm
+
+ .align 5
+BUILD_ROLLBACK_PROLOGUE handle_int
+NESTED(handle_int, PT_SIZE, sp)
+ .cfi_signal_frame
+#ifdef CONFIG_TRACE_IRQFLAGS
+ /*
+ * Check to see if the interrupted code has just disabled
+ * interrupts and ignore this interrupt for now if so.
+ *
+ * local_irq_disable() disables interrupts and then calls
+ * trace_hardirqs_off() to track the state. If an interrupt is taken
+ * after interrupts are disabled but before the state is updated
+ * it will appear to restore_all that it is incorrectly returning with
+ * interrupts disabled
+ */
+ .set push
+ .set noat
+ mfc0 k0, CP0_STATUS
+#if defined(CONFIG_CPU_R3000)
+ and k0, ST0_IEP
+ bnez k0, 1f
+
+ mfc0 k0, CP0_EPC
+ .set noreorder
+ j k0
+ rfe
+#else
+ and k0, ST0_IE
+ bnez k0, 1f
+
+ eret
+#endif
+1:
+ .set pop
+#endif
+ SAVE_ALL docfi=1
+ CLI
+ TRACE_IRQS_OFF
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_START
+ PTR_ADD sp, t0, t1
+
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
+2:
+ jal plat_irq_dispatch
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
+ END(handle_int)
+
+ __INIT
+
+/*
+ * Special interrupt vector for MIPS64 ISA & embedded MIPS processors.
+ * This is a dedicated interrupt exception vector which reduces the
+ * interrupt processing overhead. The jump instruction will be replaced
+ * at the initialization time.
+ *
+ * Be careful when changing this, it has to be at most 128 bytes
+ * to fit into space reserved for the exception handler.
+ */
+NESTED(except_vec4, 0, sp)
+1: j 1b /* Dummy, will be replaced */
+ END(except_vec4)
+
+/*
+ * EJTAG debug exception handler.
+ * The EJTAG debug exception entry point is 0xbfc00480, which
+ * normally is in the boot PROM, so the boot PROM must do an
+ * unconditional jump to this vector.
+ */
+NESTED(except_vec_ejtag_debug, 0, sp)
+ j ejtag_debug_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
+ END(except_vec_ejtag_debug)
+
+ __FINIT
+
+/*
+ * Vectored interrupt handler.
+ * This prototype is copied to ebase + n*IntCtl.VS and patched
+ * to invoke the handler
+ */
+BUILD_ROLLBACK_PROLOGUE except_vec_vi
+NESTED(except_vec_vi, 0, sp)
+ SAVE_SOME docfi=1
+ SAVE_AT docfi=1
+ .set push
+ .set noreorder
+ PTR_LA v1, except_vec_vi_handler
+FEXPORT(except_vec_vi_lui)
+ lui v0, 0 /* Patched */
+ jr v1
+FEXPORT(except_vec_vi_ori)
+ ori v0, 0 /* Patched */
+ .set pop
+ END(except_vec_vi)
+EXPORT(except_vec_vi_end)
+
+/*
+ * Common Vectored Interrupt code
+ * Complete the register saves and invoke the handler which is passed in $v0
+ */
+NESTED(except_vec_vi_handler, 0, sp)
+ SAVE_TEMP
+ SAVE_STATIC
+ CLI
+#ifdef CONFIG_TRACE_IRQFLAGS
+ move s0, v0
+ TRACE_IRQS_OFF
+ move v0, s0
+#endif
+
+ LONG_L s0, TI_REGS($28)
+ LONG_S sp, TI_REGS($28)
+
+ /*
+ * SAVE_ALL ensures we are using a valid kernel stack for the thread.
+ * Check if we are already using the IRQ stack.
+ */
+ move s1, sp # Preserve the sp
+
+ /* Get IRQ stack for this CPU */
+ ASM_CPUID_MFC0 k0, ASM_SMP_CPUID_REG
+#if defined(CONFIG_32BIT) || defined(KBUILD_64BIT_SYM32)
+ lui k1, %hi(irq_stack)
+#else
+ lui k1, %highest(irq_stack)
+ daddiu k1, %higher(irq_stack)
+ dsll k1, 16
+ daddiu k1, %hi(irq_stack)
+ dsll k1, 16
+#endif
+ LONG_SRL k0, SMP_CPUID_PTRSHIFT
+ LONG_ADDU k1, k0
+ LONG_L t0, %lo(irq_stack)(k1)
+
+ # Check if already on IRQ stack
+ PTR_LI t1, ~(_THREAD_SIZE-1)
+ and t1, t1, sp
+ beq t0, t1, 2f
+
+ /* Switch to IRQ stack */
+ li t1, _IRQ_STACK_START
+ PTR_ADD sp, t0, t1
+
+ /* Save task's sp on IRQ stack so that unwinding can follow it */
+ LONG_S s1, 0(sp)
+2:
+ jalr v0
+
+ /* Restore sp */
+ move sp, s1
+
+ j ret_from_irq
+ END(except_vec_vi_handler)
+
+/*
+ * EJTAG debug exception handler.
+ */
+NESTED(ejtag_debug_handler, PT_SIZE, sp)
+ .set push
+ .set noat
+ MTC0 k0, CP0_DESAVE
+ mfc0 k0, CP0_DEBUG
+
+ andi k0, k0, MIPS_DEBUG_DBP # Check for SDBBP.
+ beqz k0, ejtag_return
+
+#ifdef CONFIG_SMP
+1: PTR_LA k0, ejtag_debug_buffer_spinlock
+ __SYNC(full, loongson3_war)
+2: ll k0, 0(k0)
+ bnez k0, 2b
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sc k0, 0(k0)
+ beqz k0, 1b
+# ifdef CONFIG_WEAK_REORDERING_BEYOND_LLSC
+ sync
+# endif
+
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+
+ PTR_LA k1, ejtag_debug_buffer
+ LONG_L k1, 0(k1)
+ LONG_S k1, 0(k0)
+
+ PTR_LA k0, ejtag_debug_buffer_spinlock
+ sw zero, 0(k0)
+#else
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_S k1, 0(k0)
+#endif
+
+ SAVE_ALL
+ move a0, sp
+ jal ejtag_exception_handler
+ RESTORE_ALL
+
+#ifdef CONFIG_SMP
+ ASM_CPUID_MFC0 k1, ASM_SMP_CPUID_REG
+ PTR_SRL k1, SMP_CPUID_PTRSHIFT
+ PTR_SLL k1, LONGLOG
+ PTR_LA k0, ejtag_debug_buffer_per_cpu
+ PTR_ADDU k0, k1
+ LONG_L k1, 0(k0)
+#else
+ PTR_LA k0, ejtag_debug_buffer
+ LONG_L k1, 0(k0)
+#endif
+
+ejtag_return:
+ back_to_back_c0_hazard
+ MFC0 k0, CP0_DESAVE
+ .set mips32
+ deret
+ .set pop
+ END(ejtag_debug_handler)
+
+/*
+ * This buffer is reserved for the use of the EJTAG debug
+ * handler.
+ */
+ .data
+EXPORT(ejtag_debug_buffer)
+ .fill LONGSIZE
+#ifdef CONFIG_SMP
+EXPORT(ejtag_debug_buffer_spinlock)
+ .fill LONGSIZE
+EXPORT(ejtag_debug_buffer_per_cpu)
+ .fill LONGSIZE * NR_CPUS
+#endif
+ .previous
+
+ __INIT
+
+/*
+ * NMI debug exception handler for MIPS reference boards.
+ * The NMI debug exception entry point is 0xbfc00000, which
+ * normally is in the boot PROM, so the boot PROM must do an
+ * unconditional jump to this vector.
+ */
+NESTED(except_vec_nmi, 0, sp)
+ j nmi_handler
+#ifdef CONFIG_CPU_MICROMIPS
+ nop
+#endif
+ END(except_vec_nmi)
+
+ __FINIT
+
+NESTED(nmi_handler, PT_SIZE, sp)
+ .cfi_signal_frame
+ .set push
+ .set noat
+ /*
+ * Clear ERL - restore segment mapping
+ * Clear BEV - required for page fault exception handler to work
+ */
+ mfc0 k0, CP0_STATUS
+ ori k0, k0, ST0_EXL
+ li k1, ~(ST0_BEV | ST0_ERL)
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
+ _ehb
+ SAVE_ALL
+ move a0, sp
+ jal nmi_exception_handler
+ /* nmi_exception_handler never returns */
+ .set pop
+ END(nmi_handler)
+
+ .macro __build_clear_none
+ .endm
+
+ .macro __build_clear_sti
+ TRACE_IRQS_ON
+ STI
+ .endm
+
+ .macro __build_clear_cli
+ CLI
+ TRACE_IRQS_OFF
+ .endm
+
+ .macro __build_clear_fpe
+ CLI
+ TRACE_IRQS_OFF
+ .set push
+ /* gas fails to assemble cfc1 for some archs (octeon). */
+ .set mips1
+ SET_HARDFLOAT
+ cfc1 a1, fcr31
+ .set pop
+ .endm
+
+ .macro __build_clear_msa_fpe
+ CLI
+ TRACE_IRQS_OFF
+ _cfcmsa a1, MSA_CSR
+ .endm
+
+ .macro __build_clear_ade
+ MFC0 t0, CP0_BADVADDR
+ PTR_S t0, PT_BVADDR(sp)
+ KMODE
+ .endm
+
+ .macro __build_clear_gsexc
+ .set push
+ /*
+ * We need to specify a selector to access the CP0.Diag1 (GSCause)
+ * register. All GSExc-equipped processors have MIPS32.
+ */
+ .set mips32
+ mfc0 a1, CP0_DIAGNOSTIC1
+ .set pop
+ TRACE_IRQS_ON
+ STI
+ .endm
+
+ .macro __BUILD_silent exception
+ .endm
+
+ /* Gas tries to parse the ASM_PRINT argument as a string containing
+ string escapes and emits bogus warnings if it thinks it has
+ recognized an unknown escape code. So make the arguments
+ start with an n and gas will believe \n is ok ... */
+ .macro __BUILD_verbose nexception
+ LONG_L a1, PT_EPC(sp)
+#ifdef CONFIG_32BIT
+ ASM_PRINT("Got \nexception at %08lx\012")
+#endif
+#ifdef CONFIG_64BIT
+ ASM_PRINT("Got \nexception at %016lx\012")
+#endif
+ .endm
+
+ .macro __BUILD_count exception
+ LONG_L t0,exception_count_\exception
+ LONG_ADDIU t0, 1
+ LONG_S t0,exception_count_\exception
+ .comm exception_count_\exception, 8, 8
+ .endm
+
+ .macro __BUILD_HANDLER exception handler clear verbose ext
+ .align 5
+ NESTED(handle_\exception, PT_SIZE, sp)
+ .cfi_signal_frame
+ .set noat
+ SAVE_ALL
+ FEXPORT(handle_\exception\ext)
+ __build_clear_\clear
+ .set at
+ __BUILD_\verbose \exception
+ move a0, sp
+ jal do_\handler
+ j ret_from_exception
+ END(handle_\exception)
+ .endm
+
+ .macro BUILD_HANDLER exception handler clear verbose
+ __BUILD_HANDLER \exception \handler \clear \verbose _int
+ .endm
+
+ BUILD_HANDLER adel ade ade silent /* #4 */
+ BUILD_HANDLER ades ade ade silent /* #5 */
+ BUILD_HANDLER ibe be cli silent /* #6 */
+ BUILD_HANDLER dbe be cli silent /* #7 */
+ BUILD_HANDLER bp bp sti silent /* #9 */
+ BUILD_HANDLER ri ri sti silent /* #10 */
+ BUILD_HANDLER cpu cpu sti silent /* #11 */
+ BUILD_HANDLER ov ov sti silent /* #12 */
+ BUILD_HANDLER tr tr sti silent /* #13 */
+ BUILD_HANDLER msa_fpe msa_fpe msa_fpe silent /* #14 */
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ BUILD_HANDLER fpe fpe fpe silent /* #15 */
+#endif
+ BUILD_HANDLER ftlb ftlb none silent /* #16 */
+ BUILD_HANDLER gsexc gsexc gsexc silent /* #16 */
+ BUILD_HANDLER msa msa sti silent /* #21 */
+ BUILD_HANDLER mdmx mdmx sti silent /* #22 */
+#ifdef CONFIG_HARDWARE_WATCHPOINTS
+ /*
+ * For watch, interrupts will be enabled after the watch
+ * registers are read.
+ */
+ BUILD_HANDLER watch watch cli silent /* #23 */
+#else
+ BUILD_HANDLER watch watch sti verbose /* #23 */
+#endif
+ BUILD_HANDLER mcheck mcheck cli verbose /* #24 */
+ BUILD_HANDLER mt mt sti silent /* #25 */
+ BUILD_HANDLER dsp dsp sti silent /* #26 */
+ BUILD_HANDLER reserved reserved sti verbose /* others */
+
+ .align 5
+ LEAF(handle_ri_rdhwr_tlbp)
+ .set push
+ .set noat
+ .set noreorder
+ /* check if TLB contains an entry for EPC */
+ MFC0 k1, CP0_ENTRYHI
+ andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
+ MFC0 k0, CP0_EPC
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
+ or k1, k0
+ MTC0 k1, CP0_ENTRYHI
+ mtc0_tlbw_hazard
+ tlbp
+ tlb_probe_hazard
+ mfc0 k1, CP0_INDEX
+ .set pop
+ bltz k1, handle_ri /* slow path */
+ /* fall thru */
+ END(handle_ri_rdhwr_tlbp)
+
+ LEAF(handle_ri_rdhwr)
+ .set push
+ .set noat
+ .set noreorder
+ /* MIPS32: 0x7c03e83b: rdhwr v1,$29 */
+ /* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
+ MFC0 k1, CP0_EPC
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
+ and k0, k1, 1
+ beqz k0, 1f
+ xor k1, k0
+ lhu k0, (k1)
+ lhu k1, 2(k1)
+ ins k1, k0, 16, 16
+ lui k0, 0x007d
+ b docheck
+ ori k0, 0x6b3c
+1:
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#else
+ andi k0, k1, 1
+ bnez k0, handle_ri
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
+#endif
+ .set reorder
+docheck:
+ bne k0, k1, handle_ri /* if not ours */
+
+isrdhwr:
+ /* The insn is rdhwr. No need to check CAUSE.BD here. */
+ get_saved_sp /* k1 := current_thread_info */
+ .set noreorder
+ MFC0 k0, CP0_EPC
+#if defined(CONFIG_CPU_R3000)
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ LONG_ADDIU k0, 4
+ jr k0
+ rfe
+#else
+#ifndef CONFIG_CPU_DADDI_WORKAROUNDS
+ LONG_ADDIU k0, 4 /* stall on $k0 */
+#else
+ .set at=v1
+ LONG_ADDIU k0, 4
+ .set noat
+#endif
+ MTC0 k0, CP0_EPC
+ /* I hope three instructions between MTC0 and ERET are enough... */
+ ori k1, _THREAD_MASK
+ xori k1, _THREAD_MASK
+ LONG_L v1, TI_TP_VALUE(k1)
+ .set push
+ .set arch=r4000
+ eret
+ .set pop
+#endif
+ .set pop
+ END(handle_ri_rdhwr)
+
+#ifdef CONFIG_CPU_R4X00_BUGS64
+/* A temporary overflow handler used by check_daddi(). */
+
+ __INIT
+
+ BUILD_HANDLER daddi_ov daddi_ov none silent /* #12 */
+#endif
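
The BUILD_ROLLBACK_PROLOGUE macro above rounds EPC down to the start of the
32-byte rollback region with an ori/xori pair instead of an and, which avoids
needing a second register for the mask. The identity it relies on, sketched
in C (the sample address is arbitrary):

    #include <stdio.h>
    #include <stdint.h>

    /* (pc | 0x1f) ^ 0x1f clears the low five bits, i.e. pc & ~0x1f */
    static uintptr_t align_down_32(uintptr_t pc)
    {
        return (pc | 0x1f) ^ 0x1f;
    }

    int main(void)
    {
        uintptr_t pc = 0x80001234;      /* arbitrary sample EPC */
        printf("%#lx -> %#lx\n", (unsigned long)pc,
               (unsigned long)align_down_32(pc));
        return 0;
    }
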
diff --git a/arch/mips/kernel/gpio_txx9.c b/arch/mips/kernel/gpio_txx9.c
new file mode 100644
index 000000000..8c083612d
--- /dev/null
+++ b/arch/mips/kernel/gpio_txx9.c
@@ -0,0 +1,86 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * A gpio chip driver for TXx9 SoCs
+ *
+ * Copyright (C) 2008 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/gpio/driver.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/txx9pio.h>
+
+static DEFINE_SPINLOCK(txx9_gpio_lock);
+
+static struct txx9_pio_reg __iomem *txx9_pioptr;
+
+static int txx9_gpio_get(struct gpio_chip *chip, unsigned int offset)
+{
+ return !!(__raw_readl(&txx9_pioptr->din) & (1 << offset));
+}
+
+static void txx9_gpio_set_raw(unsigned int offset, int value)
+{
+ u32 val;
+ val = __raw_readl(&txx9_pioptr->dout);
+ if (value)
+ val |= 1 << offset;
+ else
+ val &= ~(1 << offset);
+ __raw_writel(val, &txx9_pioptr->dout);
+}
+
+static void txx9_gpio_set(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+}
+
+static int txx9_gpio_dir_in(struct gpio_chip *chip, unsigned int offset)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) & ~(1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static int txx9_gpio_dir_out(struct gpio_chip *chip, unsigned int offset,
+ int value)
+{
+ unsigned long flags;
+ spin_lock_irqsave(&txx9_gpio_lock, flags);
+ txx9_gpio_set_raw(offset, value);
+ __raw_writel(__raw_readl(&txx9_pioptr->dir) | (1 << offset),
+ &txx9_pioptr->dir);
+ mmiowb();
+ spin_unlock_irqrestore(&txx9_gpio_lock, flags);
+ return 0;
+}
+
+static struct gpio_chip txx9_gpio_chip = {
+ .get = txx9_gpio_get,
+ .set = txx9_gpio_set,
+ .direction_input = txx9_gpio_dir_in,
+ .direction_output = txx9_gpio_dir_out,
+ .label = "TXx9",
+};
+
+int __init txx9_gpio_init(unsigned long baseaddr,
+ unsigned int base, unsigned int num)
+{
+ txx9_pioptr = ioremap(baseaddr, sizeof(struct txx9_pio_reg));
+ if (!txx9_pioptr)
+ return -ENODEV;
+ txx9_gpio_chip.base = base;
+ txx9_gpio_chip.ngpio = num;
+ return gpiochip_add_data(&txx9_gpio_chip, NULL);
+}
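
Every mutating path in the driver above (set, direction_input,
direction_output) is the same read-modify-write of a shared register under
txx9_gpio_lock. A user-space sketch of that pattern, with a plain variable
standing in for the MMIO register:

    #include <stdint.h>
    #include <stdio.h>

    static uint32_t reg;                /* stand-in for txx9_pioptr->dout */

    static void rmw_bit(uint32_t *r, unsigned int bit, int value)
    {
        uint32_t val = *r;              /* read */
        if (value)
            val |= 1u << bit;           /* modify: set */
        else
            val &= ~(1u << bit);        /* modify: clear */
        *r = val;                       /* write back */
    }

    int main(void)
    {
        rmw_bit(&reg, 3, 1);
        rmw_bit(&reg, 5, 1);
        rmw_bit(&reg, 3, 0);
        printf("%#x\n", reg);           /* 0x20: only bit 5 left set */
        return 0;
    }
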
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
new file mode 100644
index 000000000..b825ed447
--- /dev/null
+++ b/arch/mips/kernel/head.S
@@ -0,0 +1,154 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995 Waldorf Electronics
+ * Written by Ralf Baechle and Andreas Busse
+ * Copyright (C) 1994 - 99, 2003, 06 Ralf Baechle
+ * Copyright (C) 1996 Paul M. Antoine
+ * Modified for DECStation and hence R3000 support by Paul M. Antoine
+ * Further modifications by David S. Miller and Harald Koerfgen
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Kevin Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc. All rights reserved.
+ */
+#include <linux/init.h>
+#include <linux/threads.h>
+
+#include <asm/addrspace.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+
+#include <kernel-entry-init.h>
+
+ /*
+ * For the moment disable interrupts, mark the kernel mode and
+ * set ST0_KX so that the CPU does not spit fire when using
+ * 64-bit addresses. A full initialization of the CPU's status
+ * register is done later in per_cpu_trap_init().
+ */
+ .macro setup_c0_status set clr
+ .set push
+ mfc0 t0, CP0_STATUS
+ or t0, ST0_KERNEL_CUMASK|\set|0x1f|\clr
+ xor t0, 0x1f|\clr
+ mtc0 t0, CP0_STATUS
+ .set noreorder
+ sll zero,3 # ehb
+ .set pop
+ .endm
+
+ .macro setup_c0_status_pri
+#ifdef CONFIG_64BIT
+ setup_c0_status ST0_KX 0
+#else
+ setup_c0_status 0 0
+#endif
+ .endm
+
+ .macro setup_c0_status_sec
+#ifdef CONFIG_64BIT
+ setup_c0_status ST0_KX ST0_BEV
+#else
+ setup_c0_status 0 ST0_BEV
+#endif
+ .endm
+
+#ifndef CONFIG_NO_EXCEPT_FILL
+ /*
+ * Reserved space for exception handlers.
+ * Necessary for machines which link their kernels at KSEG0.
+ */
+ .fill 0x400
+#endif
+
+EXPORT(_stext)
+
+#ifdef CONFIG_BOOT_RAW
+ /*
+ * Give us a fighting chance of running if execution begins at the
+ * kernel load address. This is needed because this platform does
+ * not have an ELF loader yet.
+ */
+FEXPORT(__kernel_entry)
+ j kernel_entry
+#endif /* CONFIG_BOOT_RAW */
+
+ __REF
+
+NESTED(kernel_entry, 16, sp) # kernel entry point
+
+ kernel_entry_setup # cpu specific setup
+
+ setup_c0_status_pri
+
+ /* We might not get launched at the address the kernel is linked to,
+ so we jump there. */
+ PTR_LA t0, 0f
+ jr t0
+0:
+
+ PTR_LA t0, __bss_start # clear .bss
+ LONG_S zero, (t0)
+ PTR_LA t1, __bss_stop - LONGSIZE
+1:
+ PTR_ADDIU t0, LONGSIZE
+ LONG_S zero, (t0)
+ bne t0, t1, 1b
+
+ LONG_S a0, fw_arg0 # firmware arguments
+ LONG_S a1, fw_arg1
+ LONG_S a2, fw_arg2
+ LONG_S a3, fw_arg3
+
+ MTC0 zero, CP0_CONTEXT # clear context register
+#ifdef CONFIG_64BIT
+ MTC0 zero, CP0_XCONTEXT
+#endif
+ PTR_LA $28, init_thread_union
+ /* Set the SP after an empty pt_regs. */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
+ PTR_ADDU sp, $28
+ back_to_back_c0_hazard
+ set_saved_sp sp, t0, t1
+ PTR_SUBU sp, 4 * SZREG # init stack pointer
+
+#ifdef CONFIG_RELOCATABLE
+ /* Copy kernel and apply the relocations */
+ jal relocate_kernel
+
+ /* Repoint the sp into the new kernel image */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
+ PTR_ADDU sp, $28
+ set_saved_sp sp, t0, t1
+ PTR_SUBU sp, 4 * SZREG # init stack pointer
+
+ /*
+ * relocate_kernel returns the entry point either
+ * in the relocated kernel or the original if for
+ * some reason relocation failed - jump there now
+ * with instruction hazard barrier because of the
+ * newly sync'd icache.
+ */
+ jr.hb v0
+#else /* !CONFIG_RELOCATABLE */
+ j start_kernel
+#endif /* !CONFIG_RELOCATABLE */
+ END(kernel_entry)
+
+#ifdef CONFIG_SMP
+/*
+ * SMP slave cpus entry point. Board specific code for bootstrap calls this
+ * function after setting up the stack and gp registers.
+ */
+NESTED(smp_bootstrap, 16, sp)
+ smp_slave_setup
+ setup_c0_status_sec
+ j start_secondary
+ END(smp_bootstrap)
+#endif /* CONFIG_SMP */
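
The setup_c0_status macro updates CP0 Status with an or/xor pair: or-ing in
both the set and clear masks forces every affected bit on, and the following
xor with the clear mask flips exactly the to-be-cleared bits back off. The
same trick in C, with made-up stand-ins for the ST0_* masks:

    #include <stdio.h>
    #include <stdint.h>

    #define ST_KX  0x00000080u          /* hypothetical stand-in masks */
    #define ST_BEV 0x00400000u
    #define ST_IE  0x00000001u

    static uint32_t update_status(uint32_t st, uint32_t set, uint32_t clr)
    {
        st |= set | clr;                /* force all affected bits on */
        st ^= clr;                      /* flip the to-be-cleared ones off */
        return st;
    }

    int main(void)
    {
        uint32_t st = ST_BEV | ST_IE;
        st = update_status(st, ST_KX, ST_BEV | ST_IE);
        printf("%#x\n", st);            /* 0x80: KX set, BEV and IE cleared */
        return 0;
    }
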
diff --git a/arch/mips/kernel/i8253.c b/arch/mips/kernel/i8253.c
new file mode 100644
index 000000000..ca21210e0
--- /dev/null
+++ b/arch/mips/kernel/i8253.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * i8253.c 8253/PIT functions
+ *
+ */
+#include <linux/clockchips.h>
+#include <linux/i8253.h>
+#include <linux/export.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ i8253_clockevent.event_handler(&i8253_clockevent);
+
+ return IRQ_HANDLED;
+}
+
+void __init setup_pit_timer(void)
+{
+ unsigned long flags = IRQF_NOBALANCING | IRQF_TIMER;
+
+ clockevent_i8253_init(true);
+ if (request_irq(0, timer_interrupt, flags, "timer", NULL))
+ pr_err("Failed to request irq 0 (timer)\n");
+}
+
+static int __init init_pit_clocksource(void)
+{
+ if (num_possible_cpus() > 1 || /* PIT does not scale! */
+ !clockevent_state_periodic(&i8253_clockevent))
+ return 0;
+
+ return clocksource_i8253_init();
+}
+arch_initcall(init_pit_clocksource);
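
timer_interrupt() above does nothing but forward into whatever handler the
clockevents core installed in i8253_clockevent. A minimal sketch of that
callback indirection (the names here are invented for illustration):

    #include <stdio.h>

    struct clock_event {
        void (*event_handler)(struct clock_event *);
    };

    static void tick(struct clock_event *ce)
    {
        (void)ce;
        puts("tick");                   /* the periodic tick work */
    }

    static struct clock_event pit = { .event_handler = tick };

    static void timer_irq(void)
    {
        pit.event_handler(&pit);        /* what the ISR does */
    }

    int main(void)
    {
        timer_irq();
        return 0;
    }
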
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
new file mode 100644
index 000000000..53adcc1b2
--- /dev/null
+++ b/arch/mips/kernel/idle.c
@@ -0,0 +1,261 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MIPS idle loop and WAIT instruction support.
+ *
+ * Copyright (C) xxxx the Anonymous
+ * Copyright (C) 1994 - 2006 Ralf Baechle
+ * Copyright (C) 2003, 2004 Maciej W. Rozycki
+ * Copyright (C) 2001, 2004, 2011, 2012 MIPS Technologies, Inc.
+ */
+#include <linux/cpu.h>
+#include <linux/export.h>
+#include <linux/init.h>
+#include <linux/irqflags.h>
+#include <linux/printk.h>
+#include <linux/sched.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/cpu-type.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+
+/*
+ * Not all MIPS CPUs have the "wait" instruction available. Moreover,
+ * the implementation of the "wait" feature differs between CPU families. This
+ * pointer refers to the function that implements the CPU-specific wait.
+ * The wait instruction stops the pipeline and considerably reduces the
+ * power consumption of the CPU.
+ */
+void (*cpu_wait)(void);
+EXPORT_SYMBOL(cpu_wait);
+
+static void __cpuidle r3081_wait(void)
+{
+ unsigned long cfg = read_c0_conf();
+ write_c0_conf(cfg | R30XX_CONF_HALT);
+ raw_local_irq_enable();
+}
+
+void __cpuidle r4k_wait(void)
+{
+ raw_local_irq_enable();
+ __r4k_wait();
+}
+
+/*
+ * This variant is preferable as it allows testing need_resched and going to
+ * sleep depending on the outcome atomically. Unfortunately the "It is
+ * implementation-dependent whether the pipeline restarts when a non-enabled
+ * interrupt is requested" restriction in the MIPS32/MIPS64 architecture makes
+ * using this version a gamble.
+ */
+void __cpuidle r4k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " wait \n"
+ " .set pop \n");
+ raw_local_irq_enable();
+}
+
+/*
+ * The RM7000 variant has to handle erratum 38. The workaround is to not
+ * have any pending stores when the WAIT instruction is executed.
+ */
+static void __cpuidle rm7k_wait_irqoff(void)
+{
+ if (!need_resched())
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " .set noat \n"
+ " mfc0 $1, $12 \n"
+ " sync \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " wait \n"
+ " mtc0 $1, $12 # stalls until W stage \n"
+ " .set pop \n");
+ raw_local_irq_enable();
+}
+
+/*
+ * Au1 'wait' is only useful when the 32kHz counter is used as the timer,
+ * since the core clock (and the cp0 counter) stops upon executing it. Only an
+ * interrupt can wake it, so interrupts must be enabled before entering idle modes.
+ */
+static void __cpuidle au1k_wait(void)
+{
+ unsigned long c0status = read_c0_status() | 1; /* irqs on */
+
+ __asm__(
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " cache 0x14, 0(%0) \n"
+ " cache 0x14, 32(%0) \n"
+ " sync \n"
+ " mtc0 %1, $12 \n" /* wr c0status */
+ " wait \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " nop \n"
+ " .set pop \n"
+ : : "r" (au1k_wait), "r" (c0status));
+}
+
+static int __initdata nowait;
+
+static int __init wait_disable(char *s)
+{
+ nowait = 1;
+
+ return 1;
+}
+
+__setup("nowait", wait_disable);
+
+void __init check_wait(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+
+ if (nowait) {
+ printk("Wait instruction disabled.\n");
+ return;
+ }
+
+ /*
+ * MIPSr6 specifies that masked interrupts should unblock an executing
+ * wait instruction, and thus that it is safe for us to use
+ * r4k_wait_irqoff. Yippee!
+ */
+ if (cpu_has_mips_r6) {
+ cpu_wait = r4k_wait_irqoff;
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_R3081:
+ case CPU_R3081E:
+ cpu_wait = r3081_wait;
+ break;
+ case CPU_R4200:
+/* case CPU_R4300: */
+ case CPU_R4600:
+ case CPU_R4640:
+ case CPU_R4650:
+ case CPU_R4700:
+ case CPU_R5000:
+ case CPU_R5500:
+ case CPU_NEVADA:
+ case CPU_4KC:
+ case CPU_4KEC:
+ case CPU_4KSC:
+ case CPU_5KC:
+ case CPU_5KE:
+ case CPU_25KF:
+ case CPU_PR4450:
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ case CPU_XBURST:
+ case CPU_LOONGSON32:
+ cpu_wait = r4k_wait;
+ break;
+ case CPU_LOONGSON64:
+ if ((c->processor_id & (PRID_IMP_MASK | PRID_REV_MASK)) >=
+ (PRID_IMP_LOONGSON_64C | PRID_REV_LOONGSON3A_R2_0) ||
+ (c->processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64R)
+ cpu_wait = r4k_wait;
+ break;
+
+ case CPU_BMIPS5000:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_RM7000:
+ cpu_wait = rm7k_wait_irqoff;
+ break;
+
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ /*
+ * Incoming Fast Debug Channel (FDC) data during a wait
+ * instruction causes the wait never to resume, even if an
+ * interrupt is received. Avoid using wait at all if FDC data is
+ * likely to be received.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_EJTAG_FDC_TTY))
+ break;
+ fallthrough;
+ case CPU_M14KC:
+ case CPU_M14KEC:
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_M5150:
+ case CPU_QEMU_GENERIC:
+ cpu_wait = r4k_wait;
+ if (read_c0_config7() & MIPS_CONF7_WII)
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_74K:
+ cpu_wait = r4k_wait;
+ if ((c->processor_id & 0xff) >= PRID_REV_ENCODE_332(2, 1, 0))
+ cpu_wait = r4k_wait_irqoff;
+ break;
+
+ case CPU_TX49XX:
+ cpu_wait = r4k_wait_irqoff;
+ break;
+ case CPU_ALCHEMY:
+ cpu_wait = au1k_wait;
+ break;
+ case CPU_20KC:
+ /*
+ * WAIT on Rev1.0 has E1, E2, E3 and E16.
+ * WAIT on Rev2.0 and Rev3.0 has E16.
+ * Rev3.1 WAIT is a nop, so don't bother.
+ */
+ if ((c->processor_id & 0xff) <= 0x64)
+ break;
+
+ /*
+ * Another rev is incrementing c0_count at a reduced clock
+ * rate while in WAIT mode. So we basically have the choice
+ * between using the cp0 timer as clocksource or avoiding
+ * the WAIT instruction. Until more details are known,
+ * disable the use of WAIT for 20Kc entirely.
+ cpu_wait = r4k_wait;
+ */
+ break;
+ default:
+ break;
+ }
+}
+
+void arch_cpu_idle(void)
+{
+ if (cpu_wait)
+ cpu_wait();
+ else
+ raw_local_irq_enable();
+}
+
+#ifdef CONFIG_CPU_IDLE
+
+int mips_cpuidle_wait_enter(struct cpuidle_device *dev,
+ struct cpuidle_driver *drv, int index)
+{
+ arch_cpu_idle();
+ return index;
+}
+
+#endif
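
check_wait() selects an idle routine once at boot, and arch_cpu_idle() then
just calls through the cpu_wait pointer, falling back to re-enabling
interrupts when no routine was chosen. A compact sketch of that dispatch,
with invented routines:

    #include <stdio.h>

    static void (*cpu_wait_fn)(void);   /* NULL until probed */

    static void wait_idle(void)
    {
        puts("wait");
    }

    static void check_wait_sketch(int cpu_has_wait)
    {
        if (cpu_has_wait)
            cpu_wait_fn = wait_idle;
    }

    static void idle(void)
    {
        if (cpu_wait_fn)
            cpu_wait_fn();
        else
            puts("no wait: just re-enable interrupts");
    }

    int main(void)
    {
        idle();                         /* fallback path */
        check_wait_sketch(1);
        idle();                         /* probed path */
        return 0;
    }
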
diff --git a/arch/mips/kernel/irq-gt641xx.c b/arch/mips/kernel/irq-gt641xx.c
new file mode 100644
index 000000000..93bcf5736
--- /dev/null
+++ b/arch/mips/kernel/irq-gt641xx.c
@@ -0,0 +1,118 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * GT641xx IRQ routines.
+ *
+ * Copyright (C) 2007 Yoichi Yuasa <yuasa@linux-mips.org>
+ */
+#include <linux/hardirq.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#include <asm/gt64120.h>
+
+#define GT641XX_IRQ_TO_BIT(irq) (1U << (irq - GT641XX_IRQ_BASE))
+
+static DEFINE_RAW_SPINLOCK(gt641xx_irq_lock);
+
+static void ack_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 cause;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void mask_ack_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 cause, mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ cause &= ~GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRCAUSE_OFS, cause);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static void unmask_gt641xx_irq(struct irq_data *d)
+{
+ unsigned long flags;
+ u32 mask;
+
+ raw_spin_lock_irqsave(&gt641xx_irq_lock, flags);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ mask |= GT641XX_IRQ_TO_BIT(d->irq);
+ GT_WRITE(GT_INTRMASK_OFS, mask);
+ raw_spin_unlock_irqrestore(&gt641xx_irq_lock, flags);
+}
+
+static struct irq_chip gt641xx_irq_chip = {
+ .name = "GT641xx",
+ .irq_ack = ack_gt641xx_irq,
+ .irq_mask = mask_gt641xx_irq,
+ .irq_mask_ack = mask_ack_gt641xx_irq,
+ .irq_unmask = unmask_gt641xx_irq,
+};
+
+void gt641xx_irq_dispatch(void)
+{
+ u32 cause, mask;
+ int i;
+
+ cause = GT_READ(GT_INTRCAUSE_OFS);
+ mask = GT_READ(GT_INTRMASK_OFS);
+ cause &= mask;
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++) {
+ if (cause & (1U << i)) {
+ do_IRQ(GT641XX_IRQ_BASE + i);
+ return;
+ }
+ }
+
+ atomic_inc(&irq_err_count);
+}
+
+void __init gt641xx_irq_init(void)
+{
+ int i;
+
+ GT_WRITE(GT_INTRMASK_OFS, 0);
+ GT_WRITE(GT_INTRCAUSE_OFS, 0);
+
+ /*
+ * bit0 : logical or of all the interrupt bits.
+ * bit30: logical or of bits[29:26,20:1].
+ * bit31: logical or of bits[25:1].
+ */
+ for (i = 1; i < 30; i++)
+ irq_set_chip_and_handler(GT641XX_IRQ_BASE + i,
+ &gt641xx_irq_chip, handle_level_irq);
+}
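
gt641xx_irq_dispatch() above scans the masked cause word from bit 1 upward
and dispatches the first pending source; anything left over counts as a
spurious interrupt. A sketch of the same scan:

    #include <stdio.h>

    static int first_pending(unsigned int cause, unsigned int mask)
    {
        unsigned int pending = cause & mask;
        int i;

        for (i = 1; i < 30; i++)
            if (pending & (1u << i))
                return i;               /* would become GT641XX_IRQ_BASE + i */
        return -1;                      /* spurious: bump irq_err_count */
    }

    int main(void)
    {
        printf("%d\n", first_pending(0x28, ~1u));   /* prints 3 */
        printf("%d\n", first_pending(0x00, ~1u));   /* prints -1 */
        return 0;
    }
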
diff --git a/arch/mips/kernel/irq-msc01.c b/arch/mips/kernel/irq-msc01.c
new file mode 100644
index 000000000..ab511b64a
--- /dev/null
+++ b/arch/mips/kernel/irq-msc01.c
@@ -0,0 +1,156 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (c) 2004 MIPS Inc
+ * Author: chris@mips.com
+ *
+ * Copyright (C) 2004, 06 Ralf Baechle <ralf@linux-mips.org>
+ */
+#include <linux/interrupt.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <asm/msc01_ic.h>
+#include <asm/traps.h>
+
+static unsigned long _icctrl_msc;
+#define MSC01_IC_REG_BASE _icctrl_msc
+
+#define MSCIC_WRITE(reg, data) do { *(volatile u32 *)(reg) = data; } while (0)
+#define MSCIC_READ(reg, data) do { data = *(volatile u32 *)(reg); } while (0)
+
+static unsigned int irq_base;
+
+/* mask off an interrupt */
+static inline void mask_msc_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ if (irq < (irq_base + 32))
+ MSCIC_WRITE(MSC01_IC_DISL, 1<<(irq - irq_base));
+ else
+ MSCIC_WRITE(MSC01_IC_DISH, 1<<(irq - irq_base - 32));
+}
+
+/* unmask an interrupt */
+static inline void unmask_msc_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ if (irq < (irq_base + 32))
+ MSCIC_WRITE(MSC01_IC_ENAL, 1<<(irq - irq_base));
+ else
+ MSCIC_WRITE(MSC01_IC_ENAH, 1<<(irq - irq_base - 32));
+}
+
+/*
+ * Masks and ACKs an IRQ
+ */
+static void level_mask_and_ack_msc_irq(struct irq_data *d)
+{
+ mask_msc_irq(d);
+ if (!cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_EOI, 0);
+}
+
+/*
+ * Masks and ACKs an IRQ
+ */
+static void edge_mask_and_ack_msc_irq(struct irq_data *d)
+{
+ unsigned int irq = d->irq;
+
+ mask_msc_irq(d);
+ if (!cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_EOI, 0);
+ else {
+ u32 r;
+ MSCIC_READ(MSC01_IC_SUP+irq*8, r);
+ MSCIC_WRITE(MSC01_IC_SUP+irq*8, r | ~MSC01_IC_SUP_EDGE_BIT);
+ MSCIC_WRITE(MSC01_IC_SUP+irq*8, r);
+ }
+}
+
+/*
+ * Interrupt handler for interrupts coming from SOC-it.
+ */
+void ll_msc_irq(void)
+{
+ unsigned int irq;
+
+ /* read the interrupt vector register */
+ MSCIC_READ(MSC01_IC_VEC, irq);
+ if (irq < 64)
+ do_IRQ(irq + irq_base);
+ else {
+ /* Ignore spurious interrupt */
+ }
+}
+
+static void msc_bind_eic_interrupt(int irq, int set)
+{
+ MSCIC_WRITE(MSC01_IC_RAMW,
+ (irq<<MSC01_IC_RAMW_ADDR_SHF) | (set<<MSC01_IC_RAMW_DATA_SHF));
+}
+
+static struct irq_chip msc_levelirq_type = {
+ .name = "SOC-it-Level",
+ .irq_ack = level_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = level_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
+};
+
+static struct irq_chip msc_edgeirq_type = {
+ .name = "SOC-it-Edge",
+ .irq_ack = edge_mask_and_ack_msc_irq,
+ .irq_mask = mask_msc_irq,
+ .irq_mask_ack = edge_mask_and_ack_msc_irq,
+ .irq_unmask = unmask_msc_irq,
+ .irq_eoi = unmask_msc_irq,
+};
+
+
+void __init init_msc_irqs(unsigned long icubase, unsigned int irqbase, msc_irqmap_t *imp, int nirq)
+{
+ _icctrl_msc = (unsigned long) ioremap(icubase, 0x40000);
+
+ /* Reset interrupt controller - initialises all registers to 0 */
+ MSCIC_WRITE(MSC01_IC_RST, MSC01_IC_RST_RST_BIT);
+
+ board_bind_eic_interrupt = &msc_bind_eic_interrupt;
+
+ for (; nirq > 0; nirq--, imp++) {
+ int n = imp->im_irq;
+
+ switch (imp->im_type) {
+ case MSC01_IRQ_EDGE:
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_edgeirq_type,
+ handle_edge_irq,
+ "edge");
+ if (cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT);
+ else
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, MSC01_IC_SUP_EDGE_BIT | imp->im_lvl);
+ break;
+ case MSC01_IRQ_LEVEL:
+ irq_set_chip_and_handler_name(irqbase + n,
+ &msc_levelirq_type,
+ handle_level_irq,
+ "level");
+ if (cpu_has_veic)
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, 0);
+ else
+ MSCIC_WRITE(MSC01_IC_SUP+n*8, imp->im_lvl);
+ }
+ }
+
+ irq_base = irqbase;
+
+ MSCIC_WRITE(MSC01_IC_GENA, MSC01_IC_GENA_GENA_BIT); /* Enable interrupt generation */
+
+}
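
mask_msc_irq() and unmask_msc_irq() split the controller's 64 sources across
a low/high register pair, picking the register by comparing against
irq_base + 32. A sketch of that split (the irq_base of 0 is arbitrary):

    #include <stdio.h>

    int main(void)
    {
        unsigned int irq_base = 0, irq;

        for (irq = 30; irq < 34; irq++) {
            if (irq < irq_base + 32)
                printf("irq %u -> low register, bit %u\n",
                       irq, irq - irq_base);
            else
                printf("irq %u -> high register, bit %u\n",
                       irq, irq - irq_base - 32);
        }
        return 0;
    }
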
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
new file mode 100644
index 000000000..5e11582fe
--- /dev/null
+++ b/arch/mips/kernel/irq.c
@@ -0,0 +1,119 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Code to handle x86 style IRQs plus some generic interrupt stuff.
+ *
+ * Copyright (C) 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ */
+#include <linux/kernel.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/kernel_stat.h>
+#include <linux/proc_fs.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/kallsyms.h>
+#include <linux/kgdb.h>
+#include <linux/ftrace.h>
+#include <linux/irqdomain.h>
+
+#include <linux/atomic.h>
+#include <linux/uaccess.h>
+
+void *irq_stack[NR_CPUS];
+
+/*
+ * 'what should we do if we get a hw irq event on an illegal vector'.
+ * Each architecture has to answer this itself.
+ */
+void ack_bad_irq(unsigned int irq)
+{
+ printk("unexpected IRQ # %d\n", irq);
+}
+
+atomic_t irq_err_count;
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
+ return 0;
+}
+
+asmlinkage void spurious_interrupt(void)
+{
+ atomic_inc(&irq_err_count);
+}
+
+void __init init_IRQ(void)
+{
+ int i;
+ unsigned int order = get_order(IRQ_STACK_SIZE);
+
+ for (i = 0; i < NR_IRQS; i++)
+ irq_set_noprobe(i);
+
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+
+ arch_init_irq();
+
+ for_each_possible_cpu(i) {
+ void *s = (void *)__get_free_pages(GFP_KERNEL, order);
+
+ irq_stack[i] = s;
+ pr_debug("CPU%d IRQ stack at 0x%p - 0x%p\n", i,
+ irq_stack[i], irq_stack[i] + IRQ_STACK_SIZE);
+ }
+}
+
+#ifdef CONFIG_DEBUG_STACKOVERFLOW
+static inline void check_stack_overflow(void)
+{
+ unsigned long sp;
+
+ __asm__ __volatile__("move %0, $sp" : "=r" (sp));
+ sp &= THREAD_MASK;
+
+ /*
+ * Check for stack overflow: is there less than STACK_WARN free?
+ * STACK_WARN is defined as 1/8 of THREAD_SIZE by default.
+ */
+ if (unlikely(sp < (sizeof(struct thread_info) + STACK_WARN))) {
+ printk("do_IRQ: stack overflow: %ld\n",
+ sp - sizeof(struct thread_info));
+ dump_stack();
+ }
+}
+#else
+static inline void check_stack_overflow(void) {}
+#endif
+
+
+/*
+ * do_IRQ handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+void __irq_entry do_IRQ(unsigned int irq)
+{
+ irq_enter();
+ check_stack_overflow();
+ generic_handle_irq(irq);
+ irq_exit();
+}
+
+#ifdef CONFIG_IRQ_DOMAIN
+void __irq_entry do_domain_IRQ(struct irq_domain *domain, unsigned int hwirq)
+{
+ irq_enter();
+ check_stack_overflow();
+ generic_handle_domain_irq(domain, hwirq);
+ irq_exit();
+}
+#endif
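
check_stack_overflow() exploits the fact that kernel stacks are
THREAD_SIZE-aligned: masking the live stack pointer with THREAD_MASK yields
its offset from the stack's base, which shrinks as the downward-growing
stack fills. A sketch with hypothetical sizes (the thread_info term is
omitted here):

    #include <stdio.h>
    #include <stdint.h>

    #define THREAD_SIZE 0x4000ul        /* hypothetical: 16 KiB stacks */
    #define THREAD_MASK (THREAD_SIZE - 1)
    #define STACK_WARN  (THREAD_SIZE / 8)

    static int overflowing(uintptr_t sp)
    {
        uintptr_t left = sp & THREAD_MASK;  /* bytes left above the base */
        return left < STACK_WARN;
    }

    int main(void)
    {
        printf("%d %d\n",
               overflowing(0x81003f00),     /* plenty left: prints 0 */
               overflowing(0x81000100));    /* nearly full: prints 1 */
        return 0;
    }
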
diff --git a/arch/mips/kernel/irq_txx9.c b/arch/mips/kernel/irq_txx9.c
new file mode 100644
index 000000000..af3ef4c9f
--- /dev/null
+++ b/arch/mips/kernel/irq_txx9.c
@@ -0,0 +1,178 @@
+/*
+ * Based on linux/arch/mips/jmr3927/rbhma3100/irq.c,
+ * linux/arch/mips/tx4927/common/tx4927_irq.c,
+ * linux/arch/mips/tx4938/common/irq.c
+ *
+ * Copyright 2001, 2003-2005 MontaVista Software Inc.
+ * Author: MontaVista Software, Inc.
+ * ahennessy@mvista.com
+ * source@mvista.com
+ * Copyright (C) 2000-2001 Toshiba Corporation
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+#include <asm/txx9irq.h>
+
+struct txx9_irc_reg {
+ u32 cer;
+ u32 cr[2];
+ u32 unused0;
+ u32 ilr[8];
+ u32 unused1[4];
+ u32 imr;
+ u32 unused2[7];
+ u32 scr;
+ u32 unused3[7];
+ u32 ssr;
+ u32 unused4[7];
+ u32 csr;
+};
+
+/* IRCER : Int. Control Enable */
+#define TXx9_IRCER_ICE 0x00000001
+
+/* IRCR : Int. Control */
+#define TXx9_IRCR_LOW 0x00000000
+#define TXx9_IRCR_HIGH 0x00000001
+#define TXx9_IRCR_DOWN 0x00000002
+#define TXx9_IRCR_UP 0x00000003
+#define TXx9_IRCR_EDGE(cr) ((cr) & 0x00000002)
+
+/* IRSCR : Int. Status Control */
+#define TXx9_IRSCR_EIClrE 0x00000100
+#define TXx9_IRSCR_EIClr_MASK 0x0000000f
+
+/* IRCSR : Int. Current Status */
+#define TXx9_IRCSR_IF 0x00010000
+#define TXx9_IRCSR_ILV_MASK 0x00000700
+#define TXx9_IRCSR_IVL_MASK 0x0000001f
+
+#define irc_dlevel 0
+#define irc_elevel 1
+
+static struct txx9_irc_reg __iomem *txx9_ircptr __read_mostly;
+
+static struct {
+ unsigned char level;
+ unsigned char mode;
+} txx9irq[TXx9_MAX_IR] __read_mostly;
+
+static void txx9_irq_unmask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (txx9irq[irq_nr].level << ofs),
+ ilrp);
+}
+
+static inline void txx9_irq_mask(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 __iomem *ilrp = &txx9_ircptr->ilr[(irq_nr % 16) / 2];
+ int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;
+
+ __raw_writel((__raw_readl(ilrp) & ~(0xff << ofs))
+ | (irc_dlevel << ofs),
+ ilrp);
+ mmiowb();
+}
+
+static void txx9_irq_mask_ack(struct irq_data *d)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+
+ txx9_irq_mask(d);
+ /* clear edge detection */
+ if (unlikely(TXx9_IRCR_EDGE(txx9irq[irq_nr].mode)))
+ __raw_writel(TXx9_IRSCR_EIClrE | irq_nr, &txx9_ircptr->scr);
+}
+
+static int txx9_irq_set_type(struct irq_data *d, unsigned int flow_type)
+{
+ unsigned int irq_nr = d->irq - TXX9_IRQ_BASE;
+ u32 cr;
+ u32 __iomem *crp;
+ int ofs;
+ int mode;
+
+ if (flow_type & IRQF_TRIGGER_PROBE)
+ return 0;
+ switch (flow_type & IRQF_TRIGGER_MASK) {
+ case IRQF_TRIGGER_RISING: mode = TXx9_IRCR_UP; break;
+ case IRQF_TRIGGER_FALLING: mode = TXx9_IRCR_DOWN; break;
+ case IRQF_TRIGGER_HIGH: mode = TXx9_IRCR_HIGH; break;
+ case IRQF_TRIGGER_LOW: mode = TXx9_IRCR_LOW; break;
+ default:
+ return -EINVAL;
+ }
+ crp = &txx9_ircptr->cr[(unsigned int)irq_nr / 8];
+ cr = __raw_readl(crp);
+ ofs = (irq_nr & (8 - 1)) * 2;
+ cr &= ~(0x3 << ofs);
+ cr |= (mode & 0x3) << ofs;
+ __raw_writel(cr, crp);
+ txx9irq[irq_nr].mode = mode;
+ return 0;
+}
+
+static struct irq_chip txx9_irq_chip = {
+ .name = "TXX9",
+ .irq_ack = txx9_irq_mask_ack,
+ .irq_mask = txx9_irq_mask,
+ .irq_mask_ack = txx9_irq_mask_ack,
+ .irq_unmask = txx9_irq_unmask,
+ .irq_set_type = txx9_irq_set_type,
+};
+
+void __init txx9_irq_init(unsigned long baseaddr)
+{
+ int i;
+
+ txx9_ircptr = ioremap(baseaddr, sizeof(struct txx9_irc_reg));
+ for (i = 0; i < TXx9_MAX_IR; i++) {
+ txx9irq[i].level = 4; /* middle level */
+ txx9irq[i].mode = TXx9_IRCR_LOW;
+ irq_set_chip_and_handler(TXX9_IRQ_BASE + i, &txx9_irq_chip,
+ handle_level_irq);
+ }
+
+ /* mask all IRC interrupts */
+ __raw_writel(0, &txx9_ircptr->imr);
+ for (i = 0; i < 8; i++)
+ __raw_writel(0, &txx9_ircptr->ilr[i]);
+ /* setup IRC interrupt mode (Low Active) */
+ for (i = 0; i < 2; i++)
+ __raw_writel(0, &txx9_ircptr->cr[i]);
+ /* enable interrupt control */
+ __raw_writel(TXx9_IRCER_ICE, &txx9_ircptr->cer);
+ __raw_writel(irc_elevel, &txx9_ircptr->imr);
+}
+
+int __init txx9_irq_set_pri(int irc_irq, int new_pri)
+{
+ int old_pri;
+
+ if ((unsigned int)irc_irq >= TXx9_MAX_IR)
+ return 0;
+ old_pri = txx9irq[irc_irq].level;
+ txx9irq[irc_irq].level = new_pri;
+ return old_pri;
+}
+
+int txx9_irq(void)
+{
+ u32 csr = __raw_readl(&txx9_ircptr->csr);
+
+ if (likely(!(csr & TXx9_IRCSR_IF)))
+ return TXX9_IRQ_BASE + (csr & (TXx9_MAX_IR - 1));
+ return -1;
+}
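
The ILR addressing in txx9_irq_mask() and txx9_irq_unmask() is easy to
misread: each 32-bit ILR word carries four 8-bit level fields, holding IRQs
n, n+1, n+16 and n+17. A sketch that prints the word index and bit offset
the driver computes:

    #include <stdio.h>

    int main(void)
    {
        static const unsigned int irqs[] = { 0, 1, 2, 16, 17 };
        unsigned int i;

        for (i = 0; i < sizeof(irqs) / sizeof(irqs[0]); i++) {
            unsigned int irq_nr = irqs[i];
            unsigned int idx = (irq_nr % 16) / 2;
            int ofs = irq_nr / 16 * 16 + (irq_nr & 1) * 8;

            printf("irq %2u -> ilr[%u], bits %2d..%2d\n",
                   irq_nr, idx, ofs, ofs + 7);
        }
        return 0;
    }
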
diff --git a/arch/mips/kernel/jump_label.c b/arch/mips/kernel/jump_label.c
new file mode 100644
index 000000000..f7978d50a
--- /dev/null
+++ b/arch/mips/kernel/jump_label.c
@@ -0,0 +1,109 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2010 Cavium Networks, Inc.
+ */
+
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <linux/cpu.h>
+
+#include <asm/cacheflush.h>
+#include <asm/inst.h>
+
+/*
+ * Define parameters for the standard MIPS and the microMIPS jump
+ * instruction encoding respectively:
+ *
+ * - the ISA bit of the target, either 0 or 1 respectively,
+ *
+ * - the amount the jump target address is shifted right to fit in the
+ * immediate field of the machine instruction, either 2 or 1,
+ *
+ * - the mask determining the size of the jump region relative to the
+ * delay-slot instruction, either 256MB or 128MB,
+ *
+ * - the jump target alignment, either 4 or 2 bytes.
+ */
+#define J_ISA_BIT IS_ENABLED(CONFIG_CPU_MICROMIPS)
+#define J_RANGE_SHIFT (2 - J_ISA_BIT)
+#define J_RANGE_MASK ((1ul << (26 + J_RANGE_SHIFT)) - 1)
+#define J_ALIGN_MASK ((1ul << J_RANGE_SHIFT) - 1)
+
+void arch_jump_label_transform(struct jump_entry *e,
+ enum jump_label_type type)
+{
+ union mips_instruction *insn_p;
+ union mips_instruction insn;
+ long offset;
+
+ insn_p = (union mips_instruction *)msk_isa16_mode(e->code);
+
+ /* Target must have the right alignment and ISA must be preserved. */
+ BUG_ON((e->target & J_ALIGN_MASK) != J_ISA_BIT);
+
+ if (type == JUMP_LABEL_JMP) {
+ if (!IS_ENABLED(CONFIG_CPU_MICROMIPS) && MIPS_ISA_REV >= 6) {
+ offset = e->target - ((unsigned long)insn_p + 4);
+ offset >>= 2;
+
+ /*
+ * The branch offset must fit in the instruction's 26
+ * bit field.
+ */
+ WARN_ON((offset >= (long)BIT(25)) ||
+ (offset < -(long)BIT(25)));
+
+ insn.j_format.opcode = bc6_op;
+ insn.j_format.target = offset;
+ } else {
+ /*
+ * A jump only works within the aligned region its
+ * delay slot is in.
+ */
+ WARN_ON((e->target & ~J_RANGE_MASK) !=
+ ((e->code + 4) & ~J_RANGE_MASK));
+
+ insn.j_format.opcode = J_ISA_BIT ? mm_j32_op : j_op;
+ insn.j_format.target = e->target >> J_RANGE_SHIFT;
+ }
+ } else {
+ insn.word = 0; /* nop */
+ }
+
+ mutex_lock(&text_mutex);
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
+ insn_p->halfword[0] = insn.word >> 16;
+ insn_p->halfword[1] = insn.word;
+ } else
+ *insn_p = insn;
+
+ flush_icache_range((unsigned long)insn_p,
+ (unsigned long)insn_p + sizeof(*insn_p));
+
+ mutex_unlock(&text_mutex);
+}
+
+#ifdef CONFIG_MODULES
+void jump_label_apply_nops(struct module *mod)
+{
+ struct jump_entry *iter_start = mod->jump_entries;
+ struct jump_entry *iter_stop = iter_start + mod->num_jump_entries;
+ struct jump_entry *iter;
+
+ /* if the module doesn't have jump label entries, just return */
+ if (iter_start == iter_stop)
+ return;
+
+ for (iter = iter_start; iter < iter_stop; iter++) {
+ /* Only write NOPs for arch_branch_static(). */
+ if (jump_label_init_type(iter) == JUMP_LABEL_NOP)
+ arch_jump_label_transform(iter, JUMP_LABEL_NOP);
+ }
+}
+#endif
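
The encoding parameters at the top of the file collapse to concrete values
for the two ISAs: standard MIPS gets a 256 MB jump region and 4-byte
alignment, microMIPS a 128 MB region and 2-byte alignment. A quick check of
that arithmetic:

    #include <stdio.h>

    int main(void)
    {
        int isa;

        for (isa = 0; isa <= 1; isa++) {    /* 0: MIPS, 1: microMIPS */
            int shift = 2 - isa;
            unsigned long range = (1ul << (26 + shift)) - 1;
            unsigned long align = (1ul << shift) - 1;

            printf("isa=%d range_mask=%#lx (%lu MB) align_mask=%#lx\n",
                   isa, range, (range + 1) >> 20, align);
        }
        return 0;
    }
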
diff --git a/arch/mips/kernel/kgdb.c b/arch/mips/kernel/kgdb.c
new file mode 100644
index 000000000..09a2d7bb9
--- /dev/null
+++ b/arch/mips/kernel/kgdb.c
@@ -0,0 +1,394 @@
+/*
+ * Originally written by Glenn Engel, Lake Stevens Instrument Division
+ *
+ * Contributed by HP Systems
+ *
+ * Modified for Linux/MIPS (and MIPS in general) by Andreas Busse
+ * Send complaints, suggestions etc. to <andy@waldorf-gmbh.de>
+ *
+ * Copyright (C) 1995 Andreas Busse
+ *
+ * Copyright (C) 2003 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ *
+ * Copyright (C) 2004-2005 MontaVista Software Inc.
+ * Author: Manish Lachwani, mlachwani@mvista.com or manish@koffee-break.com
+ *
+ * Copyright (C) 2007-2008 Wind River Systems, Inc.
+ * Author/Maintainer: Jason Wessel, jason.wessel@windriver.com
+ *
+ * This file is licensed under the terms of the GNU General Public License
+ * version 2. This program is licensed "as is" without any warranty of any
+ * kind, whether express or implied.
+ */
+
+#include <linux/ptrace.h> /* for linux pt_regs struct */
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#include <asm/inst.h>
+#include <asm/fpu.h>
+#include <asm/cacheflush.h>
+#include <asm/processor.h>
+#include <asm/sigcontext.h>
+#include <asm/irq_regs.h>
+
+static struct hard_trap_info {
+ unsigned char tt; /* Trap type code for MIPS R3xxx and R4xxx */
+ unsigned char signo; /* Signal that we map this trap into */
+} hard_trap_info[] = {
+ { 6, SIGBUS }, /* instruction bus error */
+ { 7, SIGBUS }, /* data bus error */
+ { 9, SIGTRAP }, /* break */
+/* { 11, SIGILL }, */ /* CPU unusable */
+ { 12, SIGFPE }, /* overflow */
+ { 13, SIGTRAP }, /* trap */
+ { 14, SIGSEGV }, /* virtual instruction cache coherency */
+ { 15, SIGFPE }, /* floating point exception */
+ { 23, SIGSEGV }, /* watch */
+ { 31, SIGSEGV }, /* virtual data cache coherency */
+ { 0, 0} /* Must be last */
+};
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[0]) },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[1]) },
+ { "v0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[2]) },
+ { "v1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[3]) },
+ { "a0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[4]) },
+ { "a1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[5]) },
+ { "a2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[6]) },
+ { "a3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[7]) },
+ { "t0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[8]) },
+ { "t1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[9]) },
+ { "t2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[10]) },
+ { "t3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[11]) },
+ { "t4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[12]) },
+ { "t5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[13]) },
+ { "t6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[14]) },
+ { "t7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[15]) },
+ { "s0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[16]) },
+ { "s1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[17]) },
+ { "s2", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[18]) },
+ { "s3", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[19]) },
+ { "s4", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[20]) },
+ { "s5", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[21]) },
+ { "s6", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[22]) },
+ { "s7", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[23]) },
+ { "t8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[24]) },
+ { "t9", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[25]) },
+ { "k0", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[26]) },
+ { "k1", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[27]) },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[28]) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[29]) },
+ { "s8", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[30]) },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, regs[31]) },
+ { "sr", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_status) },
+ { "lo", GDB_SIZEOF_REG, offsetof(struct pt_regs, lo) },
+ { "hi", GDB_SIZEOF_REG, offsetof(struct pt_regs, hi) },
+ { "bad", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_badvaddr) },
+ { "cause", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_cause) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, cp0_epc) },
+ { "f0", GDB_SIZEOF_REG, 0 },
+ { "f1", GDB_SIZEOF_REG, 1 },
+ { "f2", GDB_SIZEOF_REG, 2 },
+ { "f3", GDB_SIZEOF_REG, 3 },
+ { "f4", GDB_SIZEOF_REG, 4 },
+ { "f5", GDB_SIZEOF_REG, 5 },
+ { "f6", GDB_SIZEOF_REG, 6 },
+ { "f7", GDB_SIZEOF_REG, 7 },
+ { "f8", GDB_SIZEOF_REG, 8 },
+ { "f9", GDB_SIZEOF_REG, 9 },
+ { "f10", GDB_SIZEOF_REG, 10 },
+ { "f11", GDB_SIZEOF_REG, 11 },
+ { "f12", GDB_SIZEOF_REG, 12 },
+ { "f13", GDB_SIZEOF_REG, 13 },
+ { "f14", GDB_SIZEOF_REG, 14 },
+ { "f15", GDB_SIZEOF_REG, 15 },
+ { "f16", GDB_SIZEOF_REG, 16 },
+ { "f17", GDB_SIZEOF_REG, 17 },
+ { "f18", GDB_SIZEOF_REG, 18 },
+ { "f19", GDB_SIZEOF_REG, 19 },
+ { "f20", GDB_SIZEOF_REG, 20 },
+ { "f21", GDB_SIZEOF_REG, 21 },
+ { "f22", GDB_SIZEOF_REG, 22 },
+ { "f23", GDB_SIZEOF_REG, 23 },
+ { "f24", GDB_SIZEOF_REG, 24 },
+ { "f25", GDB_SIZEOF_REG, 25 },
+ { "f26", GDB_SIZEOF_REG, 26 },
+ { "f27", GDB_SIZEOF_REG, 27 },
+ { "f28", GDB_SIZEOF_REG, 28 },
+ { "f29", GDB_SIZEOF_REG, 29 },
+ { "f30", GDB_SIZEOF_REG, 30 },
+ { "f31", GDB_SIZEOF_REG, 31 },
+ { "fsr", GDB_SIZEOF_REG, 0 },
+ { "fir", GDB_SIZEOF_REG, 0 },
+};
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno < 0 || regno >= DBG_MAX_REG_NUM)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ return 0;
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy((void *)&current->thread.fpu.fcr31, mem,
+ dbg_reg_def[regno].size);
+ goto out_save;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ goto out_save;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy((void *)&current->thread.fpu.fpr[fp_reg], mem,
+ dbg_reg_def[regno].size);
+out_save:
+ restore_fp(current);
+ }
+
+ return 0;
+}
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ int fp_reg;
+
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1 && regno < 38) {
+ /* First 38 registers */
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ } else if (current && dbg_reg_def[regno].offset != -1 && regno < 72) {
+ /* FP registers 38 -> 69 */
+ if (!(regs->cp0_status & ST0_CU1))
+ goto out;
+ save_fp(current);
+ if (regno == 70) {
+ /* Process the fcr31/fsr (register 70) */
+ memcpy(mem, (void *)&current->thread.fpu.fcr31,
+ dbg_reg_def[regno].size);
+ goto out;
+ } else if (regno == 71) {
+ /* Ignore the fir (register 71) */
+ memset(mem, 0, dbg_reg_def[regno].size);
+ goto out;
+ }
+ fp_reg = dbg_reg_def[regno].offset;
+ memcpy(mem, (void *)&current->thread.fpu.fpr[fp_reg],
+ dbg_reg_def[regno].size);
+ }
+
+out:
+ return dbg_reg_def[regno].name;
+}
+
+void arch_kgdb_breakpoint(void)
+{
+ __asm__ __volatile__(
+ ".globl breakinst\n\t"
+ ".set\tnoreorder\n\t"
+ "nop\n"
+ "breakinst:\tbreak\n\t"
+ "nop\n\t"
+ ".set\treorder");
+}
+
+static int compute_signal(int tt)
+{
+ struct hard_trap_info *ht;
+
+ for (ht = hard_trap_info; ht->tt && ht->signo; ht++)
+ if (ht->tt == tt)
+ return ht->signo;
+
+ return SIGHUP; /* default for things we don't know about */
+}
+
+/*
+ * Similar to regs_to_gdb_regs() except that process is sleeping and so
+ * we may not be able to get all the info.
+ */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ int reg;
+#if (KGDB_GDB_REG_SIZE == 32)
+ u32 *ptr = (u32 *)gdb_regs;
+#else
+ u64 *ptr = (u64 *)gdb_regs;
+#endif
+
+ for (reg = 0; reg < 16; reg++)
+ *(ptr++) = 0;
+
+ /* S0 - S7 */
+ *(ptr++) = p->thread.reg16;
+ *(ptr++) = p->thread.reg17;
+ *(ptr++) = p->thread.reg18;
+ *(ptr++) = p->thread.reg19;
+ *(ptr++) = p->thread.reg20;
+ *(ptr++) = p->thread.reg21;
+ *(ptr++) = p->thread.reg22;
+ *(ptr++) = p->thread.reg23;
+
+ for (reg = 24; reg < 28; reg++)
+ *(ptr++) = 0;
+
+ /* GP, SP, FP, RA */
+ *(ptr++) = (long)p;
+ *(ptr++) = p->thread.reg29;
+ *(ptr++) = p->thread.reg30;
+ *(ptr++) = p->thread.reg31;
+
+ *(ptr++) = p->thread.cp0_status;
+
+ /* lo, hi */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * BadVAddr, Cause
+ * Ideally these would come from the last exception frame up the stack
+ * but that requires unwinding, otherwise we can't know much for sure.
+ */
+ *(ptr++) = 0;
+ *(ptr++) = 0;
+
+ /*
+ * PC
+ * use return address (RA), i.e. the moment after return from resume()
+ */
+ *(ptr++) = p->thread.reg31;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->cp0_epc = pc;
+}
+
+/*
+ * Calls linux_debug_hook before the kernel dies. If KGDB is enabled,
+ * then try to fall into the debugger
+ */
+static int kgdb_mips_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ int trap = (regs->cp0_cause & 0x7c) >> 2;
+
+#ifdef CONFIG_KPROBES
+ /*
+ * Return immediately if the kprobes fault notifier has set
+ * DIE_PAGE_FAULT.
+ */
+ if (cmd == DIE_PAGE_FAULT)
+ return NOTIFY_DONE;
+#endif /* CONFIG_KPROBES */
+
+ /* Userspace events, ignore. */
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_active) != -1)
+ kgdb_nmicallback(smp_processor_id(), regs);
+
+ if (kgdb_handle_exception(trap, compute_signal(trap), cmd, regs))
+ return NOTIFY_DONE;
+
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if ((trap == 9) && (regs->cp0_epc == (unsigned long)breakinst))
+ regs->cp0_epc += 4;
+
+ /* In SMP mode, __flush_cache_all does IPI */
+ local_irq_enable();
+ __flush_cache_all();
+
+ return NOTIFY_STOP;
+}
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+int kgdb_ll_trap(int cmd, const char *str,
+ struct pt_regs *regs, long err, int trap, int sig)
+{
+ struct die_args args = {
+ .regs = regs,
+ .str = str,
+ .err = err,
+ .trapnr = trap,
+ .signr = sig,
+
+ };
+
+ if (!kgdb_io_module_registered)
+ return NOTIFY_DONE;
+
+ return kgdb_mips_notify(NULL, cmd, &args);
+}
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_mips_notify,
+};
+
+/*
+ * Handle the 'c' command
+ */
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ char *ptr;
+ unsigned long address;
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ /* handle the optional parameter */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &address))
+ regs->cp0_epc = address;
+
+ return 0;
+ }
+
+ return -1;
+}
+
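+/*
+ * The breakpoint is a plain MIPS "break" instruction: the major opcode
+ * (spec_op) fills the top six bits of the word and the break_op function
+ * code the bottom six, so the two initialisers below spell out the same
+ * 32-bit word in big- and little-endian byte order.
+ */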
+const struct kgdb_arch arch_kgdb_ops = {
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ .gdb_bpt_instr = { spec_op << 2, 0x00, 0x00, break_op },
+#else
+ .gdb_bpt_instr = { break_op, 0x00, 0x00, spec_op << 2 },
+#endif
+};
+
+int kgdb_arch_init(void)
+{
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+/*
+ * kgdb_arch_exit - Perform any architecture specific uninitialization.
+ *
+ * This function handles the uninitialization of any architecture
+ * specific callbacks, for dynamic registration and unregistration.
+ */
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
diff --git a/arch/mips/kernel/kprobes.c b/arch/mips/kernel/kprobes.c
new file mode 100644
index 000000000..316b27d0d
--- /dev/null
+++ b/arch/mips/kernel/kprobes.c
@@ -0,0 +1,525 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Kernel Probes (KProbes)
+ * arch/mips/kernel/kprobes.c
+ *
+ * Copyright 2006 Sony Corp.
+ * Copyright 2010 Cavium Networks
+ *
+ * Some portions copied from the powerpc version.
+ *
+ * Copyright (C) IBM Corporation, 2002, 2004
+ */
+
+#define pr_fmt(fmt) "kprobes: " fmt
+
+#include <linux/kprobes.h>
+#include <linux/preempt.h>
+#include <linux/uaccess.h>
+#include <linux/kdebug.h>
+#include <linux/slab.h>
+
+#include <asm/ptrace.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+
+#include "probes-common.h"
+
+static const union mips_instruction breakpoint_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_BP,
+ .func = break_op
+ }
+};
+
+static const union mips_instruction breakpoint2_insn = {
+ .b_format = {
+ .opcode = spec_op,
+ .code = BRK_KPROBE_SSTEPBP,
+ .func = break_op
+ }
+};
+
+DEFINE_PER_CPU(struct kprobe *, current_kprobe);
+DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
+
+static int insn_has_delayslot(union mips_instruction insn)
+{
+ return __insn_has_delay_slot(insn);
+}
+NOKPROBE_SYMBOL(insn_has_delayslot);
+
+/*
+ * insn_has_ll_or_sc() checks whether the instruction is an ll or sc one;
+ * putting a breakpoint on top of an atomic ll/sc pair is a bad idea, so
+ * we refuse kprobe insertion for such instructions. We cannot do much
+ * about a breakpoint in the middle of an ll/sc pair; it is up to the
+ * user to avoid those places.
+ */
+static int insn_has_ll_or_sc(union mips_instruction insn)
+{
+ int ret = 0;
+
+ switch (insn.i_format.opcode) {
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+ ret = 1;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+NOKPROBE_SYMBOL(insn_has_ll_or_sc);
+
+int arch_prepare_kprobe(struct kprobe *p)
+{
+ union mips_instruction insn;
+ union mips_instruction prev_insn;
+ int ret = 0;
+
+ insn = p->addr[0];
+
+ if (insn_has_ll_or_sc(insn)) {
+ pr_notice("Kprobes for ll and sc instructions are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (copy_from_kernel_nofault(&prev_insn, p->addr - 1,
+ sizeof(mips_instruction)) == 0 &&
+ insn_has_delayslot(prev_insn)) {
+ pr_notice("Kprobes for branch delayslot are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+ if (__insn_is_compact_branch(insn)) {
+ pr_notice("Kprobes for compact branches are not supported\n");
+ ret = -EINVAL;
+ goto out;
+ }
+
+	/* insn: on MIPS the instruction copy must live on a special executable page. */
+ p->ainsn.insn = get_insn_slot();
+ if (!p->ainsn.insn) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+	/*
+	 * In the kprobe->ainsn.insn[] array we store the original
+	 * instruction at index zero and a break trap instruction at
+	 * index one.
+	 *
+	 * On MIPS, if the instruction at the probed address is a branch,
+	 * we need to execute the instruction in its branch delay slot
+	 * (BD) at the time of the probe hit. As MIPS also has no hardware
+	 * single-stepping, the BD instruction cannot be executed in-line;
+	 * it is executed out of line on the SSOL slot, using a normal
+	 * breakpoint instruction in the next slot.
+	 * So, read the instruction and save it for later execution.
+	 */
+ if (insn_has_delayslot(insn))
+ memcpy(&p->ainsn.insn[0], p->addr + 1, sizeof(kprobe_opcode_t));
+ else
+ memcpy(&p->ainsn.insn[0], p->addr, sizeof(kprobe_opcode_t));
+
+ p->ainsn.insn[1] = breakpoint2_insn;
+ p->opcode = *p->addr;
+
+out:
+ return ret;
+}
+NOKPROBE_SYMBOL(arch_prepare_kprobe);
+
+void arch_arm_kprobe(struct kprobe *p)
+{
+ *p->addr = breakpoint_insn;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_arm_kprobe);
+
+void arch_disarm_kprobe(struct kprobe *p)
+{
+ *p->addr = p->opcode;
+ flush_insn_slot(p);
+}
+NOKPROBE_SYMBOL(arch_disarm_kprobe);
+
+void arch_remove_kprobe(struct kprobe *p)
+{
+ if (p->ainsn.insn) {
+ free_insn_slot(p->ainsn.insn, 0);
+ p->ainsn.insn = NULL;
+ }
+}
+NOKPROBE_SYMBOL(arch_remove_kprobe);
+
+static void save_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ kcb->prev_kprobe.kp = kprobe_running();
+ kcb->prev_kprobe.status = kcb->kprobe_status;
+ kcb->prev_kprobe.old_SR = kcb->kprobe_old_SR;
+ kcb->prev_kprobe.saved_SR = kcb->kprobe_saved_SR;
+ kcb->prev_kprobe.saved_epc = kcb->kprobe_saved_epc;
+}
+
+static void restore_previous_kprobe(struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, kcb->prev_kprobe.kp);
+ kcb->kprobe_status = kcb->prev_kprobe.status;
+ kcb->kprobe_old_SR = kcb->prev_kprobe.old_SR;
+ kcb->kprobe_saved_SR = kcb->prev_kprobe.saved_SR;
+ kcb->kprobe_saved_epc = kcb->prev_kprobe.saved_epc;
+}
+
+static void set_current_kprobe(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ __this_cpu_write(current_kprobe, p);
+ kcb->kprobe_saved_SR = kcb->kprobe_old_SR = (regs->cp0_status & ST0_IE);
+ kcb->kprobe_saved_epc = regs->cp0_epc;
+}
+
+/**
+ * evaluate_branch_instruction -
+ *
+ * Evaluate the branch instruction at the probed address during a probe
+ * hit. The result of the evaluation is the updated epc, and the
+ * instruction in the delay slot is single-stepped (using a normal
+ * breakpoint) on the SSOL slot.
+ *
+ * The result is also saved in the kprobe control block for later use,
+ * in case we need to execute the delay-slot instruction. The latter is
+ * not needed for a NOP in the delay slot, nor for branch-likely
+ * instructions when the branch is taken; in those cases we set the
+ * SKIP_DELAYSLOT flag in the kprobe control block.
+ */
+static int evaluate_branch_instruction(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ union mips_instruction insn = p->opcode;
+ long epc;
+ int ret = 0;
+
+ epc = regs->cp0_epc;
+ if (epc & 3)
+ goto unaligned;
+
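+	/* An all-zero word is a nop in the delay slot: nothing to execute. */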
+ if (p->ainsn.insn->word == 0)
+ kcb->flags |= SKIP_DELAYSLOT;
+ else
+ kcb->flags &= ~SKIP_DELAYSLOT;
+
+ ret = __compute_return_epc_for_insn(regs, insn);
+ if (ret < 0)
+ return ret;
+
+ if (ret == BRANCH_LIKELY_TAKEN)
+ kcb->flags |= SKIP_DELAYSLOT;
+
+ kcb->target_epc = regs->cp0_epc;
+
+ return 0;
+
+unaligned:
+ pr_notice("Failed to emulate branch instruction because of unaligned epc - sending SIGBUS to %s.\n", current->comm);
+ force_sig(SIGBUS);
+ return -EFAULT;
+
+}
+
+static void prepare_singlestep(struct kprobe *p, struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ int ret = 0;
+
+ regs->cp0_status &= ~ST0_IE;
+
+ /* single step inline if the instruction is a break */
+ if (p->opcode.word == breakpoint_insn.word ||
+ p->opcode.word == breakpoint2_insn.word)
+ regs->cp0_epc = (unsigned long)p->addr;
+ else if (insn_has_delayslot(p->opcode)) {
+ ret = evaluate_branch_instruction(p, regs, kcb);
+ if (ret < 0)
+ return;
+ }
+ regs->cp0_epc = (unsigned long)&p->ainsn.insn[0];
+}
+
+/*
+ * Called after single-stepping.  p->addr is the address of the
+ * instruction that has been replaced by the breakpoint instruction.
+ * To avoid the SMP problems that can occur when we temporarily put
+ * back the original opcode to single-step, we single-stepped a copy
+ * of the instruction. The address of this copy is p->ainsn.insn.
+ *
+ * This function prepares to return from the post-single-step
+ * breakpoint trap. In the case of branch instructions, the target
+ * epc is restored instead.
+ */
+static void resume_execution(struct kprobe *p,
+ struct pt_regs *regs,
+ struct kprobe_ctlblk *kcb)
+{
+ if (insn_has_delayslot(p->opcode))
+ regs->cp0_epc = kcb->target_epc;
+ else {
+ unsigned long orig_epc = kcb->kprobe_saved_epc;
+ regs->cp0_epc = orig_epc + 4;
+ }
+}
+NOKPROBE_SYMBOL(resume_execution);
+
+static int kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *p;
+ int ret = 0;
+ kprobe_opcode_t *addr;
+ struct kprobe_ctlblk *kcb;
+
+ addr = (kprobe_opcode_t *) regs->cp0_epc;
+
+ /*
+ * We don't want to be preempted for the entire
+ * duration of kprobe processing
+ */
+ preempt_disable();
+ kcb = get_kprobe_ctlblk();
+
+ /* Check we're not actually recursing */
+ if (kprobe_running()) {
+ p = get_kprobe(addr);
+ if (p) {
+ if (kcb->kprobe_status == KPROBE_HIT_SS &&
+ p->ainsn.insn->word == breakpoint_insn.word) {
+ regs->cp0_status &= ~ST0_IE;
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+ goto no_kprobe;
+ }
+			/*
+			 * We have reentered the kprobe_handler(), since
+			 * another probe was hit while within the handler.
+			 * Here we save the original kprobe variables and
+			 * just single-step on the instruction of the new
+			 * probe without calling any user handlers.
+			 */
+ save_previous_kprobe(kcb);
+ set_current_kprobe(p, regs, kcb);
+ kprobes_inc_nmissed_count(p);
+ prepare_singlestep(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_REENTER;
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ resume_execution(p, regs, kcb);
+ restore_previous_kprobe(kcb);
+ preempt_enable_no_resched();
+ }
+ return 1;
+ } else if (addr->word != breakpoint_insn.word) {
+			/*
+			 * The breakpoint instruction was removed by
+			 * another cpu right after we hit it; no further
+			 * handling of this interrupt is appropriate.
+			 */
+ ret = 1;
+ }
+ goto no_kprobe;
+ }
+
+ p = get_kprobe(addr);
+ if (!p) {
+ if (addr->word != breakpoint_insn.word) {
+ /*
+ * The breakpoint instruction was removed right
+ * after we hit it. Another cpu has removed
+ * either a probepoint or a debugger breakpoint
+ * at this address. In either case, no further
+ * handling of this interrupt is appropriate.
+ */
+ ret = 1;
+ }
+ /* Not one of ours: let kernel handle it */
+ goto no_kprobe;
+ }
+
+ set_current_kprobe(p, regs, kcb);
+ kcb->kprobe_status = KPROBE_HIT_ACTIVE;
+
+ if (p->pre_handler && p->pre_handler(p, regs)) {
+ /* handler has already set things up, so skip ss setup */
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ return 1;
+ }
+
+ prepare_singlestep(p, regs, kcb);
+ if (kcb->flags & SKIP_DELAYSLOT) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ if (p->post_handler)
+ p->post_handler(p, regs, 0);
+ resume_execution(p, regs, kcb);
+ preempt_enable_no_resched();
+ } else
+ kcb->kprobe_status = KPROBE_HIT_SS;
+
+ return 1;
+
+no_kprobe:
+ preempt_enable_no_resched();
+ return ret;
+
+}
+NOKPROBE_SYMBOL(kprobe_handler);
+
+static inline int post_kprobe_handler(struct pt_regs *regs)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (!cur)
+ return 0;
+
+ if ((kcb->kprobe_status != KPROBE_REENTER) && cur->post_handler) {
+ kcb->kprobe_status = KPROBE_HIT_SSDONE;
+ cur->post_handler(cur, regs, 0);
+ }
+
+ resume_execution(cur, regs, kcb);
+
+ regs->cp0_status |= kcb->kprobe_saved_SR;
+
+ /* Restore back the original saved kprobes variables and continue. */
+ if (kcb->kprobe_status == KPROBE_REENTER) {
+ restore_previous_kprobe(kcb);
+ goto out;
+ }
+ reset_current_kprobe();
+out:
+ preempt_enable_no_resched();
+
+ return 1;
+}
+
+int kprobe_fault_handler(struct pt_regs *regs, int trapnr)
+{
+ struct kprobe *cur = kprobe_running();
+ struct kprobe_ctlblk *kcb = get_kprobe_ctlblk();
+
+ if (kcb->kprobe_status & KPROBE_HIT_SS) {
+ resume_execution(cur, regs, kcb);
+ regs->cp0_status |= kcb->kprobe_old_SR;
+
+ reset_current_kprobe();
+ preempt_enable_no_resched();
+ }
+ return 0;
+}
+
+/*
+ * Wrapper routine for handling exceptions.
+ */
+int kprobe_exceptions_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+
+ struct die_args *args = (struct die_args *)data;
+ int ret = NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_BREAK:
+ if (kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+ case DIE_SSTEPBP:
+ if (post_kprobe_handler(args->regs))
+ ret = NOTIFY_STOP;
+ break;
+
+ case DIE_PAGE_FAULT:
+ /* kprobe_running() needs smp_processor_id() */
+ preempt_disable();
+
+ if (kprobe_running()
+ && kprobe_fault_handler(args->regs, args->trapnr))
+ ret = NOTIFY_STOP;
+ preempt_enable();
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+NOKPROBE_SYMBOL(kprobe_exceptions_notify);
+
+/*
+ * Function return probe trampoline:
+ *	- arch_init_kprobes() establishes a probepoint here
+ *	- When the probed function returns, this probe causes the
+ *	  handlers to fire
+ */
+static void __used kretprobe_trampoline_holder(void)
+{
+ asm volatile(
+ ".set push\n\t"
+ /* Keep the assembler from reordering and placing JR here. */
+ ".set noreorder\n\t"
+ "nop\n\t"
+ ".global __kretprobe_trampoline\n"
+ "__kretprobe_trampoline:\n\t"
+ "nop\n\t"
+ ".set pop"
+ : : : "memory");
+}
+
+void __kretprobe_trampoline(void);
+
+void arch_prepare_kretprobe(struct kretprobe_instance *ri,
+ struct pt_regs *regs)
+{
+ ri->ret_addr = (kprobe_opcode_t *) regs->regs[31];
+ ri->fp = NULL;
+
+ /* Replace the return addr with trampoline addr */
+ regs->regs[31] = (unsigned long)__kretprobe_trampoline;
+}
+NOKPROBE_SYMBOL(arch_prepare_kretprobe);
+
+/*
+ * Called when the probe at kretprobe trampoline is hit
+ */
+static int trampoline_probe_handler(struct kprobe *p,
+ struct pt_regs *regs)
+{
+ instruction_pointer(regs) = __kretprobe_trampoline_handler(regs, NULL);
+ /*
+ * By returning a non-zero value, we are telling
+ * kprobe_handler() that we don't want the post_handler
+ * to run (and have re-enabled preemption)
+ */
+ return 1;
+}
+NOKPROBE_SYMBOL(trampoline_probe_handler);
+
+int arch_trampoline_kprobe(struct kprobe *p)
+{
+ if (p->addr == (kprobe_opcode_t *)__kretprobe_trampoline)
+ return 1;
+
+ return 0;
+}
+NOKPROBE_SYMBOL(arch_trampoline_kprobe);
+
+static struct kprobe trampoline_p = {
+ .addr = (kprobe_opcode_t *)__kretprobe_trampoline,
+ .pre_handler = trampoline_probe_handler
+};
+
+int __init arch_init_kprobes(void)
+{
+ return register_kprobe(&trampoline_p);
+}
diff --git a/arch/mips/kernel/linux32.c b/arch/mips/kernel/linux32.c
new file mode 100644
index 000000000..6b61be486
--- /dev/null
+++ b/arch/mips/kernel/linux32.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Conversion between 32-bit and 64-bit native system calls.
+ *
+ * Copyright (C) 2000 Silicon Graphics, Inc.
+ * Written by Ulf Carlsson (ulfc@engr.sgi.com)
+ */
+#include <linux/compiler.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/file.h>
+#include <linux/highuid.h>
+#include <linux/resource.h>
+#include <linux/highmem.h>
+#include <linux/time.h>
+#include <linux/times.h>
+#include <linux/poll.h>
+#include <linux/skbuff.h>
+#include <linux/filter.h>
+#include <linux/shm.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/icmpv6.h>
+#include <linux/syscalls.h>
+#include <linux/sysctl.h>
+#include <linux/utime.h>
+#include <linux/utsname.h>
+#include <linux/personality.h>
+#include <linux/dnotify.h>
+#include <linux/binfmts.h>
+#include <linux/security.h>
+#include <linux/compat.h>
+#include <linux/vfs.h>
+#include <linux/ipc.h>
+#include <linux/slab.h>
+
+#include <net/sock.h>
+#include <net/scm.h>
+
+#include <asm/compat-signal.h>
+#include <asm/sim.h>
+#include <linux/uaccess.h>
+#include <asm/mmu_context.h>
+#include <asm/mman.h>
+
+#ifdef __MIPSEB__
+#define merge_64(r1, r2) ((((r1) & 0xffffffffUL) << 32) + ((r2) & 0xffffffffUL))
+#endif
+#ifdef __MIPSEL__
+#define merge_64(r1, r2) ((((r2) & 0xffffffffUL) << 32) + ((r1) & 0xffffffffUL))
+#endif
+
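+/*
+ * Example (big-endian kernel, __MIPSEB__): a 64-bit offset of
+ * 0x0000000100000002 arrives in an O32 register pair as
+ * a2 = 0x00000001 (high half) and a3 = 0x00000002 (low half);
+ * merge_64(a2, a3) reassembles the full value. On __MIPSEL__ the halves
+ * arrive swapped, hence the second definition. Every wrapper below uses
+ * this to rebuild 64-bit arguments split by the 32-bit ABI.
+ */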
+SYSCALL_DEFINE4(32_truncate64, const char __user *, path,
+ unsigned long, __dummy, unsigned long, a2, unsigned long, a3)
+{
+ return ksys_truncate(path, merge_64(a2, a3));
+}
+
+SYSCALL_DEFINE4(32_ftruncate64, unsigned long, fd, unsigned long, __dummy,
+ unsigned long, a2, unsigned long, a3)
+{
+ return ksys_ftruncate(fd, merge_64(a2, a3));
+}
+
+SYSCALL_DEFINE5(32_llseek, unsigned int, fd, unsigned int, offset_high,
+ unsigned int, offset_low, loff_t __user *, result,
+ unsigned int, origin)
+{
+ return sys_llseek(fd, offset_high, offset_low, result, origin);
+}
+
+/* From the Single Unix Spec: pread & pwrite act like lseek to pos + op +
+ lseek back to original location. They fail just like lseek does on
+ non-seekable files. */
+
+SYSCALL_DEFINE6(32_pread, unsigned long, fd, char __user *, buf, size_t, count,
+ unsigned long, unused, unsigned long, a4, unsigned long, a5)
+{
+ return ksys_pread64(fd, buf, count, merge_64(a4, a5));
+}
+
+SYSCALL_DEFINE6(32_pwrite, unsigned int, fd, const char __user *, buf,
+ size_t, count, u32, unused, u64, a4, u64, a5)
+{
+ return ksys_pwrite64(fd, buf, count, merge_64(a4, a5));
+}
+
+SYSCALL_DEFINE1(32_personality, unsigned long, personality)
+{
+ unsigned int p = personality & 0xffffffff;
+ int ret;
+
+ if (personality(current->personality) == PER_LINUX32 &&
+ personality(p) == PER_LINUX)
+ p = (p & ~PER_MASK) | PER_LINUX32;
+ ret = sys_personality(p);
+ if (ret != -1 && personality(ret) == PER_LINUX32)
+ ret = (ret & ~PER_MASK) | PER_LINUX;
+ return ret;
+}
+
+asmlinkage ssize_t sys32_readahead(int fd, u32 pad0, u64 a2, u64 a3,
+ size_t count)
+{
+ return ksys_readahead(fd, merge_64(a2, a3), count);
+}
+
+asmlinkage long sys32_sync_file_range(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags)
+{
+ return ksys_sync_file_range(fd,
+ merge_64(a2, a3), merge_64(a4, a5),
+ flags);
+}
+
+asmlinkage long sys32_fadvise64_64(int fd, int __pad,
+ unsigned long a2, unsigned long a3,
+ unsigned long a4, unsigned long a5,
+ int flags)
+{
+ return ksys_fadvise64_64(fd,
+ merge_64(a2, a3), merge_64(a4, a5),
+ flags);
+}
+
+asmlinkage long sys32_fallocate(int fd, int mode, unsigned offset_a2,
+ unsigned offset_a3, unsigned len_a4, unsigned len_a5)
+{
+ return ksys_fallocate(fd, mode, merge_64(offset_a2, offset_a3),
+ merge_64(len_a4, len_a5));
+}
diff --git a/arch/mips/kernel/machine_kexec.c b/arch/mips/kernel/machine_kexec.c
new file mode 100644
index 000000000..432bfd3e7
--- /dev/null
+++ b/arch/mips/kernel/machine_kexec.c
@@ -0,0 +1,264 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * machine_kexec.c for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 15:15:06 2006
+ */
+#include <linux/compiler.h>
+#include <linux/kexec.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/libfdt.h>
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+extern const unsigned char relocate_new_kernel[];
+extern const size_t relocate_new_kernel_size;
+
+extern unsigned long kexec_start_address;
+extern unsigned long kexec_indirection_page;
+
+static unsigned long reboot_code_buffer;
+
+#ifdef CONFIG_SMP
+static void (*relocated_kexec_smp_wait)(void *);
+
+atomic_t kexec_ready_to_reboot = ATOMIC_INIT(0);
+void (*_crash_smp_send_stop)(void) = NULL;
+#endif
+
+void (*_machine_kexec_shutdown)(void) = NULL;
+void (*_machine_crash_shutdown)(struct pt_regs *regs) = NULL;
+
+static void kexec_image_info(const struct kimage *kimage)
+{
+ unsigned long i;
+
+ pr_debug("kexec kimage info:\n");
+ pr_debug(" type: %d\n", kimage->type);
+ pr_debug(" start: %lx\n", kimage->start);
+ pr_debug(" head: %lx\n", kimage->head);
+ pr_debug(" nr_segments: %lu\n", kimage->nr_segments);
+
+ for (i = 0; i < kimage->nr_segments; i++) {
+ pr_debug(" segment[%lu]: %016lx - %016lx, 0x%lx bytes, %lu pages\n",
+ i,
+ kimage->segment[i].mem,
+ kimage->segment[i].mem + kimage->segment[i].memsz,
+ (unsigned long)kimage->segment[i].memsz,
+ (unsigned long)kimage->segment[i].memsz / PAGE_SIZE);
+ }
+}
+
+#ifdef CONFIG_UHI_BOOT
+
+static int uhi_machine_kexec_prepare(struct kimage *kimage)
+{
+ int i;
+
+	/*
+	 * If no DTB is passed to the new kernel, the kexec tool creates a
+	 * flat device tree itself; it holds the modified command line for
+	 * the new kernel.
+	 */
+ for (i = 0; i < kimage->nr_segments; i++) {
+ struct fdt_header fdt;
+
+ if (kimage->segment[i].memsz <= sizeof(fdt))
+ continue;
+
+ if (copy_from_user(&fdt, kimage->segment[i].buf, sizeof(fdt)))
+ continue;
+
+ if (fdt_check_header(&fdt))
+ continue;
+
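+		/*
+		 * UHI boot protocol: $a0 = -2 tells the new kernel that
+		 * $a1 holds the virtual address of the device tree blob.
+		 */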
+ kexec_args[0] = -2;
+ kexec_args[1] = (unsigned long)
+ phys_to_virt((unsigned long)kimage->segment[i].mem);
+ break;
+ }
+
+ return 0;
+}
+
+int (*_machine_kexec_prepare)(struct kimage *) = uhi_machine_kexec_prepare;
+
+#else
+
+int (*_machine_kexec_prepare)(struct kimage *) = NULL;
+
+#endif /* CONFIG_UHI_BOOT */
+
+int
+machine_kexec_prepare(struct kimage *kimage)
+{
+#ifdef CONFIG_SMP
+ if (!kexec_nonboot_cpu_func())
+ return -EINVAL;
+#endif
+
+ kexec_image_info(kimage);
+
+ if (_machine_kexec_prepare)
+ return _machine_kexec_prepare(kimage);
+
+ return 0;
+}
+
+void
+machine_kexec_cleanup(struct kimage *kimage)
+{
+}
+
+#ifdef CONFIG_SMP
+static void kexec_shutdown_secondary(void *param)
+{
+ int cpu = smp_processor_id();
+
+ if (!cpu_online(cpu))
+ return;
+
+ /* We won't be sent IPIs any more. */
+ set_cpu_online(cpu, false);
+
+ local_irq_disable();
+ while (!atomic_read(&kexec_ready_to_reboot))
+ cpu_relax();
+
+ kexec_reboot();
+
+ /* NOTREACHED */
+}
+#endif
+
+void
+machine_shutdown(void)
+{
+ if (_machine_kexec_shutdown)
+ _machine_kexec_shutdown();
+
+#ifdef CONFIG_SMP
+ smp_call_function(kexec_shutdown_secondary, NULL, 0);
+
+ while (num_online_cpus() > 1) {
+ cpu_relax();
+ mdelay(1);
+ }
+#endif
+}
+
+void
+machine_crash_shutdown(struct pt_regs *regs)
+{
+ if (_machine_crash_shutdown)
+ _machine_crash_shutdown(regs);
+ else
+ default_machine_crash_shutdown(regs);
+}
+
+#ifdef CONFIG_SMP
+void kexec_nonboot_cpu_jump(void)
+{
+ local_flush_icache_range((unsigned long)relocated_kexec_smp_wait,
+ reboot_code_buffer + relocate_new_kernel_size);
+
+ relocated_kexec_smp_wait(NULL);
+}
+#endif
+
+void kexec_reboot(void)
+{
+ void (*do_kexec)(void) __noreturn;
+
+ /*
+ * We know we were online, and there will be no incoming IPIs at
+ * this point. Mark online again before rebooting so that the crash
+ * analysis tool will see us correctly.
+ */
+ set_cpu_online(smp_processor_id(), true);
+
+ /* Ensure remote CPUs observe that we're online before rebooting. */
+ smp_mb__after_atomic();
+
+#ifdef CONFIG_SMP
+ if (smp_processor_id() > 0) {
+		/*
+		 * Non-boot CPUs jump into the relocated kexec_smp_wait
+		 * loop instead of spinning in cpu_relax(); this is needed
+		 * for an SMP kexec reboot. Kdump usually doesn't require
+		 * an SMP new kernel, but kexec may.
+		 */
+ kexec_nonboot_cpu();
+
+ /* NOTREACHED */
+ }
+#endif
+
+ /*
+ * Make sure we get correct instructions written by the
+ * machine_kexec() CPU.
+ */
+ local_flush_icache_range(reboot_code_buffer,
+ reboot_code_buffer + relocate_new_kernel_size);
+
+ do_kexec = (void *)reboot_code_buffer;
+ do_kexec();
+}
+
+void
+machine_kexec(struct kimage *image)
+{
+ unsigned long entry;
+ unsigned long *ptr;
+
+ reboot_code_buffer =
+ (unsigned long)page_address(image->control_code_page);
+
+ kexec_start_address =
+ (unsigned long) phys_to_virt(image->start);
+
+ if (image->type == KEXEC_TYPE_DEFAULT) {
+ kexec_indirection_page =
+ (unsigned long) phys_to_virt(image->head & PAGE_MASK);
+ } else {
+ kexec_indirection_page = (unsigned long)&image->head;
+ }
+
+ memcpy((void*)reboot_code_buffer, relocate_new_kernel,
+ relocate_new_kernel_size);
+
+	/*
+	 * The generic kexec code builds a page list with physical
+	 * addresses. They are directly accessible through KSEG0 (or
+	 * CKSEG0 or XKPHYS if on a 64-bit system), hence the
+	 * phys_to_virt() call.
+	 */
+	for (ptr = &image->head; (entry = *ptr) && !(entry & IND_DONE);
+	     ptr = (entry & IND_INDIRECTION) ?
+	       phys_to_virt(entry & PAGE_MASK) : ptr + 1) {
+		if (*ptr & IND_SOURCE || *ptr & IND_INDIRECTION ||
+		    *ptr & IND_DESTINATION)
+			*ptr = (unsigned long) phys_to_virt(*ptr);
+	}
+
+ /* Mark offline BEFORE disabling local irq. */
+ set_cpu_online(smp_processor_id(), false);
+
+	/*
+	 * We do not want to be bothered by interrupts from here on.
+	 */
+ local_irq_disable();
+
+ printk("Will call new kernel at %08lx\n", image->start);
+ printk("Bye ...\n");
+ /* Make reboot code buffer available to the boot CPU. */
+ __flush_cache_all();
+#ifdef CONFIG_SMP
+ /* All secondary cpus now may jump to kexec_wait cycle */
+ relocated_kexec_smp_wait = reboot_code_buffer +
+ (void *)(kexec_smp_wait - relocate_new_kernel);
+ smp_wmb();
+ atomic_set(&kexec_ready_to_reboot, 1);
+#endif
+ kexec_reboot();
+}
diff --git a/arch/mips/kernel/mcount.S b/arch/mips/kernel/mcount.S
new file mode 100644
index 000000000..cff52b283
--- /dev/null
+++ b/arch/mips/kernel/mcount.S
@@ -0,0 +1,220 @@
+/*
+ * MIPS specific _mcount support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive for
+ * more details.
+ *
+ * Copyright (C) 2009 Lemote Inc. & DSLab, Lanzhou University, China
+ * Copyright (C) 2010 DSLab, Lanzhou University, China
+ * Author: Wu Zhangjin <wuzhangjin@gmail.com>
+ */
+
+#include <asm/export.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/ftrace.h>
+
+ .text
+ .set noreorder
+ .set noat
+
+ .macro MCOUNT_SAVE_REGS
+ PTR_SUBU sp, PT_SIZE
+ PTR_S ra, PT_R31(sp)
+ PTR_S AT, PT_R1(sp)
+ PTR_S a0, PT_R4(sp)
+ PTR_S a1, PT_R5(sp)
+ PTR_S a2, PT_R6(sp)
+ PTR_S a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_S a4, PT_R8(sp)
+ PTR_S a5, PT_R9(sp)
+ PTR_S a6, PT_R10(sp)
+ PTR_S a7, PT_R11(sp)
+#endif
+ .endm
+
+ .macro MCOUNT_RESTORE_REGS
+ PTR_L ra, PT_R31(sp)
+ PTR_L AT, PT_R1(sp)
+ PTR_L a0, PT_R4(sp)
+ PTR_L a1, PT_R5(sp)
+ PTR_L a2, PT_R6(sp)
+ PTR_L a3, PT_R7(sp)
+#ifdef CONFIG_64BIT
+ PTR_L a4, PT_R8(sp)
+ PTR_L a5, PT_R9(sp)
+ PTR_L a6, PT_R10(sp)
+ PTR_L a7, PT_R11(sp)
+#endif
+ PTR_ADDIU sp, PT_SIZE
+ .endm
+
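+/*
+ * With -pg the compiler emits "move $at, $ra; jal _mcount" at each entry,
+ * so $at carries the instrumented function's original return address.
+ * RETURN_BACK jumps back and restores ra in the branch delay slot (the
+ * jal clobbered it).
+ */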
+ .macro RETURN_BACK
+ jr ra
+ move ra, AT
+ .endm
+
+/*
+ * The -mmcount-ra-address option of gcc 4.5 uses register $12 to pass
+ * the location of the parent's return address.
+ */
+#define MCOUNT_RA_ADDRESS_REG $12
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+
+NESTED(ftrace_caller, PT_SIZE, ra)
+ .globl _mcount
+_mcount:
+EXPORT_SYMBOL(_mcount)
+ b ftrace_stub
+#ifdef CONFIG_32BIT
+ addiu sp,sp,8
+#else
+ nop
+#endif
+
+ /* When tracing is activated, it calls ftrace_caller+8 (aka here) */
+ MCOUNT_SAVE_REGS
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+ PTR_S MCOUNT_RA_ADDRESS_REG, PT_R12(sp)
+#endif
+
+ PTR_SUBU a0, ra, 8 /* arg1: self address */
+ PTR_LA t1, _stext
+ sltu t2, a0, t1 /* t2 = (a0 < _stext) */
+ PTR_LA t1, _etext
+ sltu t3, t1, a0 /* t3 = (a0 > _etext) */
+ or t1, t2, t3
+ beqz t1, ftrace_call
+ nop
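+	/*
+	 * The call site lies outside [_stext, _etext], i.e. in a module;
+	 * adjust a0 to match the call-site address ftrace recorded for it
+	 * (the module calling sequence is longer).
+	 */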
+#if defined(KBUILD_MCOUNT_RA_ADDRESS) && defined(CONFIG_32BIT)
+ PTR_SUBU a0, a0, 16 /* arg1: adjust to module's recorded callsite */
+#else
+ PTR_SUBU a0, a0, 12
+#endif
+
+ .globl ftrace_call
+ftrace_call:
+ nop /* a placeholder for the call to a real tracing function */
+ move a1, AT /* arg2: parent's return address */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ .globl ftrace_graph_call
+ftrace_graph_call:
+ nop
+ nop
+#endif
+
+ MCOUNT_RESTORE_REGS
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(ftrace_caller)
+
+#else /* ! CONFIG_DYNAMIC_FTRACE */
+
+NESTED(_mcount, PT_SIZE, ra)
+EXPORT_SYMBOL(_mcount)
+ PTR_LA t1, ftrace_stub
+ PTR_L t2, ftrace_trace_function /* Prepare t2 for (1) */
+ beq t1, t2, fgraph_trace
+ nop
+
+ MCOUNT_SAVE_REGS
+
+ move a0, ra /* arg1: self return address */
+ jalr t2 /* (1) call *ftrace_trace_function */
+ move a1, AT /* arg2: parent's return address */
+
+ MCOUNT_RESTORE_REGS
+
+fgraph_trace:
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ PTR_LA t1, ftrace_stub
+ PTR_L t3, ftrace_graph_return
+ bne t1, t3, ftrace_graph_caller
+ nop
+ PTR_LA t1, ftrace_graph_entry_stub
+ PTR_L t3, ftrace_graph_entry
+ bne t1, t3, ftrace_graph_caller
+ nop
+#endif
+
+#ifdef CONFIG_32BIT
+ addiu sp, sp, 8
+#endif
+
+ .globl ftrace_stub
+ftrace_stub:
+ RETURN_BACK
+ END(_mcount)
+
+#endif /* ! CONFIG_DYNAMIC_FTRACE */
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+
+NESTED(ftrace_graph_caller, PT_SIZE, ra)
+#ifndef CONFIG_DYNAMIC_FTRACE
+ MCOUNT_SAVE_REGS
+#endif
+
+ /* arg1: Get the location of the parent's return address */
+#ifdef KBUILD_MCOUNT_RA_ADDRESS
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a0, PT_R12(sp)
+#else
+ move a0, MCOUNT_RA_ADDRESS_REG
+#endif
+ bnez a0, 1f /* non-leaf func: stored in MCOUNT_RA_ADDRESS_REG */
+ nop
+#endif
+ PTR_LA a0, PT_R1(sp) /* leaf func: the location in current stack */
+1:
+
+ /* arg2: Get self return address */
+#ifdef CONFIG_DYNAMIC_FTRACE
+ PTR_L a1, PT_R31(sp)
+#else
+ move a1, ra
+#endif
+
+ /* arg3: Get frame pointer of current stack */
+#ifdef CONFIG_64BIT
+ PTR_LA a2, PT_SIZE(sp)
+#else
+ PTR_LA a2, (PT_SIZE+8)(sp)
+#endif
+
+ jal prepare_ftrace_return
+ nop
+ MCOUNT_RESTORE_REGS
+#ifndef CONFIG_DYNAMIC_FTRACE
+#ifdef CONFIG_32BIT
+ addiu sp, sp, 8
+#endif
+#endif
+ RETURN_BACK
+ END(ftrace_graph_caller)
+
+ .align 2
+ .globl return_to_handler
+return_to_handler:
+ PTR_SUBU sp, PT_SIZE
+ PTR_S v0, PT_R2(sp)
+
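+	/* The v1 save below issues in the jal's delay slot (.set noreorder). */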
+ jal ftrace_return_to_handler
+ PTR_S v1, PT_R3(sp)
+
+ /* restore the real parent address: v0 -> ra */
+ move ra, v0
+
+ PTR_L v0, PT_R2(sp)
+ PTR_L v1, PT_R3(sp)
+ jr ra
+ PTR_ADDIU sp, PT_SIZE
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+ .set at
+ .set reorder
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
new file mode 100644
index 000000000..b4f7d950c
--- /dev/null
+++ b/arch/mips/kernel/mips-cm.c
@@ -0,0 +1,515 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/spinlock.h>
+
+#include <asm/mips-cps.h>
+#include <asm/mipsregs.h>
+
+void __iomem *mips_gcr_base;
+void __iomem *mips_cm_l2sync_base;
+int mips_cm_is64;
+
+static char *cm2_tr[8] = {
+ "mem", "gcr", "gic", "mmio",
+ "0x04", "cpc", "0x06", "0x07"
+};
+
+/* CM3 Tag ECC transaction type */
+static char *cm3_tr[16] = {
+ [0x0] = "ReqNoData",
+ [0x1] = "0x1",
+ [0x2] = "ReqWData",
+ [0x3] = "0x3",
+ [0x4] = "IReqNoResp",
+ [0x5] = "IReqWResp",
+ [0x6] = "IReqNoRespDat",
+ [0x7] = "IReqWRespDat",
+ [0x8] = "RespNoData",
+ [0x9] = "RespDataFol",
+ [0xa] = "RespWData",
+ [0xb] = "RespDataOnly",
+ [0xc] = "IRespNoData",
+ [0xd] = "IRespDataFol",
+ [0xe] = "IRespWData",
+ [0xf] = "IRespDataOnly"
+};
+
+static char *cm2_cmd[32] = {
+ [0x00] = "0x00",
+ [0x01] = "Legacy Write",
+ [0x02] = "Legacy Read",
+ [0x03] = "0x03",
+ [0x04] = "0x04",
+ [0x05] = "0x05",
+ [0x06] = "0x06",
+ [0x07] = "0x07",
+ [0x08] = "Coherent Read Own",
+ [0x09] = "Coherent Read Share",
+ [0x0a] = "Coherent Read Discard",
+ [0x0b] = "Coherent Ready Share Always",
+ [0x0c] = "Coherent Upgrade",
+ [0x0d] = "Coherent Writeback",
+ [0x0e] = "0x0e",
+ [0x0f] = "0x0f",
+ [0x10] = "Coherent Copyback",
+ [0x11] = "Coherent Copyback Invalidate",
+ [0x12] = "Coherent Invalidate",
+ [0x13] = "Coherent Write Invalidate",
+ [0x14] = "Coherent Completion Sync",
+ [0x15] = "0x15",
+ [0x16] = "0x16",
+ [0x17] = "0x17",
+ [0x18] = "0x18",
+ [0x19] = "0x19",
+ [0x1a] = "0x1a",
+ [0x1b] = "0x1b",
+ [0x1c] = "0x1c",
+ [0x1d] = "0x1d",
+ [0x1e] = "0x1e",
+ [0x1f] = "0x1f"
+};
+
+/* CM3 Tag ECC command type */
+static char *cm3_cmd[16] = {
+ [0x0] = "Legacy Read",
+ [0x1] = "Legacy Write",
+ [0x2] = "Coherent Read Own",
+ [0x3] = "Coherent Read Share",
+ [0x4] = "Coherent Read Discard",
+ [0x5] = "Coherent Evicted",
+ [0x6] = "Coherent Upgrade",
+ [0x7] = "Coherent Upgrade for Store Conditional",
+ [0x8] = "Coherent Writeback",
+ [0x9] = "Coherent Write Invalidate",
+ [0xa] = "0xa",
+ [0xb] = "0xb",
+ [0xc] = "0xc",
+ [0xd] = "0xd",
+ [0xe] = "0xe",
+ [0xf] = "0xf"
+};
+
+/* CM3 Tag ECC command group */
+static char *cm3_cmd_group[8] = {
+ [0x0] = "Normal",
+ [0x1] = "Registers",
+ [0x2] = "TLB",
+ [0x3] = "0x3",
+ [0x4] = "L1I",
+ [0x5] = "L1D",
+ [0x6] = "L3",
+ [0x7] = "L2"
+};
+
+static char *cm2_core[8] = {
+ "Invalid/OK", "Invalid/Data",
+ "Shared/OK", "Shared/Data",
+ "Modified/OK", "Modified/Data",
+ "Exclusive/OK", "Exclusive/Data"
+};
+
+static char *cm2_l2_type[4] = {
+ [0x0] = "None",
+ [0x1] = "Tag RAM single/double ECC error",
+ [0x2] = "Data RAM single/double ECC error",
+ [0x3] = "WS RAM uncorrectable dirty parity"
+};
+
+static char *cm2_l2_instr[32] = {
+ [0x00] = "L2_NOP",
+ [0x01] = "L2_ERR_CORR",
+ [0x02] = "L2_TAG_INV",
+ [0x03] = "L2_WS_CLEAN",
+ [0x04] = "L2_RD_MDYFY_WR",
+ [0x05] = "L2_WS_MRU",
+ [0x06] = "L2_EVICT_LN2",
+ [0x07] = "0x07",
+ [0x08] = "L2_EVICT",
+ [0x09] = "L2_REFL",
+ [0x0a] = "L2_RD",
+ [0x0b] = "L2_WR",
+ [0x0c] = "L2_EVICT_MRU",
+ [0x0d] = "L2_SYNC",
+ [0x0e] = "L2_REFL_ERR",
+ [0x0f] = "0x0f",
+ [0x10] = "L2_INDX_WB_INV",
+ [0x11] = "L2_INDX_LD_TAG",
+ [0x12] = "L2_INDX_ST_TAG",
+ [0x13] = "L2_INDX_ST_DATA",
+ [0x14] = "L2_INDX_ST_ECC",
+ [0x15] = "0x15",
+ [0x16] = "0x16",
+ [0x17] = "0x17",
+ [0x18] = "L2_FTCH_AND_LCK",
+ [0x19] = "L2_HIT_INV",
+ [0x1a] = "L2_HIT_WB_INV",
+ [0x1b] = "L2_HIT_WB",
+ [0x1c] = "0x1c",
+ [0x1d] = "0x1d",
+ [0x1e] = "0x1e",
+ [0x1f] = "0x1f"
+};
+
+static char *cm2_causes[32] = {
+ "None", "GC_WR_ERR", "GC_RD_ERR", "COH_WR_ERR",
+ "COH_RD_ERR", "MMIO_WR_ERR", "MMIO_RD_ERR", "0x07",
+ "0x08", "0x09", "0x0a", "0x0b",
+ "0x0c", "0x0d", "0x0e", "0x0f",
+ "0x10", "INTVN_WR_ERR", "INTVN_RD_ERR", "0x13",
+ "0x14", "0x15", "0x16", "0x17",
+ "L2_RD_UNCORR", "L2_WR_UNCORR", "L2_CORR", "0x1b",
+ "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static char *cm3_causes[32] = {
+ "0x0", "MP_CORRECTABLE_ECC_ERR", "MP_REQUEST_DECODE_ERR",
+ "MP_UNCORRECTABLE_ECC_ERR", "MP_PARITY_ERR", "MP_COHERENCE_ERR",
+ "CMBIU_REQUEST_DECODE_ERR", "CMBIU_PARITY_ERR", "CMBIU_AXI_RESP_ERR",
+ "0x9", "RBI_BUS_ERR", "0xb", "0xc", "0xd", "0xe", "0xf", "0x10",
+ "0x11", "0x12", "0x13", "0x14", "0x15", "0x16", "0x17", "0x18",
+ "0x19", "0x1a", "0x1b", "0x1c", "0x1d", "0x1e", "0x1f"
+};
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);
+
+phys_addr_t __mips_cm_phys_base(void)
+{
+ u32 config3 = read_c0_config3();
+ unsigned long cmgcr;
+
+ /* Check the CMGCRBase register is implemented */
+ if (!(config3 & MIPS_CONF3_CMGCR))
+ return 0;
+
+ /* Read the address from CMGCRBase */
+ cmgcr = read_c0_cmgcrbase();
+ return (cmgcr & MIPS_CMGCRF_BASE) << (36 - 32);
+}
+
+phys_addr_t mips_cm_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_phys_base")));
+
+phys_addr_t __mips_cm_l2sync_phys_base(void)
+{
+ u32 base_reg;
+
+	/*
+	 * If the L2-only sync region is already enabled then leave it at
+	 * its current location.
+	 */
+ base_reg = read_gcr_l2_only_sync_base();
+ if (base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN)
+ return base_reg & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE;
+
+ /* Default to following the CM */
+ return mips_cm_phys_base() + MIPS_CM_GCR_SIZE;
+}
+
+phys_addr_t mips_cm_l2sync_phys_base(void)
+ __attribute__((weak, alias("__mips_cm_l2sync_phys_base")));
+
+static void mips_cm_probe_l2sync(void)
+{
+ unsigned major_rev;
+ phys_addr_t addr;
+
+ /* L2-only sync was introduced with CM major revision 6 */
+ major_rev = FIELD_GET(CM_GCR_REV_MAJOR, read_gcr_rev());
+ if (major_rev < 6)
+ return;
+
+ /* Find a location for the L2 sync region */
+ addr = mips_cm_l2sync_phys_base();
+ BUG_ON((addr & CM_GCR_L2_ONLY_SYNC_BASE_SYNCBASE) != addr);
+ if (!addr)
+ return;
+
+ /* Set the region base address & enable it */
+ write_gcr_l2_only_sync_base(addr | CM_GCR_L2_ONLY_SYNC_BASE_SYNCEN);
+
+ /* Map the region */
+ mips_cm_l2sync_base = ioremap(addr, MIPS_CM_L2SYNC_SIZE);
+}
+
+int mips_cm_probe(void)
+{
+ phys_addr_t addr;
+ u32 base_reg;
+ unsigned cpu;
+
+ /*
+ * No need to probe again if we have already been
+ * here before.
+ */
+ if (mips_gcr_base)
+ return 0;
+
+ addr = mips_cm_phys_base();
+ BUG_ON((addr & CM_GCR_BASE_GCRBASE) != addr);
+ if (!addr)
+ return -ENODEV;
+
+ mips_gcr_base = ioremap(addr, MIPS_CM_GCR_SIZE);
+ if (!mips_gcr_base)
+ return -ENXIO;
+
+ /* sanity check that we're looking at a CM */
+ base_reg = read_gcr_base();
+ if ((base_reg & CM_GCR_BASE_GCRBASE) != addr) {
+ pr_err("GCRs appear to have been moved (expected them at 0x%08lx)!\n",
+ (unsigned long)addr);
+ iounmap(mips_gcr_base);
+ mips_gcr_base = NULL;
+ return -ENODEV;
+ }
+
+ /* set default target to memory */
+ change_gcr_base(CM_GCR_BASE_CMDEFTGT, CM_GCR_BASE_CMDEFTGT_MEM);
+
+ /* disable CM regions */
+ write_gcr_reg0_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg0_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg1_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg1_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg2_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg2_mask(CM_GCR_REGn_MASK_ADDRMASK);
+ write_gcr_reg3_base(CM_GCR_REGn_BASE_BASEADDR);
+ write_gcr_reg3_mask(CM_GCR_REGn_MASK_ADDRMASK);
+
+ /* probe for an L2-only sync region */
+ mips_cm_probe_l2sync();
+
+ /* determine register width for this CM */
+ mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cm_core_lock, cpu));
+
+ return 0;
+}
+
+void mips_cm_lock_other(unsigned int cluster, unsigned int core,
+ unsigned int vp, unsigned int block)
+{
+ unsigned int curr_core, cm_rev;
+ u32 val;
+
+ cm_rev = mips_cm_revision();
+ preempt_disable();
+
+ if (cm_rev >= CM_REV_CM3) {
+ val = FIELD_PREP(CM3_GCR_Cx_OTHER_CORE, core) |
+ FIELD_PREP(CM3_GCR_Cx_OTHER_VP, vp);
+
+ if (cm_rev >= CM_REV_CM3_5) {
+ val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_CLUSTER, cluster);
+ val |= FIELD_PREP(CM_GCR_Cx_OTHER_BLOCK, block);
+ } else {
+ WARN_ON(cluster != 0);
+ WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ }
+
+ /*
+ * We need to disable interrupts in SMP systems in order to
+ * ensure that we don't interrupt the caller with code which
+ * may modify the redirect register. We do so here in a
+ * slightly obscure way by using a spin lock, since this has
+ * the neat property of also catching any nested uses of
+ * mips_cm_lock_other() leading to a deadlock or a nice warning
+ * with lockdep enabled.
+ */
+ spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
+ } else {
+ WARN_ON(cluster != 0);
+ WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+
+ /*
+ * We only have a GCR_CL_OTHER per core in systems with
+ * CM 2.5 & older, so have to ensure other VP(E)s don't
+ * race with us.
+ */
+ curr_core = cpu_core(&current_cpu_data);
+ spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+
+ val = FIELD_PREP(CM_GCR_Cx_OTHER_CORENUM, core);
+ }
+
+ write_gcr_cl_other(val);
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
+}
+
+void mips_cm_unlock_other(void)
+{
+ unsigned int curr_core;
+
+ if (mips_cm_revision() < CM_REV_CM3) {
+ curr_core = cpu_core(&current_cpu_data);
+ spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
+ per_cpu(cm_core_lock_flags, curr_core));
+ } else {
+ spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
+ *this_cpu_ptr(&cm_core_lock_flags));
+ }
+
+ preempt_enable();
+}
+
+void mips_cm_error_report(void)
+{
+ u64 cm_error, cm_addr, cm_other;
+ unsigned long revision;
+ int ocause, cause;
+ char buf[256];
+
+ if (!mips_cm_present())
+ return;
+
+ revision = mips_cm_revision();
+ cm_error = read_gcr_error_cause();
+ cm_addr = read_gcr_error_addr();
+ cm_other = read_gcr_error_mult();
+
+ if (revision < CM_REV_CM3) { /* CM2 */
+ cause = FIELD_GET(CM_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
+
+ if (!cause)
+ return;
+
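+		/*
+		 * Causes 1-15 are request errors, 16-23 intervention
+		 * errors and 24-31 L2 ECC errors; each group packs its
+		 * details into CM_ERROR differently (see cm2_causes[]).
+		 */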
+ if (cause < 16) {
+ unsigned long cca_bits = (cm_error >> 15) & 7;
+ unsigned long tr_bits = (cm_error >> 12) & 7;
+ unsigned long cmd_bits = (cm_error >> 7) & 0x1f;
+ unsigned long stag_bits = (cm_error >> 3) & 15;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "CCA=%lu TR=%s MCmd=%s STag=%lu "
+ "SPort=%lu\n", cca_bits, cm2_tr[tr_bits],
+ cm2_cmd[cmd_bits], stag_bits, sport_bits);
+ } else if (cause < 24) {
+ /* glob state & sresp together */
+ unsigned long c3_bits = (cm_error >> 18) & 7;
+ unsigned long c2_bits = (cm_error >> 15) & 7;
+ unsigned long c1_bits = (cm_error >> 12) & 7;
+ unsigned long c0_bits = (cm_error >> 9) & 7;
+ unsigned long sc_bit = (cm_error >> 8) & 1;
+ unsigned long cmd_bits = (cm_error >> 3) & 0x1f;
+ unsigned long sport_bits = (cm_error >> 0) & 7;
+
+ snprintf(buf, sizeof(buf),
+ "C3=%s C2=%s C1=%s C0=%s SC=%s "
+ "MCmd=%s SPort=%lu\n",
+ cm2_core[c3_bits], cm2_core[c2_bits],
+ cm2_core[c1_bits], cm2_core[c0_bits],
+ sc_bit ? "True" : "False",
+ cm2_cmd[cmd_bits], sport_bits);
+ } else {
+ unsigned long muc_bit = (cm_error >> 23) & 1;
+ unsigned long ins_bits = (cm_error >> 18) & 0x1f;
+ unsigned long arr_bits = (cm_error >> 16) & 3;
+ unsigned long dw_bits = (cm_error >> 12) & 15;
+ unsigned long way_bits = (cm_error >> 9) & 7;
+ unsigned long mway_bit = (cm_error >> 8) & 1;
+ unsigned long syn_bits = (cm_error >> 0) & 0xFF;
+
+ snprintf(buf, sizeof(buf),
+ "Type=%s%s Instr=%s DW=%lu Way=%lu "
+ "MWay=%s Syndrome=0x%02lx",
+ muc_bit ? "Multi-UC " : "",
+ cm2_l2_type[arr_bits],
+ cm2_l2_instr[ins_bits], dw_bits, way_bits,
+ mway_bit ? "True" : "False", syn_bits);
+ }
+ pr_err("CM_ERROR=%08llx %s <%s>\n", cm_error,
+ cm2_causes[cause], buf);
+ pr_err("CM_ADDR =%08llx\n", cm_addr);
+ pr_err("CM_OTHER=%08llx %s\n", cm_other, cm2_causes[ocause]);
+ } else { /* CM3 */
+ ulong core_id_bits, vp_id_bits, cmd_bits, cmd_group_bits;
+ ulong cm3_cca_bits, mcp_bits, cm3_tr_bits, sched_bit;
+
+ cause = FIELD_GET(CM3_GCR_ERROR_CAUSE_ERRTYPE, cm_error);
+ ocause = FIELD_GET(CM_GCR_ERROR_MULT_ERR2ND, cm_other);
+
+ if (!cause)
+ return;
+
+ /* Used by cause == {1,2,3} */
+ core_id_bits = (cm_error >> 22) & 0xf;
+ vp_id_bits = (cm_error >> 18) & 0xf;
+ cmd_bits = (cm_error >> 14) & 0xf;
+ cmd_group_bits = (cm_error >> 11) & 0xf;
+ cm3_cca_bits = (cm_error >> 8) & 7;
+ mcp_bits = (cm_error >> 5) & 0xf;
+ cm3_tr_bits = (cm_error >> 1) & 0xf;
+ sched_bit = cm_error & 0x1;
+
+ if (cause == 1 || cause == 3) { /* Tag ECC */
+ unsigned long tag_ecc = (cm_error >> 57) & 0x1;
+ unsigned long tag_way_bits = (cm_error >> 29) & 0xffff;
+ unsigned long dword_bits = (cm_error >> 49) & 0xff;
+ unsigned long data_way_bits = (cm_error >> 45) & 0xf;
+ unsigned long data_sets_bits = (cm_error >> 29) & 0xfff;
+ unsigned long bank_bit = (cm_error >> 28) & 0x1;
+			snprintf(buf, sizeof(buf),
+				 "%s ECC Error: Way=%lu (DWORD=%lu, Sets=%lu) "
+				 "Bank=%lu CoreID=%lu VPID=%lu Command=%s "
+				 "Command Group=%s CCA=%lu MCP=%d "
+				 "Transaction type=%s Scheduler=%lu\n",
+				 tag_ecc ? "TAG" : "DATA",
+				 tag_ecc ? (unsigned long)ffs(tag_way_bits) - 1 :
+				 data_way_bits, dword_bits, data_sets_bits,
+				 bank_bit,
+				 core_id_bits, vp_id_bits,
+				 cm3_cmd[cmd_bits],
+				 cm3_cmd_group[cmd_group_bits],
+				 cm3_cca_bits, 1 << mcp_bits,
+				 cm3_tr[cm3_tr_bits], sched_bit);
+ } else if (cause == 2) {
+ unsigned long data_error_type = (cm_error >> 41) & 0xfff;
+ unsigned long data_decode_cmd = (cm_error >> 37) & 0xf;
+ unsigned long data_decode_group = (cm_error >> 34) & 0x7;
+ unsigned long data_decode_destination_id = (cm_error >> 28) & 0x3f;
+
+			snprintf(buf, sizeof(buf),
+				 "Decode Request Error: Type=%lu, Command=%lu "
+				 "Command Group=%lu Destination ID=%lu "
+				 "CoreID=%lu VPID=%lu Command=%s "
+				 "Command Group=%s CCA=%lu MCP=%d "
+				 "Transaction type=%s Scheduler=%lu\n",
+ data_error_type, data_decode_cmd,
+ data_decode_group, data_decode_destination_id,
+ core_id_bits, vp_id_bits,
+ cm3_cmd[cmd_bits],
+ cm3_cmd_group[cmd_group_bits],
+ cm3_cca_bits, 1 << mcp_bits,
+ cm3_tr[cm3_tr_bits], sched_bit);
+ } else {
+ buf[0] = 0;
+ }
+
+ pr_err("CM_ERROR=%llx %s <%s>\n", cm_error,
+ cm3_causes[cause], buf);
+ pr_err("CM_ADDR =%llx\n", cm_addr);
+ pr_err("CM_OTHER=%llx %s\n", cm_other, cm3_causes[ocause]);
+ }
+
+ /* reprime cause register */
+ write_gcr_error_cause(cm_error);
+}
diff --git a/arch/mips/kernel/mips-cpc.c b/arch/mips/kernel/mips-cpc.c
new file mode 100644
index 000000000..3e386f7e1
--- /dev/null
+++ b/arch/mips/kernel/mips-cpc.c
@@ -0,0 +1,123 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/bitfield.h>
+#include <linux/errno.h>
+#include <linux/percpu.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/spinlock.h>
+
+#include <asm/mips-cps.h>
+
+void __iomem *mips_cpc_base;
+
+static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
+
+static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);
+
+phys_addr_t __weak mips_cpc_default_phys_base(void)
+{
+ struct device_node *cpc_node;
+ struct resource res;
+ int err;
+
+ cpc_node = of_find_compatible_node(of_root, NULL, "mti,mips-cpc");
+ if (cpc_node) {
+ err = of_address_to_resource(cpc_node, 0, &res);
+ of_node_put(cpc_node);
+ if (!err)
+ return res.start;
+ }
+
+ return 0;
+}
+
+/**
+ * mips_cpc_phys_base - retrieve the physical base address of the CPC
+ *
+ * This function returns the physical base address of the Cluster Power
+ * Controller memory mapped registers, or 0 if no Cluster Power Controller
+ * is present.
+ */
+static phys_addr_t mips_cpc_phys_base(void)
+{
+ unsigned long cpc_base;
+
+ if (!mips_cm_present())
+ return 0;
+
+ if (!(read_gcr_cpc_status() & CM_GCR_CPC_STATUS_EX))
+ return 0;
+
+ /* If the CPC is already enabled, leave it so */
+ cpc_base = read_gcr_cpc_base();
+ if (cpc_base & CM_GCR_CPC_BASE_CPCEN)
+ return cpc_base & CM_GCR_CPC_BASE_CPCBASE;
+
+ /* Otherwise, use the default address */
+ cpc_base = mips_cpc_default_phys_base();
+ if (!cpc_base)
+ return cpc_base;
+
+ /* Enable the CPC, mapped at the default address */
+ write_gcr_cpc_base(cpc_base | CM_GCR_CPC_BASE_CPCEN);
+ return cpc_base;
+}
+
+int mips_cpc_probe(void)
+{
+ phys_addr_t addr;
+ unsigned int cpu;
+
+ for_each_possible_cpu(cpu)
+ spin_lock_init(&per_cpu(cpc_core_lock, cpu));
+
+ addr = mips_cpc_phys_base();
+ if (!addr)
+ return -ENODEV;
+
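+	/* Map the 32 KiB CPC register block. */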
+ mips_cpc_base = ioremap(addr, 0x8000);
+ if (!mips_cpc_base)
+ return -ENXIO;
+
+ return 0;
+}
+
+void mips_cpc_lock_other(unsigned int core)
+{
+ unsigned int curr_core;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+ return;
+
+ preempt_disable();
+ curr_core = cpu_core(&current_cpu_data);
+ spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ write_cpc_cl_other(FIELD_PREP(CPC_Cx_OTHER_CORENUM, core));
+
+ /*
+ * Ensure the core-other region reflects the appropriate core &
+ * VP before any accesses to it occur.
+ */
+ mb();
+}
+
+void mips_cpc_unlock_other(void)
+{
+ unsigned int curr_core;
+
+ if (mips_cm_revision() >= CM_REV_CM3)
+ /* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
+ return;
+
+ curr_core = cpu_core(&current_cpu_data);
+ spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
+ per_cpu(cpc_core_lock_flags, curr_core));
+ preempt_enable();
+}
diff --git a/arch/mips/kernel/mips-mt-fpaff.c b/arch/mips/kernel/mips-mt-fpaff.c
new file mode 100644
index 000000000..67e130d3f
--- /dev/null
+++ b/arch/mips/kernel/mips-mt-fpaff.c
@@ -0,0 +1,219 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+#include <linux/cpu.h>
+#include <linux/cpuset.h>
+#include <linux/cpumask.h>
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/cred.h>
+#include <linux/security.h>
+#include <linux/types.h>
+#include <linux/uaccess.h>
+
+/*
+ * CPU mask used to set process affinity for MT VPEs/TCs with FPUs
+ */
+cpumask_t mt_fpu_cpumask;
+
+static int fpaff_threshold = -1;
+unsigned long mt_fpemul_threshold;
+
+/*
+ * Replacement functions for the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, so that we can integrate
+ * FPU affinity with the user's requested processor affinity.
+ * This code is 98% identical to the sys_sched_setaffinity() and
+ * sys_sched_getaffinity() system calls, and should be updated when
+ * kernel/sched/core.c changes.
+ */
+
+/*
+ * find_process_by_pid - find a process with a matching PID value.
+ * Used in sys_sched_set/getaffinity() in kernel/sched/core.c, so it is
+ * cloned here.
+ */
+static inline struct task_struct *find_process_by_pid(pid_t pid)
+{
+ return pid ? find_task_by_vpid(pid) : current;
+}
+
+/*
+ * Check that the target process has a UID matching the current process's.
+ */
+static bool check_same_owner(struct task_struct *p)
+{
+ const struct cred *cred = current_cred(), *pcred;
+ bool match;
+
+ rcu_read_lock();
+ pcred = __task_cred(p);
+ match = (uid_eq(cred->euid, pcred->euid) ||
+ uid_eq(cred->euid, pcred->uid));
+ rcu_read_unlock();
+ return match;
+}
+
+/*
+ * mipsmt_sys_sched_setaffinity - set the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_setaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ cpumask_var_t cpus_allowed, new_mask, effective_mask;
+ struct thread_info *ti;
+ struct task_struct *p;
+ int retval;
+
+ if (len < sizeof(new_mask))
+ return -EINVAL;
+
+ if (copy_from_user(&new_mask, user_mask_ptr, sizeof(new_mask)))
+ return -EFAULT;
+
+ cpus_read_lock();
+ rcu_read_lock();
+
+ p = find_process_by_pid(pid);
+ if (!p) {
+ rcu_read_unlock();
+ cpus_read_unlock();
+ return -ESRCH;
+ }
+
+ /* Prevent p going away */
+ get_task_struct(p);
+ rcu_read_unlock();
+
+ if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_put_task;
+ }
+ if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_cpus_allowed;
+ }
+ if (!alloc_cpumask_var(&effective_mask, GFP_KERNEL)) {
+ retval = -ENOMEM;
+ goto out_free_new_mask;
+ }
+ if (!check_same_owner(p) && !capable(CAP_SYS_NICE)) {
+ retval = -EPERM;
+ goto out_unlock;
+ }
+
+ retval = security_task_setscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ /* Record new user-specified CPU set for future reference */
+ cpumask_copy(&p->thread.user_cpus_allowed, new_mask);
+
+ again:
+ /* Compute new global allowed CPU set if necessary */
+ ti = task_thread_info(p);
+ if (test_ti_thread_flag(ti, TIF_FPUBOUND) &&
+ cpumask_intersects(new_mask, &mt_fpu_cpumask)) {
+ cpumask_and(effective_mask, new_mask, &mt_fpu_cpumask);
+ retval = set_cpus_allowed_ptr(p, effective_mask);
+ } else {
+ cpumask_copy(effective_mask, new_mask);
+ clear_ti_thread_flag(ti, TIF_FPUBOUND);
+ retval = set_cpus_allowed_ptr(p, new_mask);
+ }
+
+ if (!retval) {
+ cpuset_cpus_allowed(p, cpus_allowed);
+ if (!cpumask_subset(effective_mask, cpus_allowed)) {
+ /*
+ * We must have raced with a concurrent cpuset
+ * update. Just reset the cpus_allowed to the
+ * cpuset's cpus_allowed
+ */
+ cpumask_copy(new_mask, cpus_allowed);
+ goto again;
+ }
+ }
+out_unlock:
+ free_cpumask_var(effective_mask);
+out_free_new_mask:
+ free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+ free_cpumask_var(cpus_allowed);
+out_put_task:
+ put_task_struct(p);
+ cpus_read_unlock();
+ return retval;
+}
+
+/*
+ * mipsmt_sys_sched_getaffinity - get the cpu affinity of a process
+ */
+asmlinkage long mipsmt_sys_sched_getaffinity(pid_t pid, unsigned int len,
+ unsigned long __user *user_mask_ptr)
+{
+ unsigned int real_len;
+ cpumask_t allowed, mask;
+ int retval;
+ struct task_struct *p;
+
+ real_len = sizeof(mask);
+ if (len < real_len)
+ return -EINVAL;
+
+ cpus_read_lock();
+ rcu_read_lock();
+
+ retval = -ESRCH;
+ p = find_process_by_pid(pid);
+ if (!p)
+ goto out_unlock;
+ retval = security_task_getscheduler(p);
+ if (retval)
+ goto out_unlock;
+
+ cpumask_or(&allowed, &p->thread.user_cpus_allowed, p->cpus_ptr);
+ cpumask_and(&mask, &allowed, cpu_active_mask);
+
+out_unlock:
+ rcu_read_unlock();
+ cpus_read_unlock();
+ if (retval)
+ return retval;
+ if (copy_to_user(user_mask_ptr, &mask, real_len))
+ return -EFAULT;
+ return real_len;
+}
+
+
+static int __init fpaff_thresh(char *str)
+{
+ get_option(&str, &fpaff_threshold);
+ return 1;
+}
+__setup("fpaff=", fpaff_thresh);
+
+/*
+ * FPU Use Factor empirically derived from experiments on 34K
+ */
+#define FPUSEFACTOR 2000
+
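+/*
+ * Derive the default threshold from CPU speed: loops_per_jiffy /
+ * (500000/HZ) is roughly the BogoMIPS value, so faster cores tolerate
+ * more FPU emulations before a task is bound to an FPU-capable VPE.
+ */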
+static __init int mt_fp_affinity_init(void)
+{
+ if (fpaff_threshold >= 0) {
+ mt_fpemul_threshold = fpaff_threshold;
+ } else {
+ mt_fpemul_threshold =
+ (FPUSEFACTOR * (loops_per_jiffy/(500000/HZ))) / HZ;
+ }
+ printk(KERN_DEBUG "FPU Affinity set after %ld emulations\n",
+ mt_fpemul_threshold);
+
+ return 0;
+}
+arch_initcall(mt_fp_affinity_init);
diff --git a/arch/mips/kernel/mips-mt.c b/arch/mips/kernel/mips-mt.c
new file mode 100644
index 000000000..dc023a979
--- /dev/null
+++ b/arch/mips/kernel/mips-mt.c
@@ -0,0 +1,246 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * General MIPS MT support routines, usable in AP/SP and SMVP.
+ * Copyright (C) 2005 Mips Technologies, Inc
+ */
+
+#include <linux/device.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/security.h>
+
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <linux/atomic.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/mipsmtregs.h>
+#include <asm/r4kcache.h>
+#include <asm/cacheflush.h>
+
+int vpelimit;
+
+static int __init maxvpes(char *str)
+{
+ get_option(&str, &vpelimit);
+
+ return 1;
+}
+
+__setup("maxvpes=", maxvpes);
+
+int tclimit;
+
+static int __init maxtcs(char *str)
+{
+ get_option(&str, &tclimit);
+
+ return 1;
+}
+
+__setup("maxtcs=", maxtcs);
+
+/*
+ * Dump new MIPS MT state for the core. Does not leave TCs halted.
+ * Takes an argument which is taken to be a pre-call MVPControl value.
+ */
+
+void mips_mt_regdump(unsigned long mvpctl)
+{
+ unsigned long flags;
+ unsigned long vpflags;
+ unsigned long mvpconf0;
+ int nvpe;
+ int ntc;
+ int i;
+ int tc;
+ unsigned long haltval;
+ unsigned long tcstatval;
+
+ local_irq_save(flags);
+ vpflags = dvpe();
+ printk("=== MIPS MT State Dump ===\n");
+ printk("-- Global State --\n");
+ printk(" MVPControl Passed: %08lx\n", mvpctl);
+ printk(" MVPControl Read: %08lx\n", vpflags);
+ printk(" MVPConf0 : %08lx\n", (mvpconf0 = read_c0_mvpconf0()));
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+ ntc = ((mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT) + 1;
+ printk("-- per-VPE State --\n");
+ for (i = 0; i < nvpe; i++) {
+ for (tc = 0; tc < ntc; tc++) {
+ settc(tc);
+ if ((read_tc_c0_tcbind() & TCBIND_CURVPE) == i) {
+ printk(" VPE %d\n", i);
+ printk(" VPEControl : %08lx\n",
+ read_vpe_c0_vpecontrol());
+ printk(" VPEConf0 : %08lx\n",
+ read_vpe_c0_vpeconf0());
+ printk(" VPE%d.Status : %08lx\n",
+ i, read_vpe_c0_status());
+ printk(" VPE%d.EPC : %08lx %pS\n",
+ i, read_vpe_c0_epc(),
+ (void *) read_vpe_c0_epc());
+ printk(" VPE%d.Cause : %08lx\n",
+ i, read_vpe_c0_cause());
+ printk(" VPE%d.Config7 : %08lx\n",
+ i, read_vpe_c0_config7());
+ break; /* Next VPE */
+ }
+ }
+ }
+ printk("-- per-TC State --\n");
+ for (tc = 0; tc < ntc; tc++) {
+ settc(tc);
+ if (read_tc_c0_tcbind() == read_c0_tcbind()) {
+			/* Are we dumping ourselves? */
+ haltval = 0; /* Then we're not halted, and mustn't be */
+ tcstatval = flags; /* And pre-dump TCStatus is flags */
+ printk(" TC %d (current TC with VPE EPC above)\n", tc);
+ } else {
+ haltval = read_tc_c0_tchalt();
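+			/* Halt the TC so its state can be sampled; restarted below if it was running */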
+ write_tc_c0_tchalt(1);
+ tcstatval = read_tc_c0_tcstatus();
+ printk(" TC %d\n", tc);
+ }
+ printk(" TCStatus : %08lx\n", tcstatval);
+ printk(" TCBind : %08lx\n", read_tc_c0_tcbind());
+ printk(" TCRestart : %08lx %pS\n",
+ read_tc_c0_tcrestart(), (void *) read_tc_c0_tcrestart());
+ printk(" TCHalt : %08lx\n", haltval);
+ printk(" TCContext : %08lx\n", read_tc_c0_tccontext());
+ if (!haltval)
+ write_tc_c0_tchalt(0);
+ }
+ printk("===========================\n");
+ evpe(vpflags);
+ local_irq_restore(flags);
+}
+
+static int mt_opt_rpsctl = -1;
+static int mt_opt_nblsu = -1;
+static int mt_opt_forceconfig7;
+static int mt_opt_config7 = -1;
+
+static int __init rpsctl_set(char *str)
+{
+ get_option(&str, &mt_opt_rpsctl);
+ return 1;
+}
+__setup("rpsctl=", rpsctl_set);
+
+static int __init nblsu_set(char *str)
+{
+ get_option(&str, &mt_opt_nblsu);
+ return 1;
+}
+__setup("nblsu=", nblsu_set);
+
+static int __init config7_set(char *str)
+{
+ get_option(&str, &mt_opt_config7);
+ mt_opt_forceconfig7 = 1;
+ return 1;
+}
+__setup("config7=", config7_set);
+
+static unsigned int itc_base;
+
+static int __init set_itc_base(char *str)
+{
+ get_option(&str, &itc_base);
+ return 1;
+}
+
+__setup("itcbase=", set_itc_base);
+
+void mips_mt_set_cpuoptions(void)
+{
+ unsigned int oconfig7 = read_c0_config7();
+ unsigned int nconfig7 = oconfig7;
+
+ if (mt_opt_rpsctl >= 0) {
+ printk("34K return prediction stack override set to %d.\n",
+ mt_opt_rpsctl);
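+		/* Config7 bit 2 controls the 34K return prediction stack */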
+ if (mt_opt_rpsctl)
+ nconfig7 |= (1 << 2);
+ else
+ nconfig7 &= ~(1 << 2);
+ }
+ if (mt_opt_nblsu >= 0) {
+ printk("34K ALU/LSU sync override set to %d.\n", mt_opt_nblsu);
+ if (mt_opt_nblsu)
+ nconfig7 |= (1 << 5);
+ else
+ nconfig7 &= ~(1 << 5);
+ }
+ if (mt_opt_forceconfig7) {
+ printk("CP0.Config7 forced to 0x%08x.\n", mt_opt_config7);
+ nconfig7 = mt_opt_config7;
+ }
+ if (oconfig7 != nconfig7) {
+ __asm__ __volatile("sync");
+ write_c0_config7(nconfig7);
+ ehb();
+ printk("Config7: 0x%08x\n", read_c0_config7());
+ }
+
+ if (itc_base != 0) {
+ /*
+ * Configure ITC mapping. This code is very
+ * specific to the 34K core family, which uses
+ * a special mode bit ("ITC") in the ErrCtl
+ * register to enable access to ITC control
+ * registers via cache "tag" operations.
+ */
+ unsigned long ectlval;
+ unsigned long itcblkgrn;
+
+ /* ErrCtl register is known as "ecc" to Linux */
+ ectlval = read_c0_ecc();
+ write_c0_ecc(ectlval | (0x1 << 26));
+ ehb();
+#define INDEX_0 (0x80000000)
+#define INDEX_8 (0x80000008)
+ /* Read "cache tag" for Dcache pseudo-index 8 */
+ cache_op(Index_Load_Tag_D, INDEX_8);
+ ehb();
+ itcblkgrn = read_c0_dtaglo();
+ itcblkgrn &= 0xfffe0000;
+ /* Set for 128 byte pitch of ITC cells */
+ itcblkgrn |= 0x00000c00;
+ /* Stage in Tag register */
+ write_c0_dtaglo(itcblkgrn);
+ ehb();
+ /* Write out to ITU with CACHE op */
+ cache_op(Index_Store_Tag_D, INDEX_8);
+ /* Now set base address, and turn ITC on with 0x1 bit */
+		write_c0_dtaglo((itc_base & 0xfffffc00) | 0x1);
+ ehb();
+ /* Write out to ITU with CACHE op */
+ cache_op(Index_Store_Tag_D, INDEX_0);
+ write_c0_ecc(ectlval);
+ ehb();
+ printk("Mapped %ld ITC cells starting at 0x%08x\n",
+ ((itcblkgrn & 0x7fe00000) >> 20), itc_base);
+ }
+}
+
+struct class *mt_class;
+
+static int __init mips_mt_init(void)
+{
+ struct class *mtc;
+
+ mtc = class_create(THIS_MODULE, "mt");
+ if (IS_ERR(mtc))
+ return PTR_ERR(mtc);
+
+ mt_class = mtc;
+
+ return 0;
+}
+
+subsys_initcall(mips_mt_init);
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
new file mode 100644
index 000000000..750fe5698
--- /dev/null
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -0,0 +1,2363 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2014 Imagination Technologies Ltd.
+ * Author: Leonid Yegoshin <Leonid.Yegoshin@imgtec.com>
+ * Author: Markos Chandras <markos.chandras@imgtec.com>
+ *
+ * MIPS R2 user space instruction emulator for MIPS R6
+ *
+ */
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/debugfs.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/seq_file.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/debug.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/local.h>
+#include <asm/mipsregs.h>
+#include <asm/ptrace.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_64BIT
+#define ADDIU "daddiu "
+#define INS "dins "
+#define EXT "dext "
+#else
+#define ADDIU "addiu "
+#define INS "ins "
+#define EXT "ext "
+#endif /* CONFIG_64BIT */
+
+#define SB "sb "
+#define LB "lb "
+#define LL "ll "
+#define SC "sc "
+
+#ifdef CONFIG_DEBUG_FS
+static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2emustats);
+static DEFINE_PER_CPU(struct mips_r2_emulator_stats, mipsr2bdemustats);
+static DEFINE_PER_CPU(struct mips_r2br_emulator_stats, mipsr2bremustats);
+#endif
+
+extern const unsigned int fpucondbit[8];
+
+#define MIPS_R2_EMUL_TOTAL_PASS 10
+
+int mipsr2_emulation = 0;
+
+static int __init mipsr2emu_enable(char *s)
+{
+ mipsr2_emulation = 1;
+
+ pr_info("MIPS R2-to-R6 Emulator Enabled!");
+
+ return 1;
+}
+__setup("mipsr2emu", mipsr2emu_enable);
+
+/**
+ * mipsr6_emul - Emulate some frequent R2/R5/R6 instructions in the delay
+ * slot, for performance, instead of the traditional way of using a stack
+ * trampoline, which is rather slow.
+ * @regs: Process register set
+ * @ir: Instruction
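+ *
+ * Returns 0 if the instruction was emulated, -SIGFPE for an FPU
+ * instruction in the delay slot, and SIGILL if no fast path exists.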
+ */
+static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
+{
+ switch (MIPSInst_OPCODE(ir)) {
+ case addiu_op:
+ if (MIPSInst_RT(ir))
+ regs->regs[MIPSInst_RT(ir)] =
+ (s32)regs->regs[MIPSInst_RS(ir)] +
+ (s32)MIPSInst_SIMM(ir);
+ return 0;
+ case daddiu_op:
+ if (IS_ENABLED(CONFIG_32BIT))
+ break;
+
+ if (MIPSInst_RT(ir))
+ regs->regs[MIPSInst_RT(ir)] =
+ (s64)regs->regs[MIPSInst_RS(ir)] +
+ (s64)MIPSInst_SIMM(ir);
+ return 0;
+ case lwc1_op:
+ case swc1_op:
+ case cop1_op:
+ case cop1x_op:
+ /* FPU instructions in delay slot */
+ return -SIGFPE;
+ case spec_op:
+ switch (MIPSInst_FUNC(ir)) {
+ case or_op:
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ regs->regs[MIPSInst_RS(ir)] |
+ regs->regs[MIPSInst_RT(ir)];
+ return 0;
+ case sll_op:
+ if (MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) <<
+ MIPSInst_FD(ir));
+ return 0;
+ case srl_op:
+ if (MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)(((u32)regs->regs[MIPSInst_RT(ir)]) >>
+ MIPSInst_FD(ir));
+ return 0;
+ case addu_op:
+ if (MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)((u32)regs->regs[MIPSInst_RS(ir)] +
+ (u32)regs->regs[MIPSInst_RT(ir)]);
+ return 0;
+ case subu_op:
+ if (MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s32)((u32)regs->regs[MIPSInst_RS(ir)] -
+ (u32)regs->regs[MIPSInst_RT(ir)]);
+ return 0;
+ case dsll_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) <<
+ MIPSInst_FD(ir));
+ return 0;
+ case dsrl_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)(((u64)regs->regs[MIPSInst_RT(ir)]) >>
+ MIPSInst_FD(ir));
+ return 0;
+ case daddu_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (u64)regs->regs[MIPSInst_RS(ir)] +
+ (u64)regs->regs[MIPSInst_RT(ir)];
+ return 0;
+ case dsubu_op:
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
+ break;
+
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] =
+ (s64)((u64)regs->regs[MIPSInst_RS(ir)] -
+ (u64)regs->regs[MIPSInst_RT(ir)]);
+ return 0;
+ }
+ break;
+ default:
+ pr_debug("No fastpath BD emulation for instruction 0x%08x (op: %02x)\n",
+ ir, MIPSInst_OPCODE(ir));
+ }
+
+ return SIGILL;
+}
+
+/**
+ * movf_func - Emulate a MOVF instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movf_func(struct pt_regs *regs, u32 ir)
+{
+ u32 csr;
+ u32 cond;
+
+ csr = current->thread.fpu.fcr31;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+ if (((csr & cond) == 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * movt_func - Emulate a MOVT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movt_func(struct pt_regs *regs, u32 ir)
+{
+ u32 csr;
+ u32 cond;
+
+ csr = current->thread.fpu.fcr31;
+ cond = fpucondbit[MIPSInst_RT(ir) >> 2];
+
+ if (((csr & cond) != 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * jr_func - Emulate a JR instruction.
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns SIGILL if JR was in a delay slot, SIGEMT if we
+ * can't compute the EPC, SIGSEGV if we can't access the
+ * userland instruction, or 0 on success.
+ */
+static int jr_func(struct pt_regs *regs, u32 ir)
+{
+ int err;
+ unsigned long cepc, epc, nepc;
+ u32 nir;
+
+ if (delay_slot(regs))
+ return SIGILL;
+
+ /* EPC after the RI/JR instruction */
+ nepc = regs->cp0_epc;
+ /* Roll back to the reserved R2 JR instruction */
+ regs->cp0_epc -= 4;
+ epc = regs->cp0_epc;
+ err = __compute_return_epc(regs);
+
+ if (err < 0)
+ return SIGEMT;
+
+ /* Computed EPC */
+ cepc = regs->cp0_epc;
+
+ /* Get DS instruction */
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err)
+ return SIGSEGV;
+
+ MIPS_R2BR_STATS(jrs);
+
+	/* If nir == 0 (NOP), there is nothing else to do */
+	if (nir) {
+		/*
+		 * A negative err means an FPU instruction in the BD-slot,
+		 * zero err means 'BD-slot emulation done'.
+		 * For anything else we go back to trampoline emulation.
+		 */
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ regs->cp0_epc = nepc;
+ err = mips_dsemul(regs, nir, epc, cepc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+
+ return err;
+}
+
+/**
+ * movz_func - Emulate a MOVZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movz_func(struct pt_regs *regs, u32 ir)
+{
+ if (((regs->regs[MIPSInst_RT(ir)]) == 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * movn_func - Emulate a MOVN instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int movn_func(struct pt_regs *regs, u32 ir)
+{
+ if (((regs->regs[MIPSInst_RT(ir)]) != 0) && MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->regs[MIPSInst_RS(ir)];
+ MIPS_R2_STATS(movs);
+
+ return 0;
+}
+
+/**
+ * mfhi_func - Emulate a MFHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mfhi_func(struct pt_regs *regs, u32 ir)
+{
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->hi;
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mthi_func - Emulate a MTHI instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mthi_func(struct pt_regs *regs, u32 ir)
+{
+ regs->hi = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mflo_func - Emulate a MFLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mflo_func(struct pt_regs *regs, u32 ir)
+{
+ if (MIPSInst_RD(ir))
+ regs->regs[MIPSInst_RD(ir)] = regs->lo;
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mtlo_func - Emulate a MTLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mtlo_func(struct pt_regs *regs, u32 ir)
+{
+ regs->lo = regs->regs[MIPSInst_RS(ir)];
+
+ MIPS_R2_STATS(hilo);
+
+ return 0;
+}
+
+/**
+ * mult_func - Emulate a MULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mult_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+
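+	/* Sign-extend the low and high 32 bits of the product into LO and HI */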
+ rs = res;
+ regs->lo = (s64)rs;
+ rt = res >> 32;
+ res = (s64)rt;
+ regs->hi = res;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * multu_func - Emulate a MULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int multu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = res;
+ regs->lo = (s64)(s32)rt;
+ regs->hi = (s64)(s32)(res >> 32);
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * div_func - Emulate a DIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int div_func(struct pt_regs *regs, u32 ir)
+{
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
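+	/* LO gets the sign-extended quotient, HI the remainder */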
+ regs->lo = (s64)(rs / rt);
+ regs->hi = (s64)(rs % rt);
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/**
+ * divu_func - Emulate a DIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int divu_func(struct pt_regs *regs, u32 ir)
+{
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = (s64)(rs / rt);
+ regs->hi = (s64)(rs % rt);
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/**
+ * dmult_func - Emulate a DMULT instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmult_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = rt * rs;
+
+ regs->lo = res;
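+	/* Use the R6 DMUH instruction to obtain the high 64 bits of the product */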
+ __asm__ __volatile__(
+ "dmuh %0, %1, %2\t\n"
+ : "=r"(res)
+ : "r"(rt), "r"(rs));
+
+ regs->hi = res;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * dmultu_func - Emulate a DMULTU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int dmultu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = rt * rs;
+
+ regs->lo = res;
+ __asm__ __volatile__(
+ "dmuhu %0, %1, %2\t\n"
+ : "=r"(res)
+ : "r"(rt), "r"(rs));
+
+ regs->hi = res;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * ddiv_func - Emulate a DDIV instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddiv_func(struct pt_regs *regs, u32 ir)
+{
+ s64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = rs / rt;
+ regs->hi = rs % rt;
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/**
+ * ddivu_func - Emulate a DDIVU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 on success or SIGILL for 32-bit kernels.
+ */
+static int ddivu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 rt, rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+
+ regs->lo = rs / rt;
+ regs->hi = rs % rt;
+
+ MIPS_R2_STATS(divs);
+
+ return 0;
+}
+
+/* R6 removed instructions for the SPECIAL opcode */
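+/* Each entry is { mask, matching encoding, handler }; a null handler ends the table */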
+static const struct r2_decoder_table spec_op_table[] = {
+ { 0xfc1ff83f, 0x00000008, jr_func },
+ { 0xfc00ffff, 0x00000018, mult_func },
+ { 0xfc00ffff, 0x00000019, multu_func },
+ { 0xfc00ffff, 0x0000001c, dmult_func },
+ { 0xfc00ffff, 0x0000001d, dmultu_func },
+ { 0xffff07ff, 0x00000010, mfhi_func },
+ { 0xfc1fffff, 0x00000011, mthi_func },
+ { 0xffff07ff, 0x00000012, mflo_func },
+ { 0xfc1fffff, 0x00000013, mtlo_func },
+ { 0xfc0307ff, 0x00000001, movf_func },
+ { 0xfc0307ff, 0x00010001, movt_func },
+ { 0xfc0007ff, 0x0000000a, movz_func },
+ { 0xfc0007ff, 0x0000000b, movn_func },
+ { 0xfc00ffff, 0x0000001a, div_func },
+ { 0xfc00ffff, 0x0000001b, divu_func },
+ { 0xfc00ffff, 0x0000001e, ddiv_func },
+ { 0xfc00ffff, 0x0000001f, ddivu_func },
+ {}
+};
+
+/**
+ * madd_func - Emulate a MADD instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int madd_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res += ((((s64)rt) << 32) | (u32)rs);
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * maddu_func - Emulate a MADDU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int maddu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res += ((((s64)rt) << 32) | (u32)rs);
+
+ rt = res;
+ regs->lo = (s64)(s32)rt;
+ rs = res >> 32;
+ regs->hi = (s64)(s32)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * msub_func - Emulate a MSUB instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msub_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+ rt = res;
+ regs->lo = (s64)rt;
+ rs = res >> 32;
+ regs->hi = (s64)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * msubu_func - Emulate a MSUBU instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int msubu_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u32 rt, rs;
+
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (u64)rt * (u64)rs;
+ rt = regs->hi;
+ rs = regs->lo;
+ res = ((((s64)rt) << 32) | (u32)rs) - res;
+
+ rt = res;
+ regs->lo = (s64)(s32)rt;
+ rs = res >> 32;
+ regs->hi = (s64)(s32)rs;
+
+ MIPS_R2_STATS(dsps);
+
+ return 0;
+}
+
+/**
+ * mul_func - Emulate a MUL instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int mul_func(struct pt_regs *regs, u32 ir)
+{
+ s64 res;
+ s32 rt, rs;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+ rt = regs->regs[MIPSInst_RT(ir)];
+ rs = regs->regs[MIPSInst_RS(ir)];
+ res = (s64)rt * (s64)rs;
+
+ rs = res;
+ regs->regs[MIPSInst_RD(ir)] = (s64)rs;
+
+ MIPS_R2_STATS(muls);
+
+ return 0;
+}
+
+/**
+ * clz_func - Emulate a CLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clz_func(struct pt_regs *regs, u32 ir)
+{
+ u32 res;
+ u32 rs;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("clz %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/**
+ * clo_func - Emulate a CLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int clo_func(struct pt_regs *regs, u32 ir)
+{
+ u32 res;
+ u32 rs;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("clo %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/**
+ * dclz_func - Emulate a DCLZ instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclz_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("dclz %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/**
+ * dclo_func - Emulate a DCLO instruction
+ * @regs: Process register set
+ * @ir: Instruction
+ *
+ * Returns 0 since it always succeeds.
+ */
+static int dclo_func(struct pt_regs *regs, u32 ir)
+{
+ u64 res;
+ u64 rs;
+
+ if (IS_ENABLED(CONFIG_32BIT))
+ return SIGILL;
+
+ if (!MIPSInst_RD(ir))
+ return 0;
+
+ rs = regs->regs[MIPSInst_RS(ir)];
+ __asm__ __volatile__("dclo %0, %1" : "=r"(res) : "r"(rs));
+ regs->regs[MIPSInst_RD(ir)] = res;
+
+ MIPS_R2_STATS(bops);
+
+ return 0;
+}
+
+/* R6 removed instructions for the SPECIAL2 opcode */
+static const struct r2_decoder_table spec2_op_table[] = {
+ { 0xfc00ffff, 0x70000000, madd_func },
+ { 0xfc00ffff, 0x70000001, maddu_func },
+ { 0xfc0007ff, 0x70000002, mul_func },
+ { 0xfc00ffff, 0x70000004, msub_func },
+ { 0xfc00ffff, 0x70000005, msubu_func },
+ { 0xfc0007ff, 0x70000020, clz_func },
+ { 0xfc0007ff, 0x70000021, clo_func },
+ { 0xfc0007ff, 0x70000024, dclz_func },
+ { 0xfc0007ff, 0x70000025, dclo_func },
+ { }
+};
+
+static inline int mipsr2_find_op_func(struct pt_regs *regs, u32 inst,
+ const struct r2_decoder_table *table)
+{
+ const struct r2_decoder_table *p;
+ int err;
+
+ for (p = table; p->func; p++) {
+ if ((inst & p->mask) == p->code) {
+ err = (p->func)(regs, inst);
+ return err;
+ }
+ }
+ return SIGILL;
+}
+
+/**
+ * mipsr2_decoder - Decode and emulate a MIPS R2 instruction
+ * @regs: Process register set
+ * @inst: Instruction to decode and emulate
+ * @fcr31: Location used to return the Floating Point Control and
+ *	    Status Register Cause bits
+ */
+int mipsr2_decoder(struct pt_regs *regs, u32 inst, unsigned long *fcr31)
+{
+ int err = 0;
+ unsigned long vaddr;
+ u32 nir;
+ unsigned long cpc, epc, nepc, r31, res, rs, rt;
+
+ void __user *fault_addr = NULL;
+ int pass = 0;
+
+repeat:
+ r31 = regs->regs[31];
+ epc = regs->cp0_epc;
+ err = compute_return_epc(regs);
+ if (err < 0) {
+ BUG();
+ return SIGEMT;
+ }
+ pr_debug("Emulating the 0x%08x R2 instruction @ 0x%08lx (pass=%d))\n",
+ inst, epc, pass);
+
+ switch (MIPSInst_OPCODE(inst)) {
+ case spec_op:
+ err = mipsr2_find_op_func(regs, inst, spec_op_table);
+ if (err < 0) {
+ /* FPU instruction under JR */
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ break;
+ case spec2_op:
+ err = mipsr2_find_op_func(regs, inst, spec2_op_table);
+ break;
+ case bcond_op:
+ rt = MIPSInst_RT(inst);
+ rs = MIPSInst_RS(inst);
+ switch (rt) {
+ case tgei_op:
+ if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TGEI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tgeiu_op:
+ if (regs->regs[rs] >= MIPSInst_UIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TGEIU");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tlti_op:
+ if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TLTI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tltiu_op:
+ if (regs->regs[rs] < MIPSInst_UIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TLTIU");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case teqi_op:
+ if (regs->regs[rs] == MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TEQI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case tnei_op:
+ if (regs->regs[rs] != MIPSInst_SIMM(inst))
+ do_trap_or_bp(regs, 0, 0, "TNEI");
+
+ MIPS_R2_STATS(traps);
+
+ break;
+ case bltzl_op:
+ case bgezl_op:
+ case bltzall_op:
+ case bgezall_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return SIGEMT;
+ if (err != BRANCH_LIKELY_TAKEN)
+ break;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+ /*
+ * This will probably be optimized away when
+ * CONFIG_DEBUG_FS is not enabled
+ */
+ switch (rt) {
+ case bltzl_op:
+ MIPS_R2BR_STATS(bltzl);
+ break;
+ case bgezl_op:
+ MIPS_R2BR_STATS(bgezl);
+ break;
+ case bltzall_op:
+ MIPS_R2BR_STATS(bltzall);
+ break;
+ case bgezall_op:
+ MIPS_R2BR_STATS(bgezall);
+ break;
+ }
+
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) {
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ err = mips_dsemul(regs, nir, epc, cpc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ case bltzal_op:
+ case bgezal_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return SIGEMT;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+ /*
+ * This will probably be optimized away when
+ * CONFIG_DEBUG_FS is not enabled
+ */
+ switch (rt) {
+ case bltzal_op:
+ MIPS_R2BR_STATS(bltzal);
+ break;
+ case bgezal_op:
+ MIPS_R2BR_STATS(bgezal);
+ break;
+ }
+
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) {
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ err = mips_dsemul(regs, nir, epc, cpc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ default:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = SIGILL;
+ break;
+ }
+ break;
+
+ case blezl_op:
+ case bgtzl_op:
+ /*
+		 * For BLEZL and BGTZL, the rt field must be set to 0. If this
+		 * is not the case, this may be an encoding of a MIPS R6
+		 * instruction, so return to CPU execution if this occurs.
+ */
+ if (MIPSInst_RT(inst)) {
+ err = SIGILL;
+ break;
+ }
+ fallthrough;
+ case beql_op:
+ case bnel_op:
+ if (delay_slot(regs)) {
+ err = SIGILL;
+ break;
+ }
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ err = __compute_return_epc(regs);
+ if (err < 0)
+ return SIGEMT;
+ if (err != BRANCH_LIKELY_TAKEN)
+ break;
+ cpc = regs->cp0_epc;
+ nepc = epc + 4;
+ err = __get_user(nir, (u32 __user *)nepc);
+ if (err) {
+ err = SIGSEGV;
+ break;
+ }
+ /*
+ * This will probably be optimized away when
+ * CONFIG_DEBUG_FS is not enabled
+ */
+ switch (MIPSInst_OPCODE(inst)) {
+ case beql_op:
+ MIPS_R2BR_STATS(beql);
+ break;
+ case bnel_op:
+ MIPS_R2BR_STATS(bnel);
+ break;
+ case blezl_op:
+ MIPS_R2BR_STATS(blezl);
+ break;
+ case bgtzl_op:
+ MIPS_R2BR_STATS(bgtzl);
+ break;
+ }
+
+ switch (MIPSInst_OPCODE(nir)) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case swc1_op:
+ regs->cp0_cause |= CAUSEF_BD;
+ goto fpu_emul;
+ }
+ if (nir) {
+ err = mipsr6_emul(regs, nir);
+ if (err > 0) {
+ err = mips_dsemul(regs, nir, epc, cpc);
+ if (err == SIGILL)
+ err = SIGEMT;
+ MIPS_R2_STATS(dsemul);
+ }
+ }
+ break;
+ case lwc1_op:
+ case swc1_op:
+ case cop1_op:
+ case cop1x_op:
+fpu_emul:
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+
+ err = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
+ */
+ *fcr31 = res = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~res;
+
+ /*
+		 * This is a tricky issue - lose_fpu() uses LL/SC atomics if
+		 * the FPU is owned, which effectively cancels user-level
+		 * LL/SC. So it might seem logical not to restore FPU
+		 * ownership here. But sequences of multiple FPU instructions
+		 * are far more common than LL-FPU-SC, so we prefer to loop
+		 * here until the next scheduler cycle cancels FPU ownership.
+ */
+ own_fpu(1); /* Restore FPU state. */
+
+ if (err)
+ current->thread.cp0_baduaddr = (unsigned long)fault_addr;
+
+ MIPS_R2_STATS(fpus);
+
+ break;
+
+ case lwl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
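+		/*
+		 * Emulate LWL byte by byte, merging the most-significant
+		 * part of the unaligned word into rt; a faulting load
+		 * branches to the fixup at 8:, which sets err = SIGSEGV.
+		 */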
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9: sll %0, %0, 0\n"
+ "10:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 10b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+
+ break;
+
+ case lwr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
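+		/* Emulate LWR: merge the least-significant part of the unaligned word into rt */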
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ " sll %0, %0, 0\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 0, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "2:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 8, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "3:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 16, 8\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ "4:" LB "%1, 0(%2)\n"
+ INS "%0, %1, 24, 8\n"
+ " sll %0, %0, 0\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ "10:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 10b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+
+ break;
+
+ case swl_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
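+		/* Emulate SWL: store the most-significant bytes of rt to the unaligned address */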
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ EXT "%1, %0, 24, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 16, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 8, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 0, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ EXT "%1, %0, 24, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 16, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 8, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 0, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+
+ break;
+
+ case swr_op:
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
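+		/* Emulate SWR: store the least-significant bytes of rt to the unaligned address */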
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ EXT "%1, %0, 0, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 8, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 16, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ ADDIU "%2, %2, 1\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ EXT "%1, %0, 24, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ EXT "%1, %0, 0, 8\n"
+ "1:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 8, 8\n"
+ "2:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 16, 8\n"
+ "3:" SB "%1, 0(%2)\n"
+ " andi %1, %2, 0x3\n"
+ " beq $0, %1, 9f\n"
+ ADDIU "%2, %2, -1\n"
+ EXT "%1, %0, 24, 8\n"
+ "4:" SB "%1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+
+ break;
+
+ case ldl_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
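+		/* Emulate LDL: the doubleword analogue of LWL, stopping at an 8-byte boundary */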
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "2: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "3: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "4: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "5: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "6: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "7: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "0: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "2: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "3: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "4: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "5: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "6: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "7: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "0: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case ldr_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
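+		/* Emulate LDR: the doubleword analogue of LWR */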
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ "1: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "2: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "3: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "4: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "5: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "6: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "7: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ "0: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ "1: lb %1, 0(%2)\n"
+ " dins %0, %1, 0, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "2: lb %1, 0(%2)\n"
+ " dins %0, %1, 8, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "3: lb %1, 0(%2)\n"
+ " dins %0, %1, 16, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "4: lb %1, 0(%2)\n"
+ " dins %0, %1, 24, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "5: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 32, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "6: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 40, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "7: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 48, 8\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ "0: lb %1, 0(%2)\n"
+ " dinsu %0, %1, 56, 8\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV));
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = rt;
+
+ MIPS_R2_STATS(loads);
+ break;
+
+ case sdl_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
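+		/* Emulate SDL: the doubleword analogue of SWL */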
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ " dextu %1, %0, 56, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 48, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 40, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 32, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 24, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 16, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 8, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 0, 8\n"
+ "0: sb %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ " dextu %1, %0, 56, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 48, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 40, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 32, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 24, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 16, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 8, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 0, 8\n"
+ "0: sb %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+ break;
+
+ case sdr_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ rt = regs->regs[MIPSInst_RT(inst)];
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGSEGV;
+ break;
+ }
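+		/* Emulate SDR: the doubleword analogue of SWR */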
+ __asm__ __volatile__(
+ " .set push\n"
+ " .set reorder\n"
+#ifdef CONFIG_CPU_LITTLE_ENDIAN
+ " dext %1, %0, 0, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 8, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 16, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dext %1, %0, 24, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 32, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 40, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 48, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " daddiu %2, %2, 1\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " dextu %1, %0, 56, 8\n"
+ "0: sb %1, 0(%2)\n"
+#else /* !CONFIG_CPU_LITTLE_ENDIAN */
+ " dext %1, %0, 0, 8\n"
+ "1: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 8, 8\n"
+ "2: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 16, 8\n"
+ "3: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dext %1, %0, 24, 8\n"
+ "4: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 32, 8\n"
+ "5: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 40, 8\n"
+ "6: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 48, 8\n"
+ "7: sb %1, 0(%2)\n"
+ " andi %1, %2, 0x7\n"
+ " beq $0, %1, 9f\n"
+ " daddiu %2, %2, -1\n"
+ " dextu %1, %0, 56, 8\n"
+ "0: sb %1, 0(%2)\n"
+#endif /* CONFIG_CPU_LITTLE_ENDIAN */
+ "9:\n"
+ " .insn\n"
+ " .section .fixup,\"ax\"\n"
+ "8: li %3,%4\n"
+ " j 9b\n"
+ " .previous\n"
+ " .section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,8b\n"
+ STR(PTR_WD) " 2b,8b\n"
+ STR(PTR_WD) " 3b,8b\n"
+ STR(PTR_WD) " 4b,8b\n"
+ STR(PTR_WD) " 5b,8b\n"
+ STR(PTR_WD) " 6b,8b\n"
+ STR(PTR_WD) " 7b,8b\n"
+ STR(PTR_WD) " 0b,8b\n"
+ " .previous\n"
+ " .set pop\n"
+ : "+&r"(rt), "=&r"(rs),
+ "+&r"(vaddr), "+&r"(err)
+ : "i"(SIGSEGV)
+ : "memory");
+
+ MIPS_R2_STATS(stores);
+
+ break;
+ case ll_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x3) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * a Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ __asm__ __volatile__(
+ "1:\n"
+ "ll %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,3b\n"
+ ".previous\n"
+ : "=&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV)
+ : "memory");
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+ MIPS_R2_STATS(llsc);
+
+ break;
+
+ case sc_op:
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x3) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 4)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * a Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ res = regs->regs[MIPSInst_RT(inst)];
+
+ __asm__ __volatile__(
+ "1:\n"
+ "sc %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,3b\n"
+ ".previous\n"
+ : "+&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV));
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+
+ MIPS_R2_STATS(llsc);
+
+ break;
+
+ case lld_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x7) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * a Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ __asm__ __volatile__(
+ "1:\n"
+ "lld %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,3b\n"
+ ".previous\n"
+ : "=&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV)
+ : "memory");
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+
+ MIPS_R2_STATS(llsc);
+
+ break;
+
+ case scd_op:
+ if (IS_ENABLED(CONFIG_32BIT)) {
+ err = SIGILL;
+ break;
+ }
+
+ vaddr = regs->regs[MIPSInst_RS(inst)] + MIPSInst_SIMM(inst);
+ if (vaddr & 0x7) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+ if (!access_ok((void __user *)vaddr, 8)) {
+ current->thread.cp0_baduaddr = vaddr;
+ err = SIGBUS;
+ break;
+ }
+
+ if (!cpu_has_rw_llb) {
+ /*
+ * An LL/SC block can't be safely emulated without
+ * a Config5/LLB availability. So it's probably time to
+ * kill our process before things get any worse. This is
+ * because Config5/LLB allows us to use ERETNC so that
+ * the LLAddr/LLB bit is not cleared when we return from
+ * an exception. MIPS R2 LL/SC instructions trap with an
+ * RI exception so once we emulate them here, we return
+ * back to userland with ERETNC. That preserves the
+ * LLAddr/LLB so the subsequent SC instruction will
+ * succeed preserving the atomic semantics of the LL/SC
+ * block. Without that, there is no safe way to emulate
+ * an LL/SC block in MIPSR2 userland.
+ */
+ pr_err("Can't emulate MIPSR2 LL/SC without Config5/LLB\n");
+ err = SIGKILL;
+ break;
+ }
+
+ res = regs->regs[MIPSInst_RT(inst)];
+
+ __asm__ __volatile__(
+ "1:\n"
+ "scd %0, 0(%2)\n"
+ "2:\n"
+ ".insn\n"
+ ".section .fixup,\"ax\"\n"
+ "3:\n"
+ "li %1, %3\n"
+ "j 2b\n"
+ ".previous\n"
+ ".section __ex_table,\"a\"\n"
+ STR(PTR_WD) " 1b,3b\n"
+ ".previous\n"
+ : "+&r"(res), "+&r"(err)
+ : "r"(vaddr), "i"(SIGSEGV));
+
+ if (MIPSInst_RT(inst) && !err)
+ regs->regs[MIPSInst_RT(inst)] = res;
+
+ MIPS_R2_STATS(llsc);
+
+ break;
+ case pref_op:
+ /* skip it */
+ break;
+ default:
+ err = SIGILL;
+ }
+
+ /*
+ * Let's not return to userland just yet. It's costly and
+	 * it's likely we have more R2 instructions to emulate.
+ */
+ if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
+ regs->cp0_cause &= ~CAUSEF_BD;
+ err = get_user(inst, (u32 __user *)regs->cp0_epc);
+ if (!err)
+ goto repeat;
+
+ if (err < 0)
+ err = SIGSEGV;
+ }
+
+ if (err && (err != SIGEMT)) {
+ regs->regs[31] = r31;
+ regs->cp0_epc = epc;
+ }
+
+ /* Likely a MIPS R6 compatible instruction */
+ if (pass && (err == SIGILL))
+ err = 0;
+
+ return err;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static int mipsr2_emul_show(struct seq_file *s, void *unused)
+{
+
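+	/* The stats are per-CPU; only the counters of the CPU running this read are shown */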
+ seq_printf(s, "Instruction\tTotal\tBDslot\n------------------------------\n");
+ seq_printf(s, "movs\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.movs),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.movs));
+ seq_printf(s, "hilo\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.hilo),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.hilo));
+ seq_printf(s, "muls\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.muls),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.muls));
+ seq_printf(s, "divs\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.divs),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.divs));
+ seq_printf(s, "dsps\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.dsps),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.dsps));
+ seq_printf(s, "bops\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.bops),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.bops));
+ seq_printf(s, "traps\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.traps),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.traps));
+ seq_printf(s, "fpus\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.fpus),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.fpus));
+ seq_printf(s, "loads\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.loads),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.loads));
+ seq_printf(s, "stores\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.stores),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.stores));
+ seq_printf(s, "llsc\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.llsc),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.llsc));
+ seq_printf(s, "dsemul\t\t%ld\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2emustats.dsemul),
+ (unsigned long)__this_cpu_read(mipsr2bdemustats.dsemul));
+ seq_printf(s, "jr\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.jrs));
+ seq_printf(s, "bltzl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bltzl));
+ seq_printf(s, "bgezl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgezl));
+ seq_printf(s, "bltzll\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bltzll));
+ seq_printf(s, "bgezll\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgezll));
+ seq_printf(s, "bltzal\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bltzal));
+ seq_printf(s, "bgezal\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgezal));
+ seq_printf(s, "beql\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.beql));
+ seq_printf(s, "bnel\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bnel));
+ seq_printf(s, "blezl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.blezl));
+ seq_printf(s, "bgtzl\t\t%ld\n",
+ (unsigned long)__this_cpu_read(mipsr2bremustats.bgtzl));
+
+ return 0;
+}
+
+static int mipsr2_clear_show(struct seq_file *s, void *unused)
+{
+ mipsr2_emul_show(s, unused);
+
+ __this_cpu_write((mipsr2emustats).movs, 0);
+ __this_cpu_write((mipsr2bdemustats).movs, 0);
+ __this_cpu_write((mipsr2emustats).hilo, 0);
+ __this_cpu_write((mipsr2bdemustats).hilo, 0);
+ __this_cpu_write((mipsr2emustats).muls, 0);
+ __this_cpu_write((mipsr2bdemustats).muls, 0);
+ __this_cpu_write((mipsr2emustats).divs, 0);
+ __this_cpu_write((mipsr2bdemustats).divs, 0);
+ __this_cpu_write((mipsr2emustats).dsps, 0);
+ __this_cpu_write((mipsr2bdemustats).dsps, 0);
+ __this_cpu_write((mipsr2emustats).bops, 0);
+ __this_cpu_write((mipsr2bdemustats).bops, 0);
+ __this_cpu_write((mipsr2emustats).traps, 0);
+ __this_cpu_write((mipsr2bdemustats).traps, 0);
+ __this_cpu_write((mipsr2emustats).fpus, 0);
+ __this_cpu_write((mipsr2bdemustats).fpus, 0);
+ __this_cpu_write((mipsr2emustats).loads, 0);
+ __this_cpu_write((mipsr2bdemustats).loads, 0);
+ __this_cpu_write((mipsr2emustats).stores, 0);
+ __this_cpu_write((mipsr2bdemustats).stores, 0);
+ __this_cpu_write((mipsr2emustats).llsc, 0);
+ __this_cpu_write((mipsr2bdemustats).llsc, 0);
+ __this_cpu_write((mipsr2emustats).dsemul, 0);
+ __this_cpu_write((mipsr2bdemustats).dsemul, 0);
+ __this_cpu_write((mipsr2bremustats).jrs, 0);
+ __this_cpu_write((mipsr2bremustats).bltzl, 0);
+ __this_cpu_write((mipsr2bremustats).bgezl, 0);
+ __this_cpu_write((mipsr2bremustats).bltzll, 0);
+ __this_cpu_write((mipsr2bremustats).bgezll, 0);
+ __this_cpu_write((mipsr2bremustats).bltzall, 0);
+ __this_cpu_write((mipsr2bremustats).bgezall, 0);
+ __this_cpu_write((mipsr2bremustats).bltzal, 0);
+ __this_cpu_write((mipsr2bremustats).bgezal, 0);
+ __this_cpu_write((mipsr2bremustats).beql, 0);
+ __this_cpu_write((mipsr2bremustats).bnel, 0);
+ __this_cpu_write((mipsr2bremustats).blezl, 0);
+ __this_cpu_write((mipsr2bremustats).bgtzl, 0);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(mipsr2_emul);
+DEFINE_SHOW_ATTRIBUTE(mipsr2_clear);
+
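+/*
+ * Illustrative usage note (not part of the original patch): with
+ * debugfs mounted in the usual place, these files are expected to
+ * appear under the MIPS debugfs directory (typically
+ * /sys/kernel/debug/mips). Reading r2_emul_stats dumps the counters of
+ * the CPU servicing the read; reading r2_emul_stats_clear dumps them
+ * and then zeroes them on that CPU.
+ */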
+static int __init mipsr2_init_debugfs(void)
+{
+ debugfs_create_file("r2_emul_stats", S_IRUGO, mips_debugfs_dir, NULL,
+ &mipsr2_emul_fops);
+ debugfs_create_file("r2_emul_stats_clear", S_IRUGO, mips_debugfs_dir,
+ NULL, &mipsr2_clear_fops);
+ return 0;
+}
+
+device_initcall(mipsr2_init_debugfs);
+
+#endif /* CONFIG_DEBUG_FS */
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
new file mode 100644
index 000000000..0c936cbf2
--- /dev/null
+++ b/arch/mips/kernel/module.c
@@ -0,0 +1,453 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2001 Rusty Russell.
+ * Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2005 Thiemo Seufer
+ */
+
+#undef DEBUG
+
+#include <linux/extable.h>
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/numa.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/jump_label.h>
+
+extern void jump_label_apply_nops(struct module *mod);
+
+struct mips_hi16 {
+ struct mips_hi16 *next;
+ Elf_Addr *addr;
+ Elf_Addr value;
+};
+
+static LIST_HEAD(dbe_list);
+static DEFINE_SPINLOCK(dbe_lock);
+
+#ifdef MODULE_START
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, MODULE_START, MODULE_END,
+ GFP_KERNEL, PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+#endif
+
+static void apply_r_mips_32(u32 *location, u32 base, Elf_Addr v)
+{
+ *location = base + v;
+}
+
+static int apply_r_mips_26(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
+{
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_26 relocation\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ pr_err("module %s: relocation overflow\n",
+ me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~0x03ffffff) |
+ ((base + (v >> 2)) & 0x03ffffff);
+
+ return 0;
+}
+
+static int apply_r_mips_hi16(struct module *me, u32 *location, Elf_Addr v,
+ bool rela)
+{
+ struct mips_hi16 *n;
+
+ if (rela) {
+ *location = (*location & 0xffff0000) |
+ ((((long long) v + 0x8000LL) >> 16) & 0xffff);
+ return 0;
+ }
+
+ /*
+ * We cannot relocate this one now because we don't know the value of
+ * the carry we need to add. Save the information, and let LO16 do the
+ * actual relocation.
+ */
+ n = kmalloc(sizeof *n, GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->addr = (Elf_Addr *)location;
+ n->value = v;
+ n->next = me->arch.r_mips_hi16_list;
+ me->arch.r_mips_hi16_list = n;
+
+ return 0;
+}
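+/*
+ * Worked example of the deferred HI16 fixup (illustrative, not part of
+ * the original sources): for v = 0x00018000 with zero addends, the low
+ * half 0x8000 is sign-extended to -0x8000 when the CPU forms the
+ * address, so the high half must carry: lui has to load 0x0002 rather
+ * than 0x0001, since 0x00020000 + (-0x8000) = 0x00018000. The carry
+ * term ((val & 0x8000) != 0) in apply_r_mips_lo16() below computes
+ * exactly this adjustment.
+ */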
+
+static void free_relocation_chain(struct mips_hi16 *l)
+{
+ struct mips_hi16 *next;
+
+ while (l) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+}
+
+static int apply_r_mips_lo16(struct module *me, u32 *location,
+ u32 base, Elf_Addr v, bool rela)
+{
+ unsigned long insnlo = base;
+ struct mips_hi16 *l;
+ Elf_Addr val, vallo;
+
+ if (rela) {
+ *location = (*location & 0xffff0000) | (v & 0xffff);
+ return 0;
+ }
+
+ /* Sign extend the addend we extract from the lo insn. */
+ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+ if (me->arch.r_mips_hi16_list != NULL) {
+ l = me->arch.r_mips_hi16_list;
+ while (l != NULL) {
+ struct mips_hi16 *next;
+ unsigned long insn;
+
+ /*
+ * The value for the HI16 had best be the same.
+ */
+ if (v != l->value)
+ goto out_danger;
+
+ /*
+ * Do the HI16 relocation. Note that we actually don't
+ * need to know anything about the LO16 itself, except
+ * where to find the low 16 bits of the addend needed
+ * by the LO16.
+ */
+ insn = *l->addr;
+ val = ((insn & 0xffff) << 16) + vallo;
+ val += v;
+
+ /*
+ * Account for the sign extension that will happen in
+ * the low bits.
+ */
+ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
+
+ insn = (insn & ~0xffff) | val;
+ *l->addr = insn;
+
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+
+ me->arch.r_mips_hi16_list = NULL;
+ }
+
+ /*
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ */
+ val = v + vallo;
+ insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+ *location = insnlo;
+
+ return 0;
+
+out_danger:
+ free_relocation_chain(l);
+ me->arch.r_mips_hi16_list = NULL;
+
+ pr_err("module %s: dangerous R_MIPS_LO16 relocation\n", me->name);
+
+ return -ENOEXEC;
+}
+
+static int apply_r_mips_pc(struct module *me, u32 *location, u32 base,
+ Elf_Addr v, unsigned int bits)
+{
+ unsigned long mask = GENMASK(bits - 1, 0);
+ unsigned long se_bits;
+ long offset;
+
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_PC%u relocation\n",
+ me->name, bits);
+ return -ENOEXEC;
+ }
+
+ /* retrieve & sign extend implicit addend if any */
+ offset = base & mask;
+ offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
+
+ offset += ((long)v - (long)location) >> 2;
+
+ /* check the sign bit onwards are identical - ie. we didn't overflow */
+ se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
+ if ((offset & ~mask) != (se_bits & ~mask)) {
+ pr_err("module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~mask) | (offset & mask);
+
+ return 0;
+}
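+/*
+ * Illustrative note on apply_r_mips_pc() above: the relocated field
+ * holds a signed word offset, which is why the byte distance
+ * (v - location) is shifted right by 2 before being added to the
+ * implicit addend recovered from the instruction (for branches that
+ * addend is what makes the encoding delay-slot-relative). The
+ * sign-extension check rejects any target whose offset does not fit
+ * in 'bits' signed bits.
+ */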
+
+static int apply_r_mips_pc16(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
+{
+ return apply_r_mips_pc(me, location, base, v, 16);
+}
+
+static int apply_r_mips_pc21(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
+{
+ return apply_r_mips_pc(me, location, base, v, 21);
+}
+
+static int apply_r_mips_pc26(struct module *me, u32 *location, u32 base,
+ Elf_Addr v)
+{
+ return apply_r_mips_pc(me, location, base, v, 26);
+}
+
+static int apply_r_mips_64(u32 *location, Elf_Addr v, bool rela)
+{
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *(Elf_Addr *)location = v;
+
+ return 0;
+}
+
+static int apply_r_mips_higher(u32 *location, Elf_Addr v, bool rela)
+{
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x80008000LL) >> 32) & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_highest(u32 *location, Elf_Addr v, bool rela)
+{
+ if (WARN_ON(!rela))
+ return -EINVAL;
+
+ *location = (*location & 0xffff0000) |
+ ((((long long)v + 0x800080008000LL) >> 48) & 0xffff);
+
+ return 0;
+}
+
+/**
+ * reloc_handler() - Apply a particular relocation to a module
+ * @type: type of the relocation to apply
+ * @me: the module to apply the reloc to
+ * @location: the address at which the reloc is to be applied
+ * @base: the existing value at location for REL-style; 0 for RELA-style
+ * @v: the value of the reloc, with addend for RELA-style
+ * @rela: true if this is a RELA relocation, false for REL
+ *
+ * Each implemented relocation function applies a particular type of
+ * relocation to the module @me. Relocs that may be found in either REL or RELA
+ * variants can be handled by making use of the @base & @v parameters which are
+ * set to values which abstract the difference away from the particular reloc
+ * implementations.
+ *
+ * Return: 0 upon success, else -ERRNO
+ */
+static int reloc_handler(u32 type, struct module *me, u32 *location, u32 base,
+ Elf_Addr v, bool rela)
+{
+ switch (type) {
+ case R_MIPS_NONE:
+ break;
+ case R_MIPS_32:
+ apply_r_mips_32(location, base, v);
+ break;
+ case R_MIPS_26:
+ return apply_r_mips_26(me, location, base, v);
+ case R_MIPS_HI16:
+ return apply_r_mips_hi16(me, location, v, rela);
+ case R_MIPS_LO16:
+ return apply_r_mips_lo16(me, location, base, v, rela);
+ case R_MIPS_PC16:
+ return apply_r_mips_pc16(me, location, base, v);
+ case R_MIPS_PC21_S2:
+ return apply_r_mips_pc21(me, location, base, v);
+ case R_MIPS_PC26_S2:
+ return apply_r_mips_pc26(me, location, base, v);
+ case R_MIPS_64:
+ return apply_r_mips_64(location, v, rela);
+ case R_MIPS_HIGHER:
+ return apply_r_mips_higher(location, v, rela);
+ case R_MIPS_HIGHEST:
+ return apply_r_mips_highest(location, v, rela);
+ default:
+ pr_err("%s: Unknown relocation type %u\n", me->name, type);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me, bool rela)
+{
+ union {
+ Elf_Mips_Rel *rel;
+ Elf_Mips_Rela *rela;
+ } r;
+ Elf_Sym *sym;
+ u32 *location, base;
+ unsigned int i, type;
+ Elf_Addr v;
+ int err = 0;
+ size_t reloc_sz;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ r.rel = (void *)sechdrs[relsec].sh_addr;
+ reloc_sz = rela ? sizeof(*r.rela) : sizeof(*r.rel);
+ me->arch.r_mips_hi16_list = NULL;
+ for (i = 0; i < sechdrs[relsec].sh_size / reloc_sz; i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + r.rel->r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_MIPS_R_SYM(*r.rel);
+ if (sym->st_value >= -MAX_ERRNO) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warn("%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ err = -ENOENT;
+ goto out;
+ }
+
+ type = ELF_MIPS_R_TYPE(*r.rel);
+
+ if (rela) {
+ v = sym->st_value + r.rela->r_addend;
+ base = 0;
+ r.rela = &r.rela[1];
+ } else {
+ v = sym->st_value;
+ base = *location;
+ r.rel = &r.rel[1];
+ }
+
+ err = reloc_handler(type, me, location, base, v, rela);
+ if (err)
+ goto out;
+ }
+
+out:
+ /*
+ * Normally the hi16 list should be deallocated at this point. A
+ * malformed binary however could contain a series of R_MIPS_HI16
+ * relocations not followed by a R_MIPS_LO16 relocation, or if we hit
+ * an error processing a reloc we might have gotten here before
+ * reaching the R_MIPS_LO16. In either case, free up the list and
+ * return an error.
+ */
+ if (me->arch.r_mips_hi16_list) {
+ free_relocation_chain(me->arch.r_mips_hi16_list);
+ me->arch.r_mips_hi16_list = NULL;
+ err = err ?: -ENOEXEC;
+ }
+
+ return err;
+}
+
+int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, false);
+}
+
+#ifdef CONFIG_MODULES_USE_ELF_RELA
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ return __apply_relocate(sechdrs, strtab, symindex, relsec, me, true);
+}
+#endif /* CONFIG_MODULES_USE_ELF_RELA */
+
+/* Given an address, look for it in the module exception tables. */
+const struct exception_table_entry *search_module_dbetables(unsigned long addr)
+{
+ unsigned long flags;
+ const struct exception_table_entry *e = NULL;
+ struct mod_arch_specific *dbe;
+
+ spin_lock_irqsave(&dbe_lock, flags);
+ list_for_each_entry(dbe, &dbe_list, dbe_list) {
+ e = search_extable(dbe->dbe_start,
+ dbe->dbe_end - dbe->dbe_start, addr);
+ if (e)
+ break;
+ }
+ spin_unlock_irqrestore(&dbe_lock, flags);
+
+	/*
+	 * If we found one, we are currently running inside it, so the
+	 * module cannot be unloaded and no reference count is needed.
+	 */
+ return e;
+}
+
+/* Put in dbe list if necessary. */
+int module_finalize(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ const Elf_Shdr *s;
+ char *secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ if (IS_ENABLED(CONFIG_JUMP_LABEL))
+ jump_label_apply_nops(me);
+
+ INIT_LIST_HEAD(&me->arch.dbe_list);
+ for (s = sechdrs; s < sechdrs + hdr->e_shnum; s++) {
+ if (strcmp("__dbe_table", secstrings + s->sh_name) != 0)
+ continue;
+ me->arch.dbe_start = (void *)s->sh_addr;
+ me->arch.dbe_end = (void *)s->sh_addr + s->sh_size;
+ spin_lock_irq(&dbe_lock);
+ list_add(&me->arch.dbe_list, &dbe_list);
+ spin_unlock_irq(&dbe_lock);
+ }
+ return 0;
+}
+
+void module_arch_cleanup(struct module *mod)
+{
+ spin_lock_irq(&dbe_lock);
+ list_del(&mod->arch.dbe_list);
+ spin_unlock_irq(&dbe_lock);
+}
diff --git a/arch/mips/kernel/octeon_switch.S b/arch/mips/kernel/octeon_switch.S
new file mode 100644
index 000000000..896080b44
--- /dev/null
+++ b/arch/mips/kernel/octeon_switch.S
@@ -0,0 +1,554 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+#include <asm/asm.h>
+#include <asm/export.h>
+#include <asm/asm-offsets.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 7
+ LEAF(resume)
+ .set arch=octeon
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+#if CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
+ /* Check if we need to store CVMSEG state */
+ dmfc0 t0, $11,7 /* CvmMemCtl */
+ bbit0 t0, 6, 3f /* Is user access enabled? */
+
+ /* Store the CVMSEG state */
+ /* Extract the size of CVMSEG */
+ andi t0, 0x3f
+ /* Multiply * (cache line size/sizeof(long)/2) */
+ sll t0, 7-LONGLOG-1
+ li t1, -32768 /* Base address of CVMSEG */
+ LONG_ADDI t2, a0, THREAD_CVMSEG /* Where to store CVMSEG to */
+ synciobdma
+2:
+ .set noreorder
+ LONG_L t8, 0(t1) /* Load from CVMSEG */
+ subu t0, 1 /* Decrement loop var */
+	LONG_L	t9, LONGSIZE(t1)	/* Load from CVMSEG */
+ LONG_ADDU t1, LONGSIZE*2 /* Increment loc in CVMSEG */
+ LONG_S t8, 0(t2) /* Store CVMSEG to thread storage */
+ LONG_ADDU t2, LONGSIZE*2 /* Increment loc in thread storage */
+ bnez t0, 2b /* Loop until we've copied it all */
+	LONG_S	t9, -LONGSIZE(t2)	/* Store CVMSEG to thread storage */
+ .set reorder
+
+ /* Disable access to CVMSEG */
+ dmfc0 t0, $11,7 /* CvmMemCtl */
+ xori t0, t0, 0x40 /* Bit 6 is CVMSEG user enable */
+ dmtc0 t0, $11,7 /* CvmMemCtl */
+#endif
+3:
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+	 * The order in which the registers are restored takes care of
+	 * the race in updating $28, $29 and kernelsp without disabling
+	 * interrupts.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
+ set_saved_sp t0, t1, t2
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
+
+/*
+ * void octeon_cop2_save(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_save)
+
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ /* Save the COP2 CRC state */
+ dmfc2 t0, 0x0201
+ dmfc2 t1, 0x0202
+ dmfc2 t2, 0x0200
+ sd t0, OCTEON_CP2_CRC_IV(a0)
+ sd t1, OCTEON_CP2_CRC_LENGTH(a0)
+ /* Skip next instructions if CvmCtl[NODFA_CP2] set */
+ bbit1 t9, 28, 1f
+ sd t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Save the LLM state */
+ dmfc2 t0, 0x0402
+ dmfc2 t1, 0x040A
+ sd t0, OCTEON_CP2_LLM_DAT(a0)
+
+1: bbit1 t9, 26, 3f /* done if CvmCtl[NOCRYPTO] set */
+ sd t1, OCTEON_CP2_LLM_DAT+8(a0)
+
+ /* Save the COP2 crypto state */
+ /* this part is mostly common to both pass 1 and later revisions */
+ dmfc2 t0, 0x0084
+ dmfc2 t1, 0x0080
+ dmfc2 t2, 0x0081
+ dmfc2 t3, 0x0082
+ sd t0, OCTEON_CP2_3DES_IV(a0)
+ dmfc2 t0, 0x0088
+ sd t1, OCTEON_CP2_3DES_KEY(a0)
+ dmfc2 t1, 0x0111 /* only necessary for pass 1 */
+ sd t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmfc2 t2, 0x0102
+ sd t3, OCTEON_CP2_3DES_KEY+16(a0)
+ dmfc2 t3, 0x0103
+ sd t0, OCTEON_CP2_3DES_RESULT(a0)
+ dmfc2 t0, 0x0104
+ sd t1, OCTEON_CP2_AES_INP0(a0) /* only necessary for pass 1 */
+ dmfc2 t1, 0x0105
+ sd t2, OCTEON_CP2_AES_IV(a0)
+ dmfc2 t2, 0x0106
+ sd t3, OCTEON_CP2_AES_IV+8(a0)
+ dmfc2 t3, 0x0107
+ sd t0, OCTEON_CP2_AES_KEY(a0)
+ dmfc2 t0, 0x0110
+ sd t1, OCTEON_CP2_AES_KEY+8(a0)
+ dmfc2 t1, 0x0100
+ sd t2, OCTEON_CP2_AES_KEY+16(a0)
+ dmfc2 t2, 0x0101
+ sd t3, OCTEON_CP2_AES_KEY+24(a0)
+ mfc0 v0, $15,0 /* Get the processor ID register */
+ sd t0, OCTEON_CP2_AES_KEYLEN(a0)
+ li v1, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ sd t1, OCTEON_CP2_AES_RESULT(a0)
+ /* Skip to the Pass1 version of the remainder of the COP2 state */
+ beq v0, v1, 2f
+ sd t2, OCTEON_CP2_AES_RESULT+8(a0)
+
+ /* the non-pass1 state when !CvmCtl[NOCRYPTO] */
+ dmfc2 t1, 0x0240
+ dmfc2 t2, 0x0241
+	ori	v1, v1, 0x9500		/* lowest OCTEON III PrId */
+ dmfc2 t3, 0x0242
+ subu v1, v0, v1 /* prid - lowest OCTEON III PrId */
+ dmfc2 t0, 0x0243
+ sd t1, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t1, 0x0244
+ sd t2, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t2, 0x0245
+ sd t3, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t3, 0x0246
+ sd t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t0, 0x0247
+ sd t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t1, 0x0248
+ sd t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t2, 0x0249
+ sd t3, OCTEON_CP2_HSH_DATW+48(a0)
+ dmfc2 t3, 0x024A
+ sd t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmfc2 t0, 0x024B
+ sd t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmfc2 t1, 0x024C
+ sd t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmfc2 t2, 0x024D
+ sd t3, OCTEON_CP2_HSH_DATW+80(a0)
+ dmfc2 t3, 0x024E
+ sd t0, OCTEON_CP2_HSH_DATW+88(a0)
+ dmfc2 t0, 0x0250
+ sd t1, OCTEON_CP2_HSH_DATW+96(a0)
+ dmfc2 t1, 0x0251
+ sd t2, OCTEON_CP2_HSH_DATW+104(a0)
+ dmfc2 t2, 0x0252
+ sd t3, OCTEON_CP2_HSH_DATW+112(a0)
+ dmfc2 t3, 0x0253
+ sd t0, OCTEON_CP2_HSH_IVW(a0)
+ dmfc2 t0, 0x0254
+ sd t1, OCTEON_CP2_HSH_IVW+8(a0)
+ dmfc2 t1, 0x0255
+ sd t2, OCTEON_CP2_HSH_IVW+16(a0)
+ dmfc2 t2, 0x0256
+ sd t3, OCTEON_CP2_HSH_IVW+24(a0)
+ dmfc2 t3, 0x0257
+ sd t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmfc2 t0, 0x0258
+ sd t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmfc2 t1, 0x0259
+ sd t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmfc2 t2, 0x025E
+ sd t3, OCTEON_CP2_HSH_IVW+56(a0)
+ dmfc2 t3, 0x025A
+ sd t0, OCTEON_CP2_GFM_MULT(a0)
+ dmfc2 t0, 0x025B
+ sd t1, OCTEON_CP2_GFM_MULT+8(a0)
+ sd t2, OCTEON_CP2_GFM_POLY(a0)
+ sd t3, OCTEON_CP2_GFM_RESULT(a0)
+ bltz v1, 4f
+ sd t0, OCTEON_CP2_GFM_RESULT+8(a0)
+	/* OCTEON III things */
+ dmfc2 t0, 0x024F
+ dmfc2 t1, 0x0050
+ sd t0, OCTEON_CP2_SHA3(a0)
+ sd t1, OCTEON_CP2_SHA3+8(a0)
+4:
+ jr ra
+ nop
+
+2: /* pass 1 special stuff when !CvmCtl[NOCRYPTO] */
+ dmfc2 t3, 0x0040
+ dmfc2 t0, 0x0041
+ dmfc2 t1, 0x0042
+ dmfc2 t2, 0x0043
+ sd t3, OCTEON_CP2_HSH_DATW(a0)
+ dmfc2 t3, 0x0044
+ sd t0, OCTEON_CP2_HSH_DATW+8(a0)
+ dmfc2 t0, 0x0045
+ sd t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmfc2 t1, 0x0046
+ sd t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmfc2 t2, 0x0048
+ sd t3, OCTEON_CP2_HSH_DATW+32(a0)
+ dmfc2 t3, 0x0049
+ sd t0, OCTEON_CP2_HSH_DATW+40(a0)
+ dmfc2 t0, 0x004A
+ sd t1, OCTEON_CP2_HSH_DATW+48(a0)
+ sd t2, OCTEON_CP2_HSH_IVW(a0)
+ sd t3, OCTEON_CP2_HSH_IVW+8(a0)
+ sd t0, OCTEON_CP2_HSH_IVW+16(a0)
+
+3: /* pass 1 or CvmCtl[NOCRYPTO] set */
+ jr ra
+ nop
+ END(octeon_cop2_save)
+ .set pop
+
+/*
+ * void octeon_cop2_restore(struct octeon_cop2_state *a0)
+ */
+ .align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_cop2_restore)
+ /* First cache line was prefetched before the call */
+ pref 4, 128(a0)
+ dmfc0 t9, $9,7 /* CvmCtl register. */
+
+ pref 4, 256(a0)
+ ld t0, OCTEON_CP2_CRC_IV(a0)
+ pref 4, 384(a0)
+ ld t1, OCTEON_CP2_CRC_LENGTH(a0)
+ ld t2, OCTEON_CP2_CRC_POLY(a0)
+
+ /* Restore the COP2 CRC state */
+ dmtc2 t0, 0x0201
+ dmtc2 t1, 0x1202
+ bbit1 t9, 28, 2f /* Skip LLM if CvmCtl[NODFA_CP2] is set */
+ dmtc2 t2, 0x4200
+
+ /* Restore the LLM state */
+ ld t0, OCTEON_CP2_LLM_DAT(a0)
+ ld t1, OCTEON_CP2_LLM_DAT+8(a0)
+ dmtc2 t0, 0x0402
+ dmtc2 t1, 0x040A
+
+2:
+ bbit1 t9, 26, done_restore /* done if CvmCtl[NOCRYPTO] set */
+ nop
+
+ /* Restore the COP2 crypto state common to pass 1 and pass 2 */
+ ld t0, OCTEON_CP2_3DES_IV(a0)
+ ld t1, OCTEON_CP2_3DES_KEY(a0)
+ ld t2, OCTEON_CP2_3DES_KEY+8(a0)
+ dmtc2 t0, 0x0084
+ ld t0, OCTEON_CP2_3DES_KEY+16(a0)
+ dmtc2 t1, 0x0080
+ ld t1, OCTEON_CP2_3DES_RESULT(a0)
+ dmtc2 t2, 0x0081
+ ld t2, OCTEON_CP2_AES_INP0(a0) /* only really needed for pass 1 */
+ dmtc2 t0, 0x0082
+ ld t0, OCTEON_CP2_AES_IV(a0)
+ dmtc2 t1, 0x0098
+ ld t1, OCTEON_CP2_AES_IV+8(a0)
+ dmtc2 t2, 0x010A /* only really needed for pass 1 */
+ ld t2, OCTEON_CP2_AES_KEY(a0)
+ dmtc2 t0, 0x0102
+ ld t0, OCTEON_CP2_AES_KEY+8(a0)
+ dmtc2 t1, 0x0103
+ ld t1, OCTEON_CP2_AES_KEY+16(a0)
+ dmtc2 t2, 0x0104
+ ld t2, OCTEON_CP2_AES_KEY+24(a0)
+ dmtc2 t0, 0x0105
+ ld t0, OCTEON_CP2_AES_KEYLEN(a0)
+ dmtc2 t1, 0x0106
+ ld t1, OCTEON_CP2_AES_RESULT(a0)
+ dmtc2 t2, 0x0107
+ ld t2, OCTEON_CP2_AES_RESULT+8(a0)
+ mfc0 t3, $15,0 /* Get the processor ID register */
+ dmtc2 t0, 0x0110
+ li v0, 0x000d0000 /* This is the processor ID of Octeon Pass1 */
+ dmtc2 t1, 0x0100
+ bne v0, t3, 3f /* Skip the next stuff for non-pass1 */
+ dmtc2 t2, 0x0101
+
+ /* this code is specific for pass 1 */
+ ld t0, OCTEON_CP2_HSH_DATW(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t2, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t0, 0x0040
+ ld t0, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t1, 0x0041
+ ld t1, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t2, 0x0042
+ ld t2, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t0, 0x0043
+ ld t0, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t1, 0x0044
+ ld t1, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t2, 0x0045
+ ld t2, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t0, 0x0046
+ ld t0, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t1, 0x0048
+ dmtc2 t2, 0x0049
+ b done_restore /* unconditional branch */
+ dmtc2 t0, 0x004A
+
+3: /* this is post-pass1 code */
+ ld t2, OCTEON_CP2_HSH_DATW(a0)
+	ori	v0, v0, 0x9500		/* lowest OCTEON III PrId */
+ ld t0, OCTEON_CP2_HSH_DATW+8(a0)
+ ld t1, OCTEON_CP2_HSH_DATW+16(a0)
+ dmtc2 t2, 0x0240
+ ld t2, OCTEON_CP2_HSH_DATW+24(a0)
+ dmtc2 t0, 0x0241
+ ld t0, OCTEON_CP2_HSH_DATW+32(a0)
+ dmtc2 t1, 0x0242
+ ld t1, OCTEON_CP2_HSH_DATW+40(a0)
+ dmtc2 t2, 0x0243
+ ld t2, OCTEON_CP2_HSH_DATW+48(a0)
+ dmtc2 t0, 0x0244
+ ld t0, OCTEON_CP2_HSH_DATW+56(a0)
+ dmtc2 t1, 0x0245
+ ld t1, OCTEON_CP2_HSH_DATW+64(a0)
+ dmtc2 t2, 0x0246
+ ld t2, OCTEON_CP2_HSH_DATW+72(a0)
+ dmtc2 t0, 0x0247
+ ld t0, OCTEON_CP2_HSH_DATW+80(a0)
+ dmtc2 t1, 0x0248
+ ld t1, OCTEON_CP2_HSH_DATW+88(a0)
+ dmtc2 t2, 0x0249
+ ld t2, OCTEON_CP2_HSH_DATW+96(a0)
+ dmtc2 t0, 0x024A
+ ld t0, OCTEON_CP2_HSH_DATW+104(a0)
+ dmtc2 t1, 0x024B
+ ld t1, OCTEON_CP2_HSH_DATW+112(a0)
+ dmtc2 t2, 0x024C
+ ld t2, OCTEON_CP2_HSH_IVW(a0)
+ dmtc2 t0, 0x024D
+ ld t0, OCTEON_CP2_HSH_IVW+8(a0)
+ dmtc2 t1, 0x024E
+ ld t1, OCTEON_CP2_HSH_IVW+16(a0)
+ dmtc2 t2, 0x0250
+ ld t2, OCTEON_CP2_HSH_IVW+24(a0)
+ dmtc2 t0, 0x0251
+ ld t0, OCTEON_CP2_HSH_IVW+32(a0)
+ dmtc2 t1, 0x0252
+ ld t1, OCTEON_CP2_HSH_IVW+40(a0)
+ dmtc2 t2, 0x0253
+ ld t2, OCTEON_CP2_HSH_IVW+48(a0)
+ dmtc2 t0, 0x0254
+ ld t0, OCTEON_CP2_HSH_IVW+56(a0)
+ dmtc2 t1, 0x0255
+ ld t1, OCTEON_CP2_GFM_MULT(a0)
+ dmtc2 t2, 0x0256
+ ld t2, OCTEON_CP2_GFM_MULT+8(a0)
+ dmtc2 t0, 0x0257
+ ld t0, OCTEON_CP2_GFM_POLY(a0)
+ dmtc2 t1, 0x0258
+ ld t1, OCTEON_CP2_GFM_RESULT(a0)
+ dmtc2 t2, 0x0259
+ ld t2, OCTEON_CP2_GFM_RESULT+8(a0)
+ dmtc2 t0, 0x025E
+ subu v0, t3, v0 /* prid - lowest OCTEON III PrId */
+ dmtc2 t1, 0x025A
+ bltz v0, done_restore
+ dmtc2 t2, 0x025B
+	/* OCTEON III things */
+ ld t0, OCTEON_CP2_SHA3(a0)
+ ld t1, OCTEON_CP2_SHA3+8(a0)
+ dmtc2 t0, 0x0051
+ dmtc2 t1, 0x0050
+done_restore:
+ jr ra
+ nop
+ END(octeon_cop2_restore)
+ .set pop
+
+/*
+ * void octeon_mult_save()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in SAVE_TEMP in stackframe.h. It can
+ * safely modify v1, k0, k1, $10-$15 and $24. It will be
+ * overwritten with a processor-specific version of the code.
+ */
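+/*
+ * The body below is only a placeholder (jr ra; nop, padded out with
+ * .space). A CPU-specific implementation (octeon_mult_save2 for
+ * OCTEON II and earlier, octeon_mult_save3 for OCTEON III) is
+ * presumably copied over it by platform setup code; the *_end symbols
+ * are EXPORTed to delimit the copy. This reading of the mechanism is
+ * inferred from the code rather than stated in it.
+ */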
+ .p2align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_save)
+ jr ra
+ nop
+ .space 30 * 4, 0
+octeon_mult_save_end:
+ EXPORT(octeon_mult_save_end)
+ END(octeon_mult_save)
+
+ LEAF(octeon_mult_save2)
+	/* Save the multiplier state OCTEON II and earlier */
+ v3mulu k0, $0, $0
+ v3mulu k1, $0, $0
+ sd k0, PT_MTP(sp) /* PT_MTP has P0 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MTP+8(sp) /* PT_MTP+8 has P1 */
+ ori k1, $0, 1
+ v3mulu k1, k1, $0
+ sd k0, PT_MTP+16(sp) /* PT_MTP+16 has P2 */
+ v3mulu k0, $0, $0
+ sd k1, PT_MPL(sp) /* PT_MPL has MPL0 */
+ v3mulu k1, $0, $0
+ sd k0, PT_MPL+8(sp) /* PT_MPL+8 has MPL1 */
+ jr ra
+ sd k1, PT_MPL+16(sp) /* PT_MPL+16 has MPL2 */
+octeon_mult_save2_end:
+ EXPORT(octeon_mult_save2_end)
+ END(octeon_mult_save2)
+
+ LEAF(octeon_mult_save3)
+ /* Save the multiplier state OCTEON III */
+ v3mulu $10, $0, $0 /* read P0 */
+ v3mulu $11, $0, $0 /* read P1 */
+ v3mulu $12, $0, $0 /* read P2 */
+ sd $10, PT_MTP+(0*8)(sp) /* store P0 */
+ v3mulu $10, $0, $0 /* read P3 */
+ sd $11, PT_MTP+(1*8)(sp) /* store P1 */
+ v3mulu $11, $0, $0 /* read P4 */
+ sd $12, PT_MTP+(2*8)(sp) /* store P2 */
+ ori $13, $0, 1
+ v3mulu $12, $0, $0 /* read P5 */
+ sd $10, PT_MTP+(3*8)(sp) /* store P3 */
+ v3mulu $13, $13, $0 /* P4-P0 = MPL5-MPL1, $13 = MPL0 */
+ sd $11, PT_MTP+(4*8)(sp) /* store P4 */
+ v3mulu $10, $0, $0 /* read MPL1 */
+ sd $12, PT_MTP+(5*8)(sp) /* store P5 */
+ v3mulu $11, $0, $0 /* read MPL2 */
+ sd $13, PT_MPL+(0*8)(sp) /* store MPL0 */
+ v3mulu $12, $0, $0 /* read MPL3 */
+ sd $10, PT_MPL+(1*8)(sp) /* store MPL1 */
+ v3mulu $10, $0, $0 /* read MPL4 */
+ sd $11, PT_MPL+(2*8)(sp) /* store MPL2 */
+ v3mulu $11, $0, $0 /* read MPL5 */
+ sd $12, PT_MPL+(3*8)(sp) /* store MPL3 */
+ sd $10, PT_MPL+(4*8)(sp) /* store MPL4 */
+ jr ra
+ sd $11, PT_MPL+(5*8)(sp) /* store MPL5 */
+octeon_mult_save3_end:
+ EXPORT(octeon_mult_save3_end)
+ END(octeon_mult_save3)
+ .set pop
+
+/*
+ * void octeon_mult_restore()
+ * sp is assumed to point to a struct pt_regs
+ *
+ * NOTE: This is called in RESTORE_TEMP in stackframe.h.
+ */
+ .p2align 7
+ .set push
+ .set noreorder
+ LEAF(octeon_mult_restore)
+ jr ra
+ nop
+ .space 30 * 4, 0
+octeon_mult_restore_end:
+ EXPORT(octeon_mult_restore_end)
+ END(octeon_mult_restore)
+
+ LEAF(octeon_mult_restore2)
+ ld v0, PT_MPL(sp) /* MPL0 */
+ ld v1, PT_MPL+8(sp) /* MPL1 */
+ ld k0, PT_MPL+16(sp) /* MPL2 */
+ /* Restore the multiplier state */
+ ld k1, PT_MTP+16(sp) /* P2 */
+ mtm0 v0 /* MPL0 */
+ ld v0, PT_MTP+8(sp) /* P1 */
+ mtm1 v1 /* MPL1 */
+ ld v1, PT_MTP(sp) /* P0 */
+ mtm2 k0 /* MPL2 */
+ mtp2 k1 /* P2 */
+ mtp1 v0 /* P1 */
+ jr ra
+ mtp0 v1 /* P0 */
+octeon_mult_restore2_end:
+ EXPORT(octeon_mult_restore2_end)
+ END(octeon_mult_restore2)
+
+ LEAF(octeon_mult_restore3)
+ ld $12, PT_MPL+(0*8)(sp) /* read MPL0 */
+ ld $13, PT_MPL+(3*8)(sp) /* read MPL3 */
+ ld $10, PT_MPL+(1*8)(sp) /* read MPL1 */
+ ld $11, PT_MPL+(4*8)(sp) /* read MPL4 */
+ .word 0x718d0008
+ /* mtm0 $12, $13 restore MPL0 and MPL3 */
+ ld $12, PT_MPL+(2*8)(sp) /* read MPL2 */
+ .word 0x714b000c
+ /* mtm1 $10, $11 restore MPL1 and MPL4 */
+ ld $13, PT_MPL+(5*8)(sp) /* read MPL5 */
+ ld $10, PT_MTP+(0*8)(sp) /* read P0 */
+ ld $11, PT_MTP+(3*8)(sp) /* read P3 */
+ .word 0x718d000d
+ /* mtm2 $12, $13 restore MPL2 and MPL5 */
+ ld $12, PT_MTP+(1*8)(sp) /* read P1 */
+ .word 0x714b0009
+ /* mtp0 $10, $11 restore P0 and P3 */
+ ld $13, PT_MTP+(4*8)(sp) /* read P4 */
+ ld $10, PT_MTP+(2*8)(sp) /* read P2 */
+ ld $11, PT_MTP+(5*8)(sp) /* read P5 */
+ .word 0x718d000a
+ /* mtp1 $12, $13 restore P1 and P4 */
+ jr ra
+ .word 0x714b000b
+ /* mtp2 $10, $11 restore P2 and P5 */
+
+octeon_mult_restore3_end:
+ EXPORT(octeon_mult_restore3_end)
+ END(octeon_mult_restore3)
+ .set pop
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
new file mode 100644
index 000000000..5d7a9c039
--- /dev/null
+++ b/arch/mips/kernel/perf_event.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ */
+
+#include <linux/perf_event.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/stacktrace.h>
+
+/* Callchain handling code. */
+
+/*
+ * Leave userspace callchain empty for now. When we find a way to trace
+ * the user stack callchains, we will add it here.
+ */
+
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+ unsigned long reg29)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr)) {
+ perf_callchain_store(entry, addr);
+ if (entry->nr >= entry->max_stack)
+ break;
+ }
+ }
+}
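+/*
+ * Note (editorial): the raw walk above scans every word on the kernel
+ * stack and records whatever looks like a kernel text address. It
+ * needs no frame information, but it can record false positives from
+ * stale saved pointers, which is why the unwinder path in
+ * perf_callchain_kernel() below is preferred when CONFIG_KALLSYMS is
+ * set.
+ */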
+
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(current);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_perf_callchain(entry, sp);
+ return;
+ }
+ do {
+ perf_callchain_store(entry, pc);
+ if (entry->nr >= entry->max_stack)
+ break;
+ pc = unwind_stack(current, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_perf_callchain(entry, sp);
+#endif
+}
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
new file mode 100644
index 000000000..c4d6b0913
--- /dev/null
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -0,0 +1,2056 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Linux performance counter support for MIPS.
+ *
+ * Copyright (C) 2010 MIPS Technologies, Inc.
+ * Copyright (C) 2011 Cavium Networks, Inc.
+ * Author: Deng-Cheng Zhu
+ *
+ * This code is based on the implementation for ARM, which is in turn
+ * based on the sparc64 perf event code and the x86 code. Performance
+ * counter access is based on the MIPS Oprofile code. And the callchain
+ * support references the code of MIPS stacktrace.c.
+ */
+
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/stacktrace.h>
+#include <asm/time.h> /* For perf_irq */
+
+#define MIPS_MAX_HWEVENTS 4
+#define MIPS_TCS_PER_COUNTER 2
+#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
+
+struct cpu_hw_events {
+ /* Array of events on this cpu. */
+ struct perf_event *events[MIPS_MAX_HWEVENTS];
+
+ /*
+ * Set the bit (indexed by the counter number) when the counter
+ * is used for an event.
+ */
+ unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
+
+ /*
+ * Software copy of the control register for each performance counter.
+	 * MIPS CPUs vary in their performance counter implementations;
+	 * they use this field differently, and some may not use it at all.
+ */
+ unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
+};
+DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
+ .saved_ctrl = {0},
+};
+
+/* The description of MIPS performance events. */
+struct mips_perf_event {
+ unsigned int event_id;
+ /*
+ * MIPS performance counters are indexed starting from 0.
+ * CNTR_EVEN indicates the indexes of the counters to be used are
+ * even numbers.
+ */
+ unsigned int cntr_mask;
+ #define CNTR_EVEN 0x55555555
+ #define CNTR_ODD 0xaaaaaaaa
+ #define CNTR_ALL 0xffffffff
+ enum {
+ T = 0,
+ V = 1,
+ P = 2,
+ } range;
+};
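+/*
+ * Illustrative note: CNTR_EVEN (0x55555555) has every even bit set and
+ * CNTR_ODD (0xaaaaaaaa) every odd bit, so the test_bit() scan in
+ * mipsxx_pmu_alloc_counter() naturally restricts such events to even
+ * or odd counter indexes, while CNTR_ALL allows any counter.
+ */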
+
+static struct mips_perf_event raw_event;
+static DEFINE_MUTEX(raw_event_mutex);
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+
+struct mips_pmu {
+ u64 max_period;
+ u64 valid_count;
+ u64 overflow;
+ const char *name;
+ int irq;
+ u64 (*read_counter)(unsigned int idx);
+ void (*write_counter)(unsigned int idx, u64 val);
+ const struct mips_perf_event *(*map_raw_event)(u64 config);
+ const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
+ const struct mips_perf_event (*cache_event_map)
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ unsigned int num_counters;
+};
+
+static int counter_bits;
+static struct mips_pmu mipspmu;
+
+#define M_PERFCTL_EVENT(event) (((event) << MIPS_PERFCTRL_EVENT_S) & \
+ MIPS_PERFCTRL_EVENT)
+#define M_PERFCTL_VPEID(vpe) ((vpe) << MIPS_PERFCTRL_VPEID_S)
+
+#ifdef CONFIG_CPU_BMIPS5000
+#define M_PERFCTL_MT_EN(filter) 0
+#else /* !CONFIG_CPU_BMIPS5000 */
+#define M_PERFCTL_MT_EN(filter) (filter)
+#endif /* CONFIG_CPU_BMIPS5000 */
+
+#define M_TC_EN_ALL M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_ALL)
+#define M_TC_EN_VPE M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_VPE)
+#define M_TC_EN_TC M_PERFCTL_MT_EN(MIPS_PERFCTRL_MT_EN_TC)
+
+#define M_PERFCTL_COUNT_EVENT_WHENEVER (MIPS_PERFCTRL_EXL | \
+ MIPS_PERFCTRL_K | \
+ MIPS_PERFCTRL_U | \
+ MIPS_PERFCTRL_S | \
+ MIPS_PERFCTRL_IE)
+
+#ifdef CONFIG_MIPS_MT_SMP
+#define M_PERFCTL_CONFIG_MASK 0x3fff801f
+#else
+#define M_PERFCTL_CONFIG_MASK 0x1f
+#endif
+
+#define CNTR_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+static DEFINE_RWLOCK(pmuint_rwlock);
+
+#if defined(CONFIG_CPU_BMIPS5000)
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
+#else
+#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
+ 0 : cpu_vpe_id(&current_cpu_data))
+#endif
+
+/* Copied from op_model_mipsxx.c */
+static unsigned int vpe_shift(void)
+{
+ if (num_possible_cpus() > 1)
+ return 1;
+
+ return 0;
+}
+
+static unsigned int counters_total_to_per_cpu(unsigned int counters)
+{
+ return counters >> vpe_shift();
+}
+
+#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+#define vpe_id() 0
+
+#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
+
+static void resume_local_counters(void);
+static void pause_local_counters(void);
+static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
+static int mipsxx_pmu_handle_shared_irq(void);
+
+/*
+ * 0: Not Loongson-3
+ * 1: Loongson-3A1000/3B1000/3B1500
+ * 2: Loongson-3A2000/3A3000
+ * 3: Loongson-3A4000+
+ */
+
+#define LOONGSON_PMU_TYPE0 0
+#define LOONGSON_PMU_TYPE1 1
+#define LOONGSON_PMU_TYPE2 2
+#define LOONGSON_PMU_TYPE3 3
+
+static inline int get_loongson3_pmu_type(void)
+{
+ if (boot_cpu_type() != CPU_LOONGSON64)
+ return LOONGSON_PMU_TYPE0;
+ if ((boot_cpu_data.processor_id & PRID_COMP_MASK) == PRID_COMP_LEGACY)
+ return LOONGSON_PMU_TYPE1;
+ if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64C)
+ return LOONGSON_PMU_TYPE2;
+ if ((boot_cpu_data.processor_id & PRID_IMP_MASK) == PRID_IMP_LOONGSON_64G)
+ return LOONGSON_PMU_TYPE3;
+
+ return LOONGSON_PMU_TYPE0;
+}
+
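+/*
+ * Illustrative note: where two VPEs share the four performance
+ * counters, VPE 1 uses the other pair of counters, so the swizzle
+ * below rotates logical indexes on VPE 1 to physical ones via
+ * (idx + 2) & 3 (e.g. logical counter 0 becomes physical counter 2).
+ */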
+static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
+{
+ if (vpe_id() == 1)
+ idx = (idx + 2) & 3;
+ return idx;
+}
+
+static u64 mipsxx_pmu_read_counter(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ /*
+		 * The counters are unsigned; we must cast to truncate
+ * off the high bits.
+ */
+ return (u32)read_c0_perfcntr0();
+ case 1:
+ return (u32)read_c0_perfcntr1();
+ case 2:
+ return (u32)read_c0_perfcntr2();
+ case 3:
+ return (u32)read_c0_perfcntr3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
+{
+ u64 mask = CNTR_BIT_MASK(counter_bits);
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfcntr0_64() & mask;
+ case 1:
+ return read_c0_perfcntr1_64() & mask;
+ case 2:
+ return read_c0_perfcntr2_64() & mask;
+ case 3:
+ return read_c0_perfcntr3_64() & mask;
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0(val);
+ return;
+ case 1:
+ write_c0_perfcntr1(val);
+ return;
+ case 2:
+ write_c0_perfcntr2(val);
+ return;
+ case 3:
+ write_c0_perfcntr3(val);
+ return;
+ }
+}
+
+static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
+{
+ val &= CNTR_BIT_MASK(counter_bits);
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfcntr0_64(val);
+ return;
+ case 1:
+ write_c0_perfcntr1_64(val);
+ return;
+ case 2:
+ write_c0_perfcntr2_64(val);
+ return;
+ case 3:
+ write_c0_perfcntr3_64(val);
+ return;
+ }
+}
+
+static unsigned int mipsxx_pmu_read_control(unsigned int idx)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ return read_c0_perfctrl0();
+ case 1:
+ return read_c0_perfctrl1();
+ case 2:
+ return read_c0_perfctrl2();
+ case 3:
+ return read_c0_perfctrl3();
+ default:
+ WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
+ return 0;
+ }
+}
+
+static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
+{
+ idx = mipsxx_pmu_swizzle_perf_idx(idx);
+
+ switch (idx) {
+ case 0:
+ write_c0_perfctrl0(val);
+ return;
+ case 1:
+ write_c0_perfctrl1(val);
+ return;
+ case 2:
+ write_c0_perfctrl2(val);
+ return;
+ case 3:
+ write_c0_perfctrl3(val);
+ return;
+ }
+}
+
+static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
+ struct hw_perf_event *hwc)
+{
+ int i;
+ unsigned long cntr_mask;
+
+ /*
+	 * We only need to care about the counter mask here; the range
+	 * has already been checked when the event was set up.
+ */
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ cntr_mask = (hwc->event_base >> 10) & 0xffff;
+ else
+ cntr_mask = (hwc->event_base >> 8) & 0xffff;
+
+ for (i = mipspmu.num_counters - 1; i >= 0; i--) {
+ /*
+ * Note that some MIPS perf events can be counted by both
+		 * even and odd counters, whereas many others can only use
+		 * even _or_ odd counters. This means that when an event of
+		 * the former kind occupies the counter that an event of the
+		 * latter kind needs, counter allocation for the latter event
+		 * fails. If the assignments could be swapped dynamically,
+		 * both events could be satisfied, but we leave that issue
+		 * alone for now.
+ */
+ if (test_bit(i, &cntr_mask) &&
+ !test_and_set_bit(i, cpuc->used_mask))
+ return i;
+ }
+
+ return -EAGAIN;
+}
+
+static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
+{
+ struct perf_event *event = container_of(evt, struct perf_event, hw);
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ unsigned int range = evt->event_base >> 24;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0x3ff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+ else
+ cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
+ (evt->config_base & M_PERFCTL_CONFIG_MASK) |
+ /* Make sure interrupt enabled. */
+ MIPS_PERFCTRL_IE;
+
+ if (IS_ENABLED(CONFIG_CPU_BMIPS5000)) {
+ /* enable the counter for the calling thread */
+ cpuc->saved_ctrl[idx] |=
+ (1 << (12 + vpe_id())) | BRCM_PERFCTRL_TC;
+ } else if (IS_ENABLED(CONFIG_MIPS_MT_SMP) && range > V) {
+ /* The counter is processor wide. Set it up to count all TCs. */
+ pr_debug("Enabling perf counter for all TCs\n");
+ cpuc->saved_ctrl[idx] |= M_TC_EN_ALL;
+ } else {
+ unsigned int cpu, ctrl;
+
+ /*
+ * Set up the counter for a particular CPU when event->cpu is
+ * a valid CPU number. Otherwise set up the counter for the CPU
+ * scheduling this thread.
+ */
+ cpu = (event->cpu >= 0) ? event->cpu : smp_processor_id();
+
+ ctrl = M_PERFCTL_VPEID(cpu_vpe_id(&cpu_data[cpu]));
+ ctrl |= M_TC_EN_VPE;
+ cpuc->saved_ctrl[idx] |= ctrl;
+ pr_debug("Enabling perf counter for CPU%d\n", cpu);
+ }
+ /*
+ * We do not actually let the counter run. Leave it until start().
+ */
+}
+
+static void mipsxx_pmu_disable_event(int idx)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ unsigned long flags;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ local_irq_save(flags);
+ cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER;
+ mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
+ local_irq_restore(flags);
+}
+
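+/*
+ * Illustrative note on the preload below (editorial): the counters
+ * count up and raise an interrupt on overflow, so to be interrupted
+ * after 'left' more events the counter is preloaded with
+ * (overflow - left). Assuming the 32-bit setup where mipspmu.overflow
+ * is 1 << 31 (configured elsewhere in this file), left = 100 preloads
+ * 0x7fffff9c, and after 100 events the counter reaches 0x80000000 and
+ * interrupts.
+ */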
+static int mipspmu_event_set_period(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 left = local64_read(&hwc->period_left);
+ u64 period = hwc->sample_period;
+ int ret = 0;
+
+ if (unlikely((left + period) & (1ULL << 63))) {
+ /* left underflowed by more than period. */
+ left = period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ } else if (unlikely((left + period) <= period)) {
+ /* left underflowed by less than period. */
+ left += period;
+ local64_set(&hwc->period_left, left);
+ hwc->last_period = period;
+ ret = 1;
+ }
+
+ if (left > mipspmu.max_period) {
+ left = mipspmu.max_period;
+ local64_set(&hwc->period_left, left);
+ }
+
+ local64_set(&hwc->prev_count, mipspmu.overflow - left);
+
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ mipsxx_pmu_write_control(idx,
+ M_PERFCTL_EVENT(hwc->event_base & 0x3ff));
+
+ mipspmu.write_counter(idx, mipspmu.overflow - left);
+
+ perf_event_update_userpage(event);
+
+ return ret;
+}
+
+static void mipspmu_event_update(struct perf_event *event,
+ struct hw_perf_event *hwc,
+ int idx)
+{
+ u64 prev_raw_count, new_raw_count;
+ u64 delta;
+
+again:
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = mipspmu.read_counter(idx);
+
+ if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count) != prev_raw_count)
+ goto again;
+
+ delta = new_raw_count - prev_raw_count;
+
+ local64_add(delta, &event->count);
+ local64_sub(delta, &hwc->period_left);
+}
+
+static void mipspmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (flags & PERF_EF_RELOAD)
+ WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
+
+ hwc->state = 0;
+
+ /* Set the period for the event. */
+ mipspmu_event_set_period(event, hwc, hwc->idx);
+
+ /* Enable the event. */
+ mipsxx_pmu_enable_event(hwc, hwc->idx);
+}
+
+static void mipspmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (!(hwc->state & PERF_HES_STOPPED)) {
+ /* We are working on a local event. */
+ mipsxx_pmu_disable_event(hwc->idx);
+ barrier();
+ mipspmu_event_update(event, hwc, hwc->idx);
+ hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ }
+}
+
+static int mipspmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx;
+ int err = 0;
+
+ perf_pmu_disable(event->pmu);
+
+ /* To look for a free counter for this event. */
+ idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
+ if (idx < 0) {
+ err = idx;
+ goto out;
+ }
+
+ /*
+ * If there is an event in the counter we are going to use then
+ * make sure it is disabled.
+ */
+ event->hw.idx = idx;
+ mipsxx_pmu_disable_event(idx);
+ cpuc->events[idx] = event;
+
+ hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
+ if (flags & PERF_EF_START)
+ mipspmu_start(event, PERF_EF_RELOAD);
+
+ /* Propagate our changes to the userspace mapping. */
+ perf_event_update_userpage(event);
+
+out:
+ perf_pmu_enable(event->pmu);
+ return err;
+}
+
+static void mipspmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+ int idx = hwc->idx;
+
+ WARN_ON(idx < 0 || idx >= mipspmu.num_counters);
+
+ mipspmu_stop(event, PERF_EF_UPDATE);
+ cpuc->events[idx] = NULL;
+ clear_bit(idx, cpuc->used_mask);
+
+ perf_event_update_userpage(event);
+}
+
+static void mipspmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ /* Don't read disabled counters! */
+ if (hwc->idx < 0)
+ return;
+
+ mipspmu_event_update(event, hwc, hwc->idx);
+}
+
+static void mipspmu_enable(struct pmu *pmu)
+{
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+}
+
+/*
+ * MIPS performance counters can be per-TC, and the control registers
+ * cannot be accessed directly across CPUs. Global control therefore
+ * needs cross-CPU calls. on_each_cpu() could help, but we cannot
+ * guarantee that this function is called with interrupts enabled. So
+ * we pause the local counters, then grab a rwlock and leave the
+ * counters on other CPUs alone. If a counter interrupt is raised while
+ * we own the write lock, that CPU simply pauses its local counters and
+ * spins in the handler. Also, we know we won't be migrated to another
+ * CPU after pausing the local counters and before grabbing the lock.
+ */
+static void mipspmu_disable(struct pmu *pmu)
+{
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ write_lock(&pmuint_rwlock);
+#endif
+}
+
+static atomic_t active_events = ATOMIC_INIT(0);
+static DEFINE_MUTEX(pmu_reserve_mutex);
+static int (*save_perf_irq)(void);
+
+static int mipspmu_get_irq(void)
+{
+ int err;
+
+ if (mipspmu.irq >= 0) {
+ /* Request my own irq handler. */
+ err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
+ IRQF_PERCPU | IRQF_NOBALANCING |
+ IRQF_NO_THREAD | IRQF_NO_SUSPEND |
+ IRQF_SHARED,
+ "mips_perf_pmu", &mipspmu);
+ if (err) {
+ pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
+ mipspmu.irq);
+ }
+ } else if (cp0_perfcount_irq < 0) {
+ /*
+ * We are sharing the irq number with the timer interrupt.
+ */
+ save_perf_irq = perf_irq;
+ perf_irq = mipsxx_pmu_handle_shared_irq;
+ err = 0;
+ } else {
+ pr_warn("The platform hasn't properly defined its interrupt controller\n");
+ err = -ENOENT;
+ }
+
+ return err;
+}
+
+static void mipspmu_free_irq(void)
+{
+ if (mipspmu.irq >= 0)
+ free_irq(mipspmu.irq, &mipspmu);
+ else if (cp0_perfcount_irq < 0)
+ perf_irq = save_perf_irq;
+}
+
+/*
+ * mipsxx/rm9000/loongson2 have different performance counters; each
+ * has its own specific low-level init routine.
+ */
+static void reset_counters(void *arg);
+static int __hw_perf_event_init(struct perf_event *event);
+
+static void hw_perf_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_and_mutex_lock(&active_events,
+ &pmu_reserve_mutex)) {
+ /*
+ * We must not call the destroy function with interrupts
+ * disabled.
+ */
+ on_each_cpu(reset_counters,
+ (void *)(long)mipspmu.num_counters, 1);
+ mipspmu_free_irq();
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+}
+
+static int mipspmu_event_init(struct perf_event *event)
+{
+ int err = 0;
+
+ /* does not support taken branch sampling */
+ if (has_branch_stack(event))
+ return -EOPNOTSUPP;
+
+ switch (event->attr.type) {
+ case PERF_TYPE_RAW:
+ case PERF_TYPE_HARDWARE:
+ case PERF_TYPE_HW_CACHE:
+ break;
+
+ default:
+ return -ENOENT;
+ }
+
+ if (event->cpu >= 0 && !cpu_online(event->cpu))
+ return -ENODEV;
+
+ if (!atomic_inc_not_zero(&active_events)) {
+ mutex_lock(&pmu_reserve_mutex);
+ if (atomic_read(&active_events) == 0)
+ err = mipspmu_get_irq();
+
+ if (!err)
+ atomic_inc(&active_events);
+ mutex_unlock(&pmu_reserve_mutex);
+ }
+
+ if (err)
+ return err;
+
+ return __hw_perf_event_init(event);
+}
+
+static struct pmu pmu = {
+ .pmu_enable = mipspmu_enable,
+ .pmu_disable = mipspmu_disable,
+ .event_init = mipspmu_event_init,
+ .add = mipspmu_add,
+ .del = mipspmu_del,
+ .start = mipspmu_start,
+ .stop = mipspmu_stop,
+ .read = mipspmu_read,
+};
+
+static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
+{
+/*
+ * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
+ * event_id (Loongson-3 TYPE2 instead uses a 10-bit event_id with a
+ * correspondingly narrower cntr_mask, as below).
+ */
+#ifdef CONFIG_MIPS_MT_SMP
+ if (num_possible_cpus() > 1)
+ return ((unsigned int)pev->range << 24) |
+ (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+ else
+#endif /* CONFIG_MIPS_MT_SMP */
+ {
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2)
+ return (pev->cntr_mask & 0xfffc00) |
+ (pev->event_id & 0x3ff);
+ else
+ return (pev->cntr_mask & 0xffff00) |
+ (pev->event_id & 0xff);
+ }
+}
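+/*
+ * Worked example for mipspmu_perf_event_encode() above (illustrative):
+ * on a CONFIG_MIPS_MT_SMP kernel with more than one possible CPU, a
+ * processor-wide (P = 2) event 0x02 countable on even counters encodes
+ * as (2 << 24) | (CNTR_EVEN & 0xffff00) | 0x02 = 0x02555502.
+ */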
+
+static const struct mips_perf_event *mipspmu_map_general_event(int idx)
+{
+ if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+ return &(*mipspmu.general_event_map)[idx];
+}
+
+static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
+{
+ unsigned int cache_type, cache_op, cache_result;
+ const struct mips_perf_event *pev;
+
+ cache_type = (config >> 0) & 0xff;
+ if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_op = (config >> 8) & 0xff;
+ if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
+ return ERR_PTR(-EINVAL);
+
+ cache_result = (config >> 16) & 0xff;
+ if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return ERR_PTR(-EINVAL);
+
+ pev = &((*mipspmu.cache_event_map)
+ [cache_type]
+ [cache_op]
+ [cache_result]);
+
+ if (pev->cntr_mask == 0)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ return pev;
+}
+
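+/*
+ * validate_group() below dry-runs counter allocation against a zeroed
+ * scratch cpu_hw_events: only if the leader, every sibling and the new
+ * event can each claim a counter in fake_cpuc can the whole group be
+ * scheduled onto the real hardware at once.
+ */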
+static int validate_group(struct perf_event *event)
+{
+ struct perf_event *sibling, *leader = event->group_leader;
+ struct cpu_hw_events fake_cpuc;
+
+ memset(&fake_cpuc, 0, sizeof(fake_cpuc));
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
+ return -EINVAL;
+
+ for_each_sibling_event(sibling, leader) {
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
+ return -EINVAL;
+ }
+
+ if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
+ return -EINVAL;
+
+ return 0;
+}
+
+/* This is needed by specific irq handlers in perf_event_*.c */
+static void handle_associated_event(struct cpu_hw_events *cpuc,
+ int idx, struct perf_sample_data *data,
+ struct pt_regs *regs)
+{
+ struct perf_event *event = cpuc->events[idx];
+ struct hw_perf_event *hwc = &event->hw;
+
+ mipspmu_event_update(event, hwc, idx);
+ data->period = event->hw.last_period;
+ if (!mipspmu_event_set_period(event, hwc, idx))
+ return;
+
+ if (perf_event_overflow(event, data, regs))
+ mipsxx_pmu_disable_event(idx);
+}
+
+
+static int __n_counters(void)
+{
+ if (!cpu_has_perf)
+ return 0;
+ if (!(read_c0_perfctrl0() & MIPS_PERFCTRL_M))
+ return 1;
+ if (!(read_c0_perfctrl1() & MIPS_PERFCTRL_M))
+ return 2;
+ if (!(read_c0_perfctrl2() & MIPS_PERFCTRL_M))
+ return 3;
+
+ return 4;
+}
+
+static int n_counters(void)
+{
+ int counters;
+
+ switch (current_cpu_type()) {
+ case CPU_R10000:
+ counters = 2;
+ break;
+
+ case CPU_R12000:
+ case CPU_R14000:
+ case CPU_R16000:
+ counters = 4;
+ break;
+
+ default:
+ counters = __n_counters();
+ }
+
+ return counters;
+}
+
+static void loongson3_reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+
+ switch (counters) {
+ case 4:
+ mipsxx_pmu_write_control(3, 0);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 127<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 191<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 255<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 319<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 383<<5);
+ mipspmu.write_counter(3, 0);
+ mipsxx_pmu_write_control(3, 575<<5);
+ mipspmu.write_counter(3, 0);
+ fallthrough;
+ case 3:
+ mipsxx_pmu_write_control(2, 0);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 127<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 191<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 255<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 319<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 383<<5);
+ mipspmu.write_counter(2, 0);
+ mipsxx_pmu_write_control(2, 575<<5);
+ mipspmu.write_counter(2, 0);
+ fallthrough;
+ case 2:
+ mipsxx_pmu_write_control(1, 0);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 127<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 191<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 255<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 319<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 383<<5);
+ mipspmu.write_counter(1, 0);
+ mipsxx_pmu_write_control(1, 575<<5);
+ mipspmu.write_counter(1, 0);
+ fallthrough;
+ case 1:
+ mipsxx_pmu_write_control(0, 0);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 127<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 191<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 255<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 319<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 383<<5);
+ mipspmu.write_counter(0, 0);
+ mipsxx_pmu_write_control(0, 575<<5);
+ mipspmu.write_counter(0, 0);
+ break;
+ }
+}
+
+static void reset_counters(void *arg)
+{
+ int counters = (int)(long)arg;
+
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
+ loongson3_reset_counters(arg);
+ return;
+ }
+
+ switch (counters) {
+ case 4:
+ mipsxx_pmu_write_control(3, 0);
+ mipspmu.write_counter(3, 0);
+ fallthrough;
+ case 3:
+ mipsxx_pmu_write_control(2, 0);
+ mipspmu.write_counter(2, 0);
+ fallthrough;
+ case 2:
+ mipsxx_pmu_write_control(1, 0);
+ mipspmu.write_counter(1, 0);
+ fallthrough;
+ case 1:
+ mipsxx_pmu_write_control(0, 0);
+ mipspmu.write_counter(0, 0);
+ break;
+ }
+}
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
+static const struct mips_perf_event mipsxxcore_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+/* 74K/proAptiv core has a different branch event code. */
+static const struct mips_perf_event mipsxxcore_event_map2
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
+};
+
+static const struct mips_perf_event i6x00_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
+ /* These only count dcache, not icache */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD },
+};
+
+static const struct mips_perf_event loongson3_event_map1[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x01, CNTR_EVEN },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x01, CNTR_ODD },
+};
+
+static const struct mips_perf_event loongson3_event_map2[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x80, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x81, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x18, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x94, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x9c, CNTR_ALL },
+};
+
+static const struct mips_perf_event loongson3_event_map3[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x1c, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x1d, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x08, CNTR_ALL },
+};
+
+static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
+ [PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
+};
+
+static const struct mips_perf_event bmips5000_event_map
+ [PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
+};
+
+/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+/* 74K/proAptiv core has completely different cache event map. */
+static const struct mips_perf_event mipsxxcore_cache_map2
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
+ [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN, P },
+ },
+},
+/*
+ * 74K core does not have specific DTLB events. proAptiv core has
+ * "speculative" DTLB events which are numbered 0x63 (even/odd) and
+ * not included here. One can use raw events if really needed.
+ */
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
+ },
+},
+};
+
+static const struct mips_perf_event i6x00_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(DTLB)] = {
+ /* Can't distinguish read & write */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(BPU)] = {
+ /* Conditional branches / mispredicted */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD },
+ },
+},
+};
+
+static const struct mips_perf_event loongson3_cache_map1
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_ODD },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x04, CNTR_EVEN },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x09, CNTR_ODD },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x0c, CNTR_ODD },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
+ [C(RESULT_MISS)] = { 0x01, CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x01, CNTR_EVEN },
+ [C(RESULT_MISS)] = { 0x01, CNTR_ODD },
+ },
+},
+};
+
+static const struct mips_perf_event loongson3_cache_map2
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x156, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x155, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x153, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x18, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x18, CNTR_ALL },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1b6, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x1b7, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x1bf, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x92, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x92, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x94, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x9c, CNTR_ALL },
+ },
+},
+};
+
+static const struct mips_perf_event loongson3_cache_map3
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1e, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x1f, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0xaa, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0xa9, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x1c, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x1d, CNTR_ALL },
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x2e, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x2f, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x14, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x1b, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x1a, CNTR_ALL },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x02, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x08, CNTR_ALL },
+ },
+},
+};
+
+/* BMIPS5000 */
+static const struct mips_perf_event bmips5000_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ /*
+ * Like some other architectures (e.g. ARM), the performance
+ * counters don't differentiate between read and write
+ * accesses/misses, so this isn't strictly correct, but it's the
+ * best we can do. Writes and reads get combined.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 12, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 12, CNTR_ODD, T },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 10, CNTR_EVEN, T },
+ [C(RESULT_MISS)] = { 10, CNTR_ODD, T },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 23, CNTR_EVEN, T },
+ /*
+ * Note that MIPS has only "hit" events countable for
+ * the prefetch operation.
+ */
+ },
+},
+[C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 28, CNTR_EVEN, P },
+ [C(RESULT_MISS)] = { 28, CNTR_ODD, P },
+ },
+},
+[C(BPU)] = {
+ /* Using the same code for *HW_BRANCH* */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
+ },
+},
+};
+
+static const struct mips_perf_event octeon_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x2b, CNTR_ALL },
+ [C(RESULT_MISS)] = { 0x2e, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x30, CNTR_ALL },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x18, CNTR_ALL },
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = { 0x19, CNTR_ALL },
+ },
+},
+[C(DTLB)] = {
+ /*
+ * Only general DTLB misses are counted; use the same event for
+ * read and write.
+ */
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_MISS)] = { 0x35, CNTR_ALL },
+ },
+},
+[C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_MISS)] = { 0x37, CNTR_ALL },
+ },
+},
+};
+
+static int __hw_perf_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ const struct mips_perf_event *pev;
+ int err;
+
+ /* Return the MIPS event descriptor for a generic perf event. */
+ if (PERF_TYPE_HARDWARE == event->attr.type) {
+ if (event->attr.config >= PERF_COUNT_HW_MAX)
+ return -EINVAL;
+ pev = mipspmu_map_general_event(event->attr.config);
+ } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
+ pev = mipspmu_map_cache_event(event->attr.config);
+ } else if (PERF_TYPE_RAW == event->attr.type) {
+ /* We are working on the global raw event. */
+ mutex_lock(&raw_event_mutex);
+ pev = mipspmu.map_raw_event(event->attr.config);
+ } else {
+ /* The event type is not (yet) supported. */
+ return -EOPNOTSUPP;
+ }
+
+ if (IS_ERR(pev)) {
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+ return PTR_ERR(pev);
+ }
+
+ /*
+ * We allow maximum flexibility in how each individual counter shared
+ * by a single CPU operates (the mode exclusion and the range).
+ */
+ hwc->config_base = MIPS_PERFCTRL_IE;
+
+ hwc->event_base = mipspmu_perf_event_encode(pev);
+ if (PERF_TYPE_RAW == event->attr.type)
+ mutex_unlock(&raw_event_mutex);
+
+ if (!attr->exclude_user)
+ hwc->config_base |= MIPS_PERFCTRL_U;
+ if (!attr->exclude_kernel) {
+ hwc->config_base |= MIPS_PERFCTRL_K;
+ /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
+ hwc->config_base |= MIPS_PERFCTRL_EXL;
+ }
+ if (!attr->exclude_hv)
+ hwc->config_base |= MIPS_PERFCTRL_S;
+
+ hwc->config_base &= M_PERFCTL_CONFIG_MASK;
+ /*
+ * The event may belong to another CPU. We do not assign it a
+ * local counter for now.
+ */
+ hwc->idx = -1;
+ hwc->config = 0;
+
+ if (!hwc->sample_period) {
+ hwc->sample_period = mipspmu.max_period;
+ hwc->last_period = hwc->sample_period;
+ local64_set(&hwc->period_left, hwc->sample_period);
+ }
+
+ err = 0;
+ if (event->group_leader != event)
+ err = validate_group(event);
+
+ event->destroy = hw_perf_event_destroy;
+
+ if (err)
+ event->destroy(event);
+
+ return err;
+}
+
+static void pause_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ do {
+ ctr--;
+ cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
+ ~M_PERFCTL_COUNT_EVENT_WHENEVER);
+ } while (ctr > 0);
+ local_irq_restore(flags);
+}
+
+static void resume_local_counters(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ int ctr = mipspmu.num_counters;
+
+ do {
+ ctr--;
+ mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
+ } while (ctr > 0);
+}
+
+static int mipsxx_pmu_handle_shared_irq(void)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct perf_sample_data data;
+ unsigned int counters = mipspmu.num_counters;
+ u64 counter;
+ int n, handled = IRQ_NONE;
+ struct pt_regs *regs;
+
+ if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
+ return handled;
+ /*
+ * First we pause the local counters, so that when we are locked
+ * here, the counters are all paused. When it gets locked due to
+ * perf_disable(), the timer interrupt handler will be delayed.
+ *
+ * See also mipsxx_pmu_start().
+ */
+ pause_local_counters();
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_lock(&pmuint_rwlock);
+#endif
+
+ regs = get_irq_regs();
+
+ perf_sample_data_init(&data, 0, 0);
+
+ for (n = counters - 1; n >= 0; n--) {
+ if (!test_bit(n, cpuc->used_mask))
+ continue;
+
+ counter = mipspmu.read_counter(n);
+ if (!(counter & mipspmu.overflow))
+ continue;
+
+ handle_associated_event(cpuc, n, &data, regs);
+ handled = IRQ_HANDLED;
+ }
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ read_unlock(&pmuint_rwlock);
+#endif
+ resume_local_counters();
+
+ /*
+ * Do all the work for the pending perf events. We can do this
+ * in here because the performance counter interrupt is a regular
+ * interrupt, not NMI.
+ */
+ if (handled == IRQ_HANDLED)
+ irq_work_run();
+
+ return handled;
+}
+
+static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
+{
+ return mipsxx_pmu_handle_shared_irq();
+}
+
+/* 24K */
+#define IS_BOTH_COUNTERS_24K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+
+/* 34K */
+#define IS_BOTH_COUNTERS_34K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_34K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
+ (r) == 176 || ((b) >= 50 && (b) <= 55) || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
+#endif
+
+/* 74K */
+#define IS_BOTH_COUNTERS_74K_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* proAptiv */
+#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+/* P5600 */
+#define IS_BOTH_COUNTERS_P5600_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/* 1004K */
+#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+#define IS_RANGE_P_1004K_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
+ (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
+ (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
+#endif
+
+/* interAptiv */
+#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b) \
+ ((b) == 0 || (b) == 1 || (b) == 11)
+#ifdef CONFIG_MIPS_MT_SMP
+/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
+#define IS_RANGE_P_INTERAPTIV_EVENT(r, b) \
+ ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
+ (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 || \
+ (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 && \
+ (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 || \
+ ((b) >= 64 && (b) <= 67))
+#define IS_RANGE_V_INTERAPTIV_EVENT(r) ((r) == 47 || (r) == 175)
+#endif
+
+/* BMIPS5000 */
+#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b) \
+ ((b) == 0 || (b) == 1)
+
+/*
+ * For most cores the user can use raw events 0-255, where 0-127 are for
+ * the events of even counters and 128-255 for odd counters; bit 7 is thus
+ * the even/odd bank selector. For example, to count Event Num 15 (from the
+ * user manual) on an odd counter, add 128 to 15 and use 143 (0x8f) as the
+ * event config.
+ *
+ * Some newer cores have even more events, in which case the user can use
+ * raw events 0-511, where 0-255 are for the events of even counters and
+ * 256-511 are for odd counters, so bit 8 is used as the even/odd bank
+ * selector.
+ */
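+/*
+ * A minimal sketch of the encoding above (illustrative only; the names
+ * here are hypothetical and not part of this file):
+ *
+ *	u64 config = event_num | (odd_bank ? 0x80 : 0);
+ *
+ * e.g. event_num 15 on an odd counter gives config 0x8f, which user space
+ * can then request as a raw event via "perf stat -e r8f".
+ */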
+static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
+{
+ /* currently most cores have 7-bit event numbers */
+ int pmu_type;
+ unsigned int raw_id = config & 0xff;
+ unsigned int base_id = raw_id & 0x7f;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ /*
+ * This is effectively a no-op: non-multithreading CPUs
+ * never check or calculate the range.
+ */
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_34K:
+ if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_74K:
+ case CPU_1074K:
+ if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_PROAPTIV:
+ if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_P5600:
+ case CPU_P6600:
+ /* 8-bit event numbers */
+ raw_id = config & 0x1ff;
+ base_id = raw_id & 0xff;
+ if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ raw_event.range = P;
+#endif
+ break;
+ case CPU_I6400:
+ case CPU_I6500:
+ /* 8-bit event numbers */
+ base_id = config & 0xff;
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ break;
+ case CPU_1004K:
+ if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_INTERAPTIV:
+ if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+#ifdef CONFIG_MIPS_MT_SMP
+ if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
+ raw_event.range = P;
+ else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
+ raw_event.range = V;
+ else
+ raw_event.range = T;
+#endif
+ break;
+ case CPU_BMIPS5000:
+ if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
+ raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
+ else
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ break;
+ case CPU_LOONGSON64:
+ pmu_type = get_loongson3_pmu_type();
+
+ switch (pmu_type) {
+ case LOONGSON_PMU_TYPE1:
+ raw_event.cntr_mask =
+ raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
+ break;
+ case LOONGSON_PMU_TYPE2:
+ base_id = config & 0x3ff;
+ raw_event.cntr_mask = CNTR_ALL;
+
+ if ((base_id >= 1 && base_id < 28) ||
+ (base_id >= 64 && base_id < 90) ||
+ (base_id >= 128 && base_id < 164) ||
+ (base_id >= 192 && base_id < 200) ||
+ (base_id >= 256 && base_id < 275) ||
+ (base_id >= 320 && base_id < 361) ||
+ (base_id >= 384 && base_id < 574))
+ break;
+
+ return ERR_PTR(-EOPNOTSUPP);
+ case LOONGSON_PMU_TYPE3:
+ base_id = raw_id;
+ raw_event.cntr_mask = CNTR_ALL;
+ break;
+ }
+ break;
+ }
+
+ raw_event.event_id = base_id;
+
+ return &raw_event;
+}
+
+static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
+{
+ unsigned int base_id = config & 0x7f;
+ unsigned int event_max;
+
+ raw_event.cntr_mask = CNTR_ALL;
+ raw_event.event_id = base_id;
+
+ if (current_cpu_type() == CPU_CAVIUM_OCTEON3)
+ event_max = 0x5f;
+ else if (current_cpu_type() == CPU_CAVIUM_OCTEON2)
+ event_max = 0x42;
+ else
+ event_max = 0x3a;
+
+ if (base_id > event_max)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ switch (base_id) {
+ case 0x00:
+ case 0x0f:
+ case 0x1e:
+ case 0x1f:
+ case 0x2f:
+ case 0x34:
+ case 0x3e ... 0x3f:
+ return ERR_PTR(-EOPNOTSUPP);
+ default:
+ break;
+ }
+
+ return &raw_event;
+}
+
+static int __init
+init_hw_perf_events(void)
+{
+ int counters, irq, pmu_type;
+
+ pr_info("Performance counters: ");
+
+ counters = n_counters();
+ if (counters == 0) {
+ pr_cont("No available PMU.\n");
+ return -ENODEV;
+ }
+
+#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
+ if (!cpu_has_mipsmt_pertccounters)
+ counters = counters_total_to_per_cpu(counters);
+#endif
+
+ if (get_c0_perfcount_int)
+ irq = get_c0_perfcount_int();
+ else if (cp0_perfcount_irq >= 0)
+ irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
+ else
+ irq = -1;
+
+ mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ mipspmu.name = "mips/24K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_34K:
+ mipspmu.name = "mips/34K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_74K:
+ mipspmu.name = "mips/74K";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_PROAPTIV:
+ mipspmu.name = "mips/proAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_P5600:
+ mipspmu.name = "mips/P5600";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_P6600:
+ mipspmu.name = "mips/P6600";
+ mipspmu.general_event_map = &mipsxxcore_event_map2;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map2;
+ break;
+ case CPU_I6400:
+ mipspmu.name = "mips/I6400";
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
+ break;
+ case CPU_I6500:
+ mipspmu.name = "mips/I6500";
+ mipspmu.general_event_map = &i6x00_event_map;
+ mipspmu.cache_event_map = &i6x00_cache_map;
+ break;
+ case CPU_1004K:
+ mipspmu.name = "mips/1004K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_1074K:
+ mipspmu.name = "mips/1074K";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_INTERAPTIV:
+ mipspmu.name = "mips/interAptiv";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_LOONGSON32:
+ mipspmu.name = "mips/loongson1";
+ mipspmu.general_event_map = &mipsxxcore_event_map;
+ mipspmu.cache_event_map = &mipsxxcore_cache_map;
+ break;
+ case CPU_LOONGSON64:
+ mipspmu.name = "mips/loongson3";
+ pmu_type = get_loongson3_pmu_type();
+
+ switch (pmu_type) {
+ case LOONGSON_PMU_TYPE1:
+ counters = 2;
+ mipspmu.general_event_map = &loongson3_event_map1;
+ mipspmu.cache_event_map = &loongson3_cache_map1;
+ break;
+ case LOONGSON_PMU_TYPE2:
+ counters = 4;
+ mipspmu.general_event_map = &loongson3_event_map2;
+ mipspmu.cache_event_map = &loongson3_cache_map2;
+ break;
+ case LOONGSON_PMU_TYPE3:
+ counters = 4;
+ mipspmu.general_event_map = &loongson3_event_map3;
+ mipspmu.cache_event_map = &loongson3_cache_map3;
+ break;
+ }
+ break;
+ case CPU_CAVIUM_OCTEON:
+ case CPU_CAVIUM_OCTEON_PLUS:
+ case CPU_CAVIUM_OCTEON2:
+ case CPU_CAVIUM_OCTEON3:
+ mipspmu.name = "octeon";
+ mipspmu.general_event_map = &octeon_event_map;
+ mipspmu.cache_event_map = &octeon_cache_map;
+ mipspmu.map_raw_event = octeon_pmu_map_raw_event;
+ break;
+ case CPU_BMIPS5000:
+ mipspmu.name = "BMIPS5000";
+ mipspmu.general_event_map = &bmips5000_event_map;
+ mipspmu.cache_event_map = &bmips5000_cache_map;
+ break;
+ default:
+ pr_cont("Either hardware does not support performance "
+ "counters, or not yet implemented.\n");
+ return -ENODEV;
+ }
+
+ mipspmu.num_counters = counters;
+ mipspmu.irq = irq;
+
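+ /*
+ * Select the counter width: when the W bit is set the counters are
+ * 48-bit on Loongson-3 PMU type 2 and 64-bit otherwise; without it
+ * they are 32-bit.
+ */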
+ if (read_c0_perfctrl0() & MIPS_PERFCTRL_W) {
+ if (get_loongson3_pmu_type() == LOONGSON_PMU_TYPE2) {
+ counter_bits = 48;
+ mipspmu.max_period = (1ULL << 47) - 1;
+ mipspmu.valid_count = (1ULL << 47) - 1;
+ mipspmu.overflow = 1ULL << 47;
+ } else {
+ counter_bits = 64;
+ mipspmu.max_period = (1ULL << 63) - 1;
+ mipspmu.valid_count = (1ULL << 63) - 1;
+ mipspmu.overflow = 1ULL << 63;
+ }
+ mipspmu.read_counter = mipsxx_pmu_read_counter_64;
+ mipspmu.write_counter = mipsxx_pmu_write_counter_64;
+ } else {
+ counter_bits = 32;
+ mipspmu.max_period = (1ULL << 31) - 1;
+ mipspmu.valid_count = (1ULL << 31) - 1;
+ mipspmu.overflow = 1ULL << 31;
+ mipspmu.read_counter = mipsxx_pmu_read_counter;
+ mipspmu.write_counter = mipsxx_pmu_write_counter;
+ }
+
+ on_each_cpu(reset_counters, (void *)(long)counters, 1);
+
+ pr_cont("%s PMU enabled, %d %d-bit counters available to each "
+ "CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
+ irq < 0 ? " (share with timer interrupt)" : "");
+
+ perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
+
+ return 0;
+}
+early_initcall(init_hw_perf_events);
diff --git a/arch/mips/kernel/perf_regs.c b/arch/mips/kernel/perf_regs.c
new file mode 100644
index 000000000..e686780d1
--- /dev/null
+++ b/arch/mips/kernel/perf_regs.c
@@ -0,0 +1,68 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Some parts derived from x86 version of this file.
+ *
+ * Copyright (C) 2013 Cavium, Inc.
+ */
+
+#include <linux/perf_event.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_32BIT
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ return PERF_SAMPLE_REGS_ABI_32;
+}
+#else /* Must be CONFIG_64BIT */
+u64 perf_reg_abi(struct task_struct *tsk)
+{
+ if (test_tsk_thread_flag(tsk, TIF_32BIT_REGS))
+ return PERF_SAMPLE_REGS_ABI_32;
+ else
+ return PERF_SAMPLE_REGS_ABI_64;
+}
+#endif /* CONFIG_32BIT */
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask)
+ return -EINVAL;
+ if (mask & ~((1ull << PERF_REG_MIPS_MAX) - 1))
+ return -EINVAL;
+ return 0;
+}
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ long v;
+
+ switch (idx) {
+ case PERF_REG_MIPS_PC:
+ v = regs->cp0_epc;
+ break;
+ case PERF_REG_MIPS_R1 ... PERF_REG_MIPS_R25:
+ v = regs->regs[idx - PERF_REG_MIPS_R1 + 1];
+ break;
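+ /* $26-$27 (k0/k1) are kernel scratch registers and are not sampled. */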
+ case PERF_REG_MIPS_R28 ... PERF_REG_MIPS_R31:
+ v = regs->regs[idx - PERF_REG_MIPS_R28 + 28];
+ break;
+
+ default:
+ WARN_ON_ONCE(1);
+ return 0;
+ }
+
+ return (s64)v; /* Sign extend if 32-bit. */
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
new file mode 100644
index 000000000..9bf60d7d4
--- /dev/null
+++ b/arch/mips/kernel/pm-cps.c
@@ -0,0 +1,738 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/cpuhotplug.h>
+#include <linux/init.h>
+#include <linux/percpu.h>
+#include <linux/slab.h>
+#include <linux/suspend.h>
+
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/cacheops.h>
+#include <asm/idle.h>
+#include <asm/mips-cps.h>
+#include <asm/mipsmtregs.h>
+#include <asm/pm.h>
+#include <asm/pm-cps.h>
+#include <asm/smp-cps.h>
+#include <asm/uasm.h>
+
+/*
+ * cps_nc_entry_fn - type of a generated non-coherent state entry function
+ * @online: the count of online coupled VPEs
+ * @nc_ready_count: pointer to a non-coherent mapping of the core ready_count
+ *
+ * The code entering & exiting non-coherent states is generated at runtime
+ * using uasm, in order to ensure that the compiler cannot insert a stray
+ * memory access at an unfortunate time and to allow the generation of optimal
+ * core-specific code particularly for cache routines. If coupled_coherence
+ * is non-zero and this is the entry function for the CPS_PM_NC_WAIT state,
+ * returns the number of VPEs that were in the wait state at the point this
+ * VPE left it. Returns garbage if coupled_coherence is zero or this is not
+ * the entry function for CPS_PM_NC_WAIT.
+ */
+typedef unsigned (*cps_nc_entry_fn)(unsigned online, u32 *nc_ready_count);
+
+/*
+ * The entry point of the generated non-coherent idle state entry/exit
+ * functions. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_READ_MOSTLY(cps_nc_entry_fn[CPS_PM_STATE_COUNT],
+ nc_asm_enter);
+
+/* Bitmap indicating which states are supported by the system */
+static DECLARE_BITMAP(state_support, CPS_PM_STATE_COUNT);
+
+/*
+ * Indicates the number of coupled VPEs ready to operate in a non-coherent
+ * state. Actually per-core rather than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(u32*, ready_count);
+
+/* Indicates online CPUs coupled with the current CPU */
+static DEFINE_PER_CPU_ALIGNED(cpumask_t, online_coupled);
+
+/*
+ * Used to synchronize entry to deep idle states. Actually per-core rather
+ * than per-CPU.
+ */
+static DEFINE_PER_CPU_ALIGNED(atomic_t, pm_barrier);
+
+/* Saved CPU state across the CPS_PM_POWER_GATED state */
+DEFINE_PER_CPU_ALIGNED(struct mips_static_suspend_state, cps_cpu_state);
+
+/* A somewhat arbitrary number of labels & relocs for uasm */
+static struct uasm_label labels[32];
+static struct uasm_reloc relocs[32];
+
+enum mips_reg {
+ zero, at, v0, v1, a0, a1, a2, a3,
+ t0, t1, t2, t3, t4, t5, t6, t7,
+ s0, s1, s2, s3, s4, s5, s6, s7,
+ t8, t9, k0, k1, gp, sp, fp, ra,
+};
+
+bool cps_pm_support_state(enum cps_pm_state state)
+{
+ return test_bit(state, state_support);
+}
+
+static void coupled_barrier(atomic_t *a, unsigned online)
+{
+ /*
+ * This function is effectively the same as
+ * cpuidle_coupled_parallel_barrier, which can't be used here since
+ * there's no cpuidle device.
+ */
+
+ if (!coupled_coherence)
+ return;
+
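+ /* Arrival: each VPE increments & spins until all online VPEs arrive. */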
+ smp_mb__before_atomic();
+ atomic_inc(a);
+
+ while (atomic_read(a) < online)
+ cpu_relax();
+
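+ /*
+ * Departure: the last VPE to arrive here (count == online * 2) resets
+ * the counter; the rest spin until that reset occurs.
+ */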
+ if (atomic_inc_return(a) == online * 2) {
+ atomic_set(a, 0);
+ return;
+ }
+
+ while (atomic_read(a) > online)
+ cpu_relax();
+}
+
+int cps_pm_enter_state(enum cps_pm_state state)
+{
+ unsigned cpu = smp_processor_id();
+ unsigned core = cpu_core(&current_cpu_data);
+ unsigned online, left;
+ cpumask_t *coupled_mask = this_cpu_ptr(&online_coupled);
+ u32 *core_ready_count, *nc_core_ready_count;
+ void *nc_addr;
+ cps_nc_entry_fn entry;
+ struct core_boot_config *core_cfg;
+ struct vpe_boot_config *vpe_cfg;
+
+ /* Check that there is an entry function for this state */
+ entry = per_cpu(nc_asm_enter, core)[state];
+ if (!entry)
+ return -EINVAL;
+
+ /* Calculate which coupled CPUs (VPEs) are online */
+#if defined(CONFIG_MIPS_MT) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_online(cpu)) {
+ cpumask_and(coupled_mask, cpu_online_mask,
+ &cpu_sibling_map[cpu]);
+ online = cpumask_weight(coupled_mask);
+ cpumask_clear_cpu(cpu, coupled_mask);
+ } else
+#endif
+ {
+ cpumask_clear(coupled_mask);
+ online = 1;
+ }
+
+ /* Setup the VPE to run mips_cps_pm_restore when started again */
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /* Power gating relies upon CPS SMP */
+ if (!mips_cps_smp_in_use())
+ return -EINVAL;
+
+ core_cfg = &mips_cps_core_bootcfg[core];
+ vpe_cfg = &core_cfg->vpe_config[cpu_vpe_id(&current_cpu_data)];
+ vpe_cfg->pc = (unsigned long)mips_cps_pm_restore;
+ vpe_cfg->gp = (unsigned long)current_thread_info();
+ vpe_cfg->sp = 0;
+ }
+
+ /* Indicate that this CPU might not be coherent */
+ cpumask_clear_cpu(cpu, &cpu_coherent_mask);
+ smp_mb__after_atomic();
+
+ /* Create a non-coherent mapping of the core ready_count */
+ core_ready_count = per_cpu(ready_count, core);
+ nc_addr = kmap_noncoherent(virt_to_page(core_ready_count),
+ (unsigned long)core_ready_count);
+ nc_addr += ((unsigned long)core_ready_count & ~PAGE_MASK);
+ nc_core_ready_count = nc_addr;
+
+ /* Ensure ready_count is zero-initialised before the assembly runs */
+ WRITE_ONCE(*nc_core_ready_count, 0);
+ coupled_barrier(&per_cpu(pm_barrier, core), online);
+
+ /* Run the generated entry code */
+ left = entry(online, nc_core_ready_count);
+
+ /* Remove the non-coherent mapping of ready_count */
+ kunmap_noncoherent();
+
+ /* Indicate that this CPU is definitely coherent */
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+
+ /*
+ * If this VPE is the first to leave the non-coherent wait state then
+ * it needs to wake up any coupled VPEs still running their wait
+ * instruction so that they return to cpuidle, which can then complete
+ * coordination between the coupled VPEs & provide the governor with
+ * a chance to reflect on the length of time the VPEs were in the
+ * idle state.
+ */
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT) && (left == online))
+ arch_send_call_function_ipi_mask(coupled_mask);
+
+ return 0;
+}
+
+static void cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cache_desc *cache,
+ unsigned op, int lbl)
+{
+ unsigned cache_size = cache->ways << cache->waybit;
+ unsigned i;
+ const unsigned unroll_lines = 32;
+
+ /* If the cache isn't present this function has it easy */
+ if (cache->flags & MIPS_CACHE_NOT_PRESENT)
+ return;
+
+ /* Load base address */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Calculate end address */
+ if (cache_size < 0x8000)
+ uasm_i_addiu(pp, t1, t0, cache_size);
+ else
+ UASM_i_LA(pp, t1, (long)(CKSEG0 + cache_size));
+
+ /* Start of cache op loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Generate the cache ops */
+ for (i = 0; i < unroll_lines; i++) {
+ if (cpu_has_mips_r6) {
+ uasm_i_cache(pp, op, 0, t0);
+ uasm_i_addiu(pp, t0, t0, cache->linesz);
+ } else {
+ uasm_i_cache(pp, op, i * cache->linesz, t0);
+ }
+ }
+
+ if (!cpu_has_mips_r6)
+ /* Update the base address */
+ uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+
+ /* Loop if we haven't reached the end address yet */
+ uasm_il_bne(pp, pr, t0, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static int cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ const struct cpuinfo_mips *cpu_info,
+ int lbl)
+{
+ unsigned i, fsb_size = 8;
+ unsigned num_loads = (fsb_size * 3) / 2;
+ unsigned line_stride = 2;
+ unsigned line_size = cpu_info->dcache.linesz;
+ unsigned perf_counter, perf_event;
+ unsigned revision = cpu_info->processor_id & PRID_REV_MASK;
+
+ /*
+ * Determine whether this CPU requires an FSB flush, and if so which
+ * performance counter/event reflect stalls due to a full FSB.
+ */
+ switch (__get_cpu_type(cpu_info->cputype)) {
+ case CPU_INTERAPTIV:
+ perf_counter = 1;
+ perf_event = 51;
+ break;
+
+ case CPU_PROAPTIV:
+ /* Newer proAptiv cores don't require this workaround */
+ if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
+ return 0;
+
+ /* On older ones it's unavailable */
+ return -1;
+
+ default:
+ /* Assume that the CPU does not need this workaround */
+ return 0;
+ }
+
+ /*
+ * Ensure that the fill/store buffer (FSB) is not holding the results
+ * of a prefetch, since if it is then the CPC sequencer may become
+ * stuck in the D3 (ClrBus) state whilst entering a low power state.
+ */
+
+ /* Preserve perf counter setup */
+ uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Setup perf counter to count FSB full pipeline stalls */
+ uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
+ uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ /* Base address for loads */
+ UASM_i_LA(pp, t0, (long)CKSEG0);
+
+ /* Start of clear loop */
+ uasm_build_label(pl, *pp, lbl);
+
+ /* Perform some loads to fill the FSB */
+ for (i = 0; i < num_loads; i++)
+ uasm_i_lw(pp, zero, i * line_size * line_stride, t0);
+
+ /*
+ * Invalidate the new D-cache entries so that the cache will need
+ * refilling (via the FSB) if the loop is executed again.
+ */
+ for (i = 0; i < num_loads; i++) {
+ uasm_i_cache(pp, Hit_Invalidate_D,
+ i * line_size * line_stride, t0);
+ uasm_i_cache(pp, Hit_Writeback_Inv_SD,
+ i * line_size * line_stride, t0);
+ }
+
+ /* Barrier ensuring previous cache invalidates are complete */
+ uasm_i_sync(pp, __SYNC_full);
+ uasm_i_ehb(pp);
+
+ /* Check whether the pipeline stalled due to the FSB being full */
+ uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */
+
+ /* Loop if it didn't */
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+
+ /* Restore the perf counter. The count may well now be wrong... */
+ uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
+ uasm_i_ehb(pp);
+ uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
+ uasm_i_ehb(pp);
+
+ return 0;
+}
+
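+/*
+ * Generate an ll/sc loop which atomically sets the top bit of the word at
+ * r_addr, retrying until the store-conditional succeeds.
+ */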
+static void cps_gen_set_top_bit(u32 **pp, struct uasm_label **pl,
+ struct uasm_reloc **pr,
+ unsigned r_addr, int lbl)
+{
+ uasm_i_lui(pp, t0, uasm_rel_hi(0x80000000));
+ uasm_build_label(pl, *pp, lbl);
+ uasm_i_ll(pp, t1, 0, r_addr);
+ uasm_i_or(pp, t1, t1, t0);
+ uasm_i_sc(pp, t1, 0, r_addr);
+ uasm_il_beqz(pp, pr, t1, lbl);
+ uasm_i_nop(pp);
+}
+
+static void *cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
+{
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+ u32 *buf, *p;
+ const unsigned r_online = a0;
+ const unsigned r_nc_count = a1;
+ const unsigned r_pcohctl = t7;
+ const unsigned max_instrs = 256;
+ unsigned cpc_cmd;
+ int err;
+ enum {
+ lbl_incready = 1,
+ lbl_poll_cont,
+ lbl_secondary_hang,
+ lbl_disable_coherence,
+ lbl_flush_fsb,
+ lbl_invicache,
+ lbl_flushdcache,
+ lbl_hang,
+ lbl_set_cont,
+ lbl_secondary_cont,
+ lbl_decready,
+ };
+
+ /* Allocate a buffer to hold the generated code */
+ p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
+ if (!buf)
+ return NULL;
+
+ /* Clear labels & relocs ready for (re)use */
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ /* Power gating relies upon CPS SMP */
+ if (!mips_cps_smp_in_use())
+ goto out_err;
+
+ /*
+ * Save CPU state. Note the non-standard calling convention
+ * with the return address placed in v0 to avoid clobbering
+ * the ra register before it is saved.
+ */
+ UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
+ uasm_i_jalr(&p, v0, t0);
+ uasm_i_nop(&p);
+ }
+
+ /*
+ * Load addresses of required CM & CPC registers. This is done early
+ * because they're needed in both the enable & disable coherence steps
+ * but in the coupled case the enable step will only run on one VPE.
+ */
+ UASM_i_LA(&p, r_pcohctl, (long)addr_gcr_cl_coherence());
+
+ if (coupled_coherence) {
+ /* Increment ready_count */
+ uasm_i_sync(&p, __SYNC_mb);
+ uasm_build_label(&l, p, lbl_incready);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, 1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_incready);
+ uasm_i_addiu(&p, t1, t1, 1);
+
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, __SYNC_mb);
+
+ /*
+ * If this is the last VPE to become ready for non-coherence
+ * then it should branch below.
+ */
+ uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
+ uasm_i_nop(&p);
+
+ if (state < CPS_PM_POWER_GATED) {
+ /*
+ * Otherwise this is not the last VPE to become ready
+ * for non-coherence. It needs to wait until coherence
+ * has been disabled before proceeding, which it will do
+ * by polling for the top bit of ready_count being set.
+ */
+ uasm_i_addiu(&p, t1, zero, -1);
+ uasm_build_label(&l, p, lbl_poll_cont);
+ uasm_i_lw(&p, t0, 0, r_nc_count);
+ uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
+ uasm_i_ehb(&p);
+ if (cpu_has_mipsmt)
+ uasm_i_yield(&p, zero, t1);
+ uasm_il_b(&p, &r, lbl_poll_cont);
+ uasm_i_nop(&p);
+ } else {
+ /*
+ * The core will lose power & this VPE will not continue
+ * so it can simply halt here.
+ */
+ if (cpu_has_mipsmt) {
+ /* Halt the VPE via C0 tchalt register */
+ uasm_i_addiu(&p, t0, zero, TCHALT_H);
+ uasm_i_mtc0(&p, t0, 2, 4);
+ } else if (cpu_has_vp) {
+ /* Halt the VP via the CPC VP_STOP register */
+ unsigned int vpe_id;
+
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ uasm_i_addiu(&p, t0, zero, 1 << vpe_id);
+ UASM_i_LA(&p, t1, (long)addr_cpc_cl_vp_stop());
+ uasm_i_sw(&p, t0, 0, t1);
+ } else {
+ BUG();
+ }
+ uasm_build_label(&l, p, lbl_secondary_hang);
+ uasm_il_b(&p, &r, lbl_secondary_hang);
+ uasm_i_nop(&p);
+ }
+ }
+
+ /*
+ * This is the point of no return - this VPE will now proceed to
+ * disable coherence. At this point we *must* be sure that no other
+ * VPE within the core will interfere with the L1 dcache.
+ */
+ uasm_build_label(&l, p, lbl_disable_coherence);
+
+ /* Invalidate the L1 icache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
+ Index_Invalidate_I, lbl_invicache);
+
+ /* Writeback & invalidate the L1 dcache */
+ cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
+ Index_Writeback_Inv_D, lbl_flushdcache);
+
+ /* Barrier ensuring previous cache invalidates are complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+
+ if (mips_cm_revision() < CM_REV_CM3) {
+ /*
+ * Disable all but self interventions. The load from COHCTL is
+ * defined by the interAptiv & proAptiv SUMs as ensuring that the
+ * operation resulting from the preceding store is complete.
+ */
+ uasm_i_addiu(&p, t0, zero, 1 << cpu_core(&cpu_data[cpu]));
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Barrier to ensure write to coherence control is complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+ }
+
+ /* Disable coherence */
+ uasm_i_sw(&p, zero, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ if (state >= CPS_PM_CLOCK_GATED) {
+ err = cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu],
+ lbl_flush_fsb);
+ if (err)
+ goto out_err;
+
+ /* Determine the CPC command to issue */
+ switch (state) {
+ case CPS_PM_CLOCK_GATED:
+ cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
+ break;
+ case CPS_PM_POWER_GATED:
+ cpc_cmd = CPC_Cx_CMD_PWRDOWN;
+ break;
+ default:
+ BUG();
+ goto out_err;
+ }
+
+ /* Issue the CPC command */
+ UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
+ uasm_i_addiu(&p, t1, zero, cpc_cmd);
+ uasm_i_sw(&p, t1, 0, t0);
+
+ if (state == CPS_PM_POWER_GATED) {
+ /* If anything goes wrong just hang */
+ uasm_build_label(&l, p, lbl_hang);
+ uasm_il_b(&p, &r, lbl_hang);
+ uasm_i_nop(&p);
+
+ /*
+ * There's no point generating more code, the core is
+ * powered down & if powered back up will run from the
+ * reset vector not from here.
+ */
+ goto gen_done;
+ }
+
+ /* Barrier to ensure write to CPC command is complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+ }
+
+ if (state == CPS_PM_NC_WAIT) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ if (coupled_coherence)
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
+ lbl_set_cont);
+
+ /*
+ * VPEs which did not disable coherence will continue
+ * executing, after coherence has been disabled, from this
+ * point.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Now perform our wait */
+ uasm_i_wait(&p, 0);
+ }
+
+ /*
+ * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
+ * will run this. The first will actually re-enable coherence & the
+ * rest will just be performing a rather unusual nop.
+ */
+ uasm_i_addiu(&p, t0, zero, mips_cm_revision() < CM_REV_CM3
+ ? CM_GCR_Cx_COHERENCE_COHDOMAINEN
+ : CM3_GCR_Cx_COHERENCE_COHEN);
+
+ uasm_i_sw(&p, t0, 0, r_pcohctl);
+ uasm_i_lw(&p, t0, 0, r_pcohctl);
+
+ /* Barrier to ensure write to coherence control is complete */
+ uasm_i_sync(&p, __SYNC_full);
+ uasm_i_ehb(&p);
+
+ if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
+ /* Decrement ready_count */
+ uasm_build_label(&l, p, lbl_decready);
+ uasm_i_sync(&p, __SYNC_mb);
+ uasm_i_ll(&p, t1, 0, r_nc_count);
+ uasm_i_addiu(&p, t2, t1, -1);
+ uasm_i_sc(&p, t2, 0, r_nc_count);
+ uasm_il_beqz(&p, &r, t2, lbl_decready);
+ uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);
+
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, __SYNC_mb);
+ }
+
+ if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
+ /*
+ * At this point it is safe for all VPEs to proceed with
+ * execution. This VPE will set the top bit of ready_count
+ * to indicate to the other VPEs that they may continue.
+ */
+ cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);
+
+ /*
+ * This core will be reliant upon another core sending a
+ * power-up command to the CPC in order to resume operation.
+ * Thus an arbitrary VPE can't trigger the core leaving the
+ * idle state and the one that disables coherence might as well
+ * be the one to re-enable it. The rest will continue from here
+ * after that has been done.
+ */
+ uasm_build_label(&l, p, lbl_secondary_cont);
+
+ /* Barrier ensuring all CPUs see the updated r_nc_count value */
+ uasm_i_sync(&p, __SYNC_mb);
+ }
+
+ /* The core is coherent, time to return to C code */
+ uasm_i_jr(&p, ra);
+ uasm_i_nop(&p);
+
+gen_done:
+ /* Ensure the code didn't exceed the resources allocated for it */
+ BUG_ON((p - buf) > max_instrs);
+ BUG_ON((l - labels) > ARRAY_SIZE(labels));
+ BUG_ON((r - relocs) > ARRAY_SIZE(relocs));
+
+ /* Patch branch offsets */
+ uasm_resolve_relocs(relocs, labels);
+
+ /* Flush the icache */
+ local_flush_icache_range((unsigned long)buf, (unsigned long)p);
+
+ return buf;
+out_err:
+ kfree(buf);
+ return NULL;
+}
+
+static int cps_pm_online_cpu(unsigned int cpu)
+{
+ enum cps_pm_state state;
+ unsigned core = cpu_core(&cpu_data[cpu]);
+ void *entry_fn, *core_rc;
+
+ for (state = CPS_PM_NC_WAIT; state < CPS_PM_STATE_COUNT; state++) {
+ if (per_cpu(nc_asm_enter, core)[state])
+ continue;
+ if (!test_bit(state, state_support))
+ continue;
+
+ entry_fn = cps_gen_entry_code(cpu, state);
+ if (!entry_fn) {
+ pr_err("Failed to generate core %u state %u entry\n",
+ core, state);
+ clear_bit(state, state_support);
+ }
+
+ per_cpu(nc_asm_enter, core)[state] = entry_fn;
+ }
+
+ if (!per_cpu(ready_count, core)) {
+ core_rc = kmalloc(sizeof(u32), GFP_KERNEL);
+ if (!core_rc) {
+ pr_err("Failed allocate core %u ready_count\n", core);
+ return -ENOMEM;
+ }
+ per_cpu(ready_count, core) = core_rc;
+ }
+
+ return 0;
+}
+
+static int cps_pm_power_notifier(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ unsigned int stat;
+
+ switch (event) {
+ case PM_SUSPEND_PREPARE:
+ stat = read_cpc_cl_stat_conf();
+ /*
+ * If we're attempting to suspend the system and power down all
+ * of the cores, the JTAG detect bit indicates that the CPC will
+ * instead put the cores into clock-off state. In this state
+ * a connected debugger can cause the CPU to attempt
+ * interactions with the powered down system. At best this will
+ * fail. At worst, it can hang the NoC, requiring a hard reset.
+ * To avoid this, just block system suspend if a JTAG probe
+ * is detected.
+ */
+ if (stat & CPC_Cx_STAT_CONF_EJTAG_PROBE) {
+ pr_warn("JTAG probe is connected - abort suspend\n");
+ return NOTIFY_BAD;
+ }
+ return NOTIFY_DONE;
+ default:
+ return NOTIFY_DONE;
+ }
+}
+
+static int __init cps_pm_init(void)
+{
+ /* A CM is required for all non-coherent states */
+ if (!mips_cm_present()) {
+ pr_warn("pm-cps: no CM, non-coherent states unavailable\n");
+ return 0;
+ }
+
+ /*
+ * If interrupts were enabled whilst running a wait instruction on a
+ * non-coherent core then the VPE may end up processing interrupts
+ * whilst non-coherent. That would be bad.
+ */
+ if (cpu_wait == r4k_wait_irqoff)
+ set_bit(CPS_PM_NC_WAIT, state_support);
+ else
+ pr_warn("pm-cps: non-coherent wait unavailable\n");
+
+ /* Detect whether a CPC is present */
+ if (mips_cpc_present()) {
+ /* Detect whether clock gating is implemented */
+ if (read_cpc_cl_stat_conf() & CPC_Cx_STAT_CONF_CLKGAT_IMPL)
+ set_bit(CPS_PM_CLOCK_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPC does not support clock gating\n");
+
+ /* Power gating is available with CPS SMP & any CPC */
+ if (mips_cps_smp_in_use())
+ set_bit(CPS_PM_POWER_GATED, state_support);
+ else
+ pr_warn("pm-cps: CPS SMP not in use, power gating unavailable\n");
+ } else {
+ pr_warn("pm-cps: no CPC, clock & power gating unavailable\n");
+ }
+
+ pm_notifier(cps_pm_power_notifier, 0);
+
+ return cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mips/cps_pm:online",
+ cps_pm_online_cpu, NULL);
+}
+arch_initcall(cps_pm_init);
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
new file mode 100644
index 000000000..486ed2bf2
--- /dev/null
+++ b/arch/mips/kernel/pm.c
@@ -0,0 +1,95 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * CPU PM notifiers for saving/restoring general CPU state.
+ */
+
+#include <linux/cpu_pm.h>
+#include <linux/init.h>
+
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mmu_context.h>
+#include <asm/pm.h>
+#include <asm/watch.h>
+
+/* Used by PM helper macros in asm/pm.h */
+struct mips_static_suspend_state mips_static_suspend_state;
+
+/**
+ * mips_cpu_save() - Save general CPU state.
+ * Ensures that general CPU context is saved, notably FPU and DSP.
+ */
+static int mips_cpu_save(void)
+{
+ /* Save FPU state */
+ lose_fpu(1);
+
+ /* Save DSP state */
+ save_dsp(current);
+
+ return 0;
+}
+
+/**
+ * mips_cpu_restore() - Restore general CPU state.
+ * Restores important CPU context.
+ */
+static void mips_cpu_restore(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ /* Restore ASID */
+ if (current->mm)
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+
+ /* Restore DSP state */
+ restore_dsp(current);
+
+ /* Restore UserLocal */
+ if (cpu_has_userlocal)
+ write_c0_userlocal(current_thread_info()->tp_value);
+
+ /* Restore watch registers */
+ __restore_watch(current);
+}
+
+/**
+ * mips_pm_notifier() - Notifier for preserving general CPU context.
+ * @self: Notifier block.
+ * @cmd: CPU PM event.
+ * @v: Private data (unused).
+ *
+ * This is called when a CPU power management event occurs, and is used to
+ * ensure that important CPU context is preserved across a CPU power down.
+ */
+static int mips_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ int ret;
+
+ switch (cmd) {
+ case CPU_PM_ENTER:
+ ret = mips_cpu_save();
+ if (ret)
+ return NOTIFY_STOP;
+ break;
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ mips_cpu_restore();
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block mips_pm_notifier_block = {
+ .notifier_call = mips_pm_notifier,
+};
+
+static int __init mips_pm_init(void)
+{
+ return cpu_pm_register_notifier(&mips_pm_notifier_block);
+}
+arch_initcall(mips_pm_init);
diff --git a/arch/mips/kernel/probes-common.h b/arch/mips/kernel/probes-common.h
new file mode 100644
index 000000000..73e1d5e95
--- /dev/null
+++ b/arch/mips/kernel/probes-common.h
@@ -0,0 +1,79 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Marcin Nowakowski <marcin.nowakowski@mips.com>
+ */
+
+#ifndef __PROBES_COMMON_H
+#define __PROBES_COMMON_H
+
+#include <asm/inst.h>
+
+int __insn_is_compact_branch(union mips_instruction insn);
+
+static inline int __insn_has_delay_slot(const union mips_instruction insn)
+{
+ switch (insn.i_format.opcode) {
+ /*
+ * jr and jalr are in r_format format.
+ */
+ case spec_op:
+ switch (insn.r_format.func) {
+ case jalr_op:
+ case jr_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * This group contains:
+ * bltz_op, bgez_op, bltzl_op, bgezl_op,
+ * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
+ */
+ case bcond_op:
+ switch (insn.i_format.rt) {
+ case bltz_op:
+ case bltzl_op:
+ case bgez_op:
+ case bgezl_op:
+ case bltzal_op:
+ case bltzall_op:
+ case bgezal_op:
+ case bgezall_op:
+ case bposge32_op:
+ return 1;
+ }
+ break;
+
+ /*
+ * These are unconditional and in j_format.
+ */
+ case jal_op:
+ case j_op:
+ case beq_op:
+ case beql_op:
+ case bne_op:
+ case bnel_op:
+ case blez_op: /* not really i_format */
+ case blezl_op:
+ case bgtz_op:
+ case bgtzl_op:
+ return 1;
+
+ /*
+ * And now the FPA/cp1 branch instructions.
+ */
+ case cop1_op:
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ case lwc2_op: /* This is bbit0 on Octeon */
+ case ldc2_op: /* This is bbit032 on Octeon */
+ case swc2_op: /* This is bbit1 on Octeon */
+ case sdc2_op: /* This is bbit132 on Octeon */
+#endif
+ return 1;
+ }
+
+ return 0;
+}
+
+#endif /* __PROBES_COMMON_H */
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
new file mode 100644
index 000000000..8eba5a1ed
--- /dev/null
+++ b/arch/mips/kernel/proc.c
@@ -0,0 +1,332 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 1995, 1996, 2001 Ralf Baechle
+ * Copyright (C) 2001, 2004 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Maciej W. Rozycki
+ */
+#include <linux/delay.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <asm/bootinfo.h>
+#include <asm/cpu.h>
+#include <asm/cpu-features.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/prom.h>
+
+unsigned int vced_count, vcei_count;
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(proc_cpuinfo_chain);
+
+int __ref register_proc_cpuinfo_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&proc_cpuinfo_chain, nb);
+}
+
+int proc_cpuinfo_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&proc_cpuinfo_chain, val, v);
+}
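+
+/*
+ * A minimal sketch of a platform-side user of this chain (hypothetical
+ * names; assumes struct proc_cpuinfo_notifier_args carries the seq_file
+ * as ->m):
+ *
+ *	static int my_cpuinfo_notify(struct notifier_block *nb,
+ *				     unsigned long action, void *v)
+ *	{
+ *		struct proc_cpuinfo_notifier_args *args = v;
+ *
+ *		seq_printf(args->m, "board\t\t\t: my-board\n");
+ *		return NOTIFY_OK;
+ *	}
+ *
+ * registered during early boot with register_proc_cpuinfo_notifier() using
+ * a notifier_block whose .notifier_call is my_cpuinfo_notify.
+ */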
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ struct proc_cpuinfo_notifier_args proc_cpuinfo_notifier_args;
+ unsigned long n = (unsigned long) v - 1;
+ unsigned int version = cpu_data[n].processor_id;
+ unsigned int fp_vers = cpu_data[n].fpu_id;
+ char fmt[64];
+ int i;
+
+#ifdef CONFIG_SMP
+ if (!cpu_online(n))
+ return 0;
+#endif
+
+ /*
+ * For the first processor also print the system type
+ */
+ if (n == 0) {
+ seq_printf(m, "system type\t\t: %s\n", get_system_type());
+ if (mips_get_machine_name())
+ seq_printf(m, "machine\t\t\t: %s\n",
+ mips_get_machine_name());
+ }
+
+ seq_printf(m, "processor\t\t: %ld\n", n);
+ sprintf(fmt, "cpu model\t\t: %%s V%%d.%%d%s\n",
+ cpu_data[n].options & MIPS_CPU_FPU ? " FPU V%d.%d" : "");
+ seq_printf(m, fmt, __cpu_name[n],
+ (version >> 4) & 0x0f, version & 0x0f,
+ (fp_vers >> 4) & 0x0f, fp_vers & 0x0f);
+ seq_printf(m, "BogoMIPS\t\t: %u.%02u\n",
+ cpu_data[n].udelay_val / (500000/HZ),
+ (cpu_data[n].udelay_val / (5000/HZ)) % 100);
+ seq_printf(m, "wait instruction\t: %s\n", cpu_wait ? "yes" : "no");
+ seq_printf(m, "microsecond timers\t: %s\n",
+ cpu_has_counter ? "yes" : "no");
+ seq_printf(m, "tlb_entries\t\t: %d\n", cpu_data[n].tlbsize);
+ seq_printf(m, "extra interrupt vector\t: %s\n",
+ cpu_has_divec ? "yes" : "no");
+ seq_printf(m, "hardware watchpoint\t: %s",
+ cpu_has_watch ? "yes, " : "no\n");
+ if (cpu_has_watch) {
+ seq_printf(m, "count: %d, address/irw mask: [",
+ cpu_data[n].watch_reg_count);
+ for (i = 0; i < cpu_data[n].watch_reg_count; i++)
+ seq_printf(m, "%s0x%04x", i ? ", " : "",
+ cpu_data[n].watch_reg_masks[i]);
+ seq_puts(m, "]\n");
+ }
+
+ seq_puts(m, "isa\t\t\t:");
+ if (cpu_has_mips_1)
+ seq_puts(m, " mips1");
+ if (cpu_has_mips_2)
+ seq_puts(m, " mips2");
+ if (cpu_has_mips_3)
+ seq_puts(m, " mips3");
+ if (cpu_has_mips_4)
+ seq_puts(m, " mips4");
+ if (cpu_has_mips_5)
+ seq_puts(m, " mips5");
+ if (cpu_has_mips32r1)
+ seq_puts(m, " mips32r1");
+ if (cpu_has_mips32r2)
+ seq_puts(m, " mips32r2");
+ if (cpu_has_mips32r5)
+ seq_puts(m, " mips32r5");
+ if (cpu_has_mips32r6)
+ seq_puts(m, " mips32r6");
+ if (cpu_has_mips64r1)
+ seq_puts(m, " mips64r1");
+ if (cpu_has_mips64r2)
+ seq_puts(m, " mips64r2");
+ if (cpu_has_mips64r5)
+ seq_puts(m, " mips64r5");
+ if (cpu_has_mips64r6)
+ seq_puts(m, " mips64r6");
+ seq_puts(m, "\n");
+
+ seq_puts(m, "ASEs implemented\t:");
+ if (cpu_has_mips16)
+ seq_puts(m, " mips16");
+ if (cpu_has_mips16e2)
+ seq_puts(m, " mips16e2");
+ if (cpu_has_mdmx)
+ seq_puts(m, " mdmx");
+ if (cpu_has_mips3d)
+ seq_puts(m, " mips3d");
+ if (cpu_has_smartmips)
+ seq_puts(m, " smartmips");
+ if (cpu_has_dsp)
+ seq_puts(m, " dsp");
+ if (cpu_has_dsp2)
+ seq_puts(m, " dsp2");
+ if (cpu_has_dsp3)
+ seq_puts(m, " dsp3");
+ if (cpu_has_mipsmt)
+ seq_puts(m, " mt");
+ if (cpu_has_mmips)
+ seq_puts(m, " micromips");
+ if (cpu_has_vz)
+ seq_puts(m, " vz");
+ if (cpu_has_msa)
+ seq_puts(m, " msa");
+ if (cpu_has_eva)
+ seq_puts(m, " eva");
+ if (cpu_has_htw)
+ seq_puts(m, " htw");
+ if (cpu_has_xpa)
+ seq_puts(m, " xpa");
+ if (cpu_has_loongson_mmi)
+ seq_puts(m, " loongson-mmi");
+ if (cpu_has_loongson_cam)
+ seq_puts(m, " loongson-cam");
+ if (cpu_has_loongson_ext)
+ seq_puts(m, " loongson-ext");
+ if (cpu_has_loongson_ext2)
+ seq_puts(m, " loongson-ext2");
+ seq_puts(m, "\n");
+
+ if (cpu_has_mmips) {
+ seq_printf(m, "micromips kernel\t: %s\n",
+ (read_c0_config3() & MIPS_CONF3_ISA_OE) ? "yes" : "no");
+ }
+
+ seq_puts(m, "Options implemented\t:");
+ if (cpu_has_tlb)
+ seq_puts(m, " tlb");
+ if (cpu_has_ftlb)
+ seq_puts(m, " ftlb");
+ if (cpu_has_tlbinv)
+ seq_puts(m, " tlbinv");
+ if (cpu_has_segments)
+ seq_puts(m, " segments");
+ if (cpu_has_rixiex)
+ seq_puts(m, " rixiex");
+ if (cpu_has_ldpte)
+ seq_puts(m, " ldpte");
+ if (cpu_has_maar)
+ seq_puts(m, " maar");
+ if (cpu_has_rw_llb)
+ seq_puts(m, " rw_llb");
+ if (cpu_has_4kex)
+ seq_puts(m, " 4kex");
+ if (cpu_has_3k_cache)
+ seq_puts(m, " 3k_cache");
+ if (cpu_has_4k_cache)
+ seq_puts(m, " 4k_cache");
+ if (cpu_has_octeon_cache)
+ seq_puts(m, " octeon_cache");
+ if (raw_cpu_has_fpu)
+ seq_puts(m, " fpu");
+ if (cpu_has_32fpr)
+ seq_puts(m, " 32fpr");
+ if (cpu_has_cache_cdex_p)
+ seq_puts(m, " cache_cdex_p");
+ if (cpu_has_cache_cdex_s)
+ seq_puts(m, " cache_cdex_s");
+ if (cpu_has_prefetch)
+ seq_puts(m, " prefetch");
+ if (cpu_has_mcheck)
+ seq_puts(m, " mcheck");
+ if (cpu_has_ejtag)
+ seq_puts(m, " ejtag");
+ if (cpu_has_llsc)
+ seq_puts(m, " llsc");
+ if (cpu_has_guestctl0ext)
+ seq_puts(m, " guestctl0ext");
+ if (cpu_has_guestctl1)
+ seq_puts(m, " guestctl1");
+ if (cpu_has_guestctl2)
+ seq_puts(m, " guestctl2");
+ if (cpu_has_guestid)
+ seq_puts(m, " guestid");
+ if (cpu_has_drg)
+ seq_puts(m, " drg");
+ if (cpu_has_rixi)
+ seq_puts(m, " rixi");
+ if (cpu_has_lpa)
+ seq_puts(m, " lpa");
+ if (cpu_has_mvh)
+ seq_puts(m, " mvh");
+ if (cpu_has_vtag_icache)
+ seq_puts(m, " vtag_icache");
+ if (cpu_has_dc_aliases)
+ seq_puts(m, " dc_aliases");
+ if (cpu_has_ic_fills_f_dc)
+ seq_puts(m, " ic_fills_f_dc");
+ if (cpu_has_pindexed_dcache)
+ seq_puts(m, " pindexed_dcache");
+ if (cpu_has_userlocal)
+ seq_puts(m, " userlocal");
+ if (cpu_has_nofpuex)
+ seq_puts(m, " nofpuex");
+ if (cpu_has_vint)
+ seq_puts(m, " vint");
+ if (cpu_has_veic)
+ seq_puts(m, " veic");
+ if (cpu_has_inclusive_pcaches)
+ seq_puts(m, " inclusive_pcaches");
+ if (cpu_has_perf_cntr_intr_bit)
+ seq_puts(m, " perf_cntr_intr_bit");
+ if (cpu_has_ufr)
+ seq_puts(m, " ufr");
+ if (cpu_has_fre)
+ seq_puts(m, " fre");
+ if (cpu_has_cdmm)
+ seq_puts(m, " cdmm");
+ if (cpu_has_small_pages)
+ seq_puts(m, " small_pages");
+ if (cpu_has_nan_legacy)
+ seq_puts(m, " nan_legacy");
+ if (cpu_has_nan_2008)
+ seq_puts(m, " nan_2008");
+ if (cpu_has_ebase_wg)
+ seq_puts(m, " ebase_wg");
+ if (cpu_has_badinstr)
+ seq_puts(m, " badinstr");
+ if (cpu_has_badinstrp)
+ seq_puts(m, " badinstrp");
+ if (cpu_has_contextconfig)
+ seq_puts(m, " contextconfig");
+ if (cpu_has_perf)
+ seq_puts(m, " perf");
+ if (cpu_has_mac2008_only)
+ seq_puts(m, " mac2008_only");
+ if (cpu_has_ftlbparex)
+ seq_puts(m, " ftlbparex");
+ if (cpu_has_gsexcex)
+ seq_puts(m, " gsexcex");
+ if (cpu_has_shared_ftlb_ram)
+ seq_puts(m, " shared_ftlb_ram");
+ if (cpu_has_shared_ftlb_entries)
+ seq_puts(m, " shared_ftlb_entries");
+ if (cpu_has_mipsmt_pertccounters)
+ seq_puts(m, " mipsmt_pertccounters");
+ if (cpu_has_mmid)
+ seq_puts(m, " mmid");
+ if (cpu_has_mm_sysad)
+ seq_puts(m, " mm_sysad");
+ if (cpu_has_mm_full)
+ seq_puts(m, " mm_full");
+ seq_puts(m, "\n");
+
+ seq_printf(m, "shadow register sets\t: %d\n",
+ cpu_data[n].srsets);
+ seq_printf(m, "kscratch registers\t: %d\n",
+ hweight8(cpu_data[n].kscratch_mask));
+ seq_printf(m, "package\t\t\t: %d\n", cpu_data[n].package);
+ seq_printf(m, "core\t\t\t: %d\n", cpu_core(&cpu_data[n]));
+
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
+ if (cpu_has_mipsmt)
+ seq_printf(m, "VPE\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
+ else if (cpu_has_vp)
+ seq_printf(m, "VP\t\t\t: %d\n", cpu_vpe_id(&cpu_data[n]));
+#endif
+
+ sprintf(fmt, "VCE%%c exceptions\t\t: %s\n",
+ cpu_has_vce ? "%u" : "not available");
+ seq_printf(m, fmt, 'D', vced_count);
+ seq_printf(m, fmt, 'I', vcei_count);
+
+ proc_cpuinfo_notifier_args.m = m;
+ proc_cpuinfo_notifier_args.n = n;
+
+ raw_notifier_call_chain(&proc_cpuinfo_chain, 0,
+ &proc_cpuinfo_notifier_args);
+
+ seq_puts(m, "\n");
+
+ return 0;
+}
+
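+/*
+ * The iterator below hands CPU numbers to show_cpuinfo() encoded as
+ * (cpu + 1): a NULL return means end of sequence, so CPU 0 cannot be
+ * passed as-is. show_cpuinfo() undoes the bias with "(unsigned long)v - 1".
+ */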
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < nr_cpu_ids ? (void *) (i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
new file mode 100644
index 000000000..17d80e2f2
--- /dev/null
+++ b/arch/mips/kernel/process.c
@@ -0,0 +1,912 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000 by Ralf Baechle and others.
+ * Copyright (C) 2005, 2006 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/cpu.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/nmi.h>
+#include <linux/personality.h>
+#include <linux/prctl.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/abi.h>
+#include <asm/asm.h>
+#include <asm/dsemul.h>
+#include <asm/dsp.h>
+#include <asm/exec.h>
+#include <asm/fpu.h>
+#include <asm/inst.h>
+#include <asm/irq.h>
+#include <asm/irq_regs.h>
+#include <asm/isadep.h>
+#include <asm/msa.h>
+#include <asm/mips-cps.h>
+#include <asm/mipsregs.h>
+#include <asm/processor.h>
+#include <asm/reg.h>
+#include <asm/stacktrace.h>
+
+#ifdef CONFIG_HOTPLUG_CPU
+void arch_cpu_idle_dead(void)
+{
+ play_dead();
+}
+#endif
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
+{
+ unsigned long status;
+
+ /* New thread loses kernel privileges. */
+ status = regs->cp0_status & ~(ST0_CU0|ST0_CU1|ST0_CU2|ST0_FR|KU_MASK);
+ status |= KU_USER;
+ regs->cp0_status = status;
+ lose_fpu(0);
+ clear_thread_flag(TIF_MSA_CTX_LIVE);
+ clear_used_math();
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
+ init_dsp();
+ regs->cp0_epc = pc;
+ regs->regs[29] = sp;
+}
+
+void exit_thread(struct task_struct *tsk)
+{
+ /*
+ * User threads may have allocated a delay slot emulation frame.
+ * If so, clean up that allocation.
+ */
+ if (!(current->flags & PF_KTHREAD))
+ dsemul_thread_cleanup(tsk);
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ /*
+ * Save any process state which is live in hardware registers to the
+ * parent context prior to duplication. This prevents the new child
+ * state becoming stale if the parent is preempted before copy_thread()
+ * gets a chance to save the parent's live hardware registers to the
+ * child context.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled())
+ save_msa(current);
+ else if (is_fpu_owner())
+ _save_fp(current);
+
+ save_dsp(current);
+
+ preempt_enable();
+
+ *dst = *src;
+ return 0;
+}
+
+/*
+ * Copy architecture-specific thread state
+ */
+int copy_thread(struct task_struct *p, const struct kernel_clone_args *args)
+{
+ unsigned long clone_flags = args->flags;
+ unsigned long usp = args->stack;
+ unsigned long tls = args->tls;
+ struct thread_info *ti = task_thread_info(p);
+ struct pt_regs *childregs, *regs = current_pt_regs();
+ unsigned long childksp;
+
+ childksp = (unsigned long)task_stack_page(p) + THREAD_SIZE - 32;
+
+ /* set up new TSS. */
+ childregs = (struct pt_regs *) childksp - 1;
+ /* Put the stack after the struct pt_regs. */
+ childksp = (unsigned long) childregs;
+ p->thread.cp0_status = (read_c0_status() & ~(ST0_CU2|ST0_CU1)) | ST0_KERNEL_CUMASK;
+
+ /*
+ * New tasks lose permission to use the fpu. This accelerates context
+ * switching for most programs since they don't use the fpu.
+ */
+ clear_tsk_thread_flag(p, TIF_USEDFPU);
+ clear_tsk_thread_flag(p, TIF_USEDMSA);
+ clear_tsk_thread_flag(p, TIF_MSA_CTX_LIVE);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ clear_tsk_thread_flag(p, TIF_FPUBOUND);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ if (unlikely(args->fn)) {
+ /* kernel thread */
+ unsigned long status = p->thread.cp0_status;
+ memset(childregs, 0, sizeof(struct pt_regs));
+ p->thread.reg16 = (unsigned long)args->fn;
+ p->thread.reg17 = (unsigned long)args->fn_arg;
+ p->thread.reg29 = childksp;
+ p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
+#if defined(CONFIG_CPU_R3000)
+ status = (status & ~(ST0_KUP | ST0_IEP | ST0_IEC)) |
+ ((status & (ST0_KUC | ST0_IEC)) << 2);
+#else
+ status |= ST0_EXL;
+#endif
+ childregs->cp0_status = status;
+ return 0;
+ }
+
+ /* user thread */
+ *childregs = *regs;
+ childregs->regs[7] = 0; /* Clear error flag */
+ childregs->regs[2] = 0; /* Child gets zero as return value */
+ if (usp)
+ childregs->regs[29] = usp;
+
+ p->thread.reg29 = (unsigned long) childregs;
+ p->thread.reg31 = (unsigned long) ret_from_fork;
+
+ childregs->cp0_status &= ~(ST0_CU2|ST0_CU1);
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+#endif
+
+ if (clone_flags & CLONE_SETTLS)
+ ti->tp_value = tls;
+
+ return 0;
+}
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+struct mips_frame_info {
+ void *func;
+ unsigned long func_size;
+ int frame_size;
+ int pc_offset;
+};
+
+#define J_TARGET(pc,target) \
+ (((unsigned long)(pc) & 0xf0000000) | ((target) << 2))
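+
+/*
+ * Example: for a j/jal at pc 0x80123458 with 26-bit target field t,
+ * J_TARGET() keeps the top four address bits of pc and scales the field:
+ *
+ *	(0x80123458 & 0xf0000000) | (t << 2) == 0x80000000 + (t << 2)
+ */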
+
+static inline int is_jr_ra_ins(union mips_instruction *ip)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16 ra
+ * jr ra
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if (ip->mm16_r5_format.opcode == mm_pool16c_op &&
+ ip->mm16_r5_format.rt == mm_jr16_op &&
+ ip->mm16_r5_format.imm == 31)
+ return 1;
+ return 0;
+ }
+
+ if (ip->r_format.opcode == mm_pool32a_op &&
+ ip->r_format.func == mm_pool32axf_op &&
+ ((ip->u_format.uimmediate >> 6) & GENMASK(9, 0)) == mm_jalr_op &&
+ ip->r_format.rt == 31)
+ return 1;
+ return 0;
+#else
+ if (ip->r_format.opcode == spec_op &&
+ ip->r_format.func == jr_op &&
+ ip->r_format.rs == 31)
+ return 1;
+ return 0;
+#endif
+}
+
+static inline int is_ra_save_ins(union mips_instruction *ip, int *poff)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * swsp ra,offset
+ * swm16 reglist,offset(sp)
+ * swm32 reglist,offset(sp)
+ * sw32 ra,offset(sp)
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is way more fun...
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ switch (ip->mm16_r5_format.opcode) {
+ case mm_swsp16_op:
+ if (ip->mm16_r5_format.rt != 31)
+ return 0;
+
+ *poff = ip->mm16_r5_format.imm;
+ *poff = (*poff << 2) / sizeof(ulong);
+ return 1;
+
+ case mm_pool16c_op:
+ switch (ip->mm16_m_format.func) {
+ case mm_swm16_op:
+ *poff = ip->mm16_m_format.imm;
+ *poff += 1 + ip->mm16_m_format.rlist;
+ *poff = (*poff << 2) / sizeof(ulong);
+ return 1;
+
+ default:
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+ }
+
+ switch (ip->i_format.opcode) {
+ case mm_sw32_op:
+ if (ip->i_format.rs != 29)
+ return 0;
+ if (ip->i_format.rt != 31)
+ return 0;
+
+ *poff = ip->i_format.simmediate / sizeof(ulong);
+ return 1;
+
+ case mm_pool32b_op:
+ switch (ip->mm_m_format.func) {
+ case mm_swm32_func:
+ if (ip->mm_m_format.rd < 0x10)
+ return 0;
+ if (ip->mm_m_format.base != 29)
+ return 0;
+
+ *poff = ip->mm_m_format.simmediate;
+ *poff += (ip->mm_m_format.rd & 0xf) * sizeof(u32);
+ *poff /= sizeof(ulong);
+ return 1;
+ default:
+ return 0;
+ }
+
+ default:
+ return 0;
+ }
+#else
+ /* sw / sd $ra, offset($sp) */
+ if ((ip->i_format.opcode == sw_op || ip->i_format.opcode == sd_op) &&
+ ip->i_format.rs == 29 && ip->i_format.rt == 31) {
+ *poff = ip->i_format.simmediate / sizeof(ulong);
+ return 1;
+ }
+#ifdef CONFIG_CPU_LOONGSON64
+ if ((ip->loongson3_lswc2_format.opcode == swc2_op) &&
+ (ip->loongson3_lswc2_format.ls == 1) &&
+ (ip->loongson3_lswc2_format.fr == 0) &&
+ (ip->loongson3_lswc2_format.base == 29)) {
+ if (ip->loongson3_lswc2_format.rt == 31) {
+ *poff = ip->loongson3_lswc2_format.offset << 1;
+ return 1;
+ }
+ if (ip->loongson3_lswc2_format.rq == 31) {
+ *poff = (ip->loongson3_lswc2_format.offset << 1) + 1;
+ return 1;
+ }
+ }
+#endif
+ return 0;
+#endif
+}
+
+static inline int is_jump_ins(union mips_instruction *ip)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * jr16,jrc,jalr16,jalrs16
+ * jal
+ * jalr/jr,jalr.hb/jr.hb,jalrs,jalrs.hb
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is kind of more fun...
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if ((ip->mm16_r5_format.opcode == mm_pool16c_op &&
+ (ip->mm16_r5_format.rt & mm_jr16_op) == mm_jr16_op))
+ return 1;
+ return 0;
+ }
+
+ if (ip->j_format.opcode == mm_j32_op)
+ return 1;
+ if (ip->j_format.opcode == mm_jal32_op)
+ return 1;
+ if (ip->r_format.opcode != mm_pool32a_op ||
+ ip->r_format.func != mm_pool32axf_op)
+ return 0;
+ return ((ip->u_format.uimmediate >> 6) & mm_jalr_op) == mm_jalr_op;
+#else
+ if (ip->j_format.opcode == j_op)
+ return 1;
+ if (ip->j_format.opcode == jal_op)
+ return 1;
+ if (ip->r_format.opcode != spec_op)
+ return 0;
+ return ip->r_format.func == jalr_op || ip->r_format.func == jr_op;
+#endif
+}
+
+static inline int is_sp_move_ins(union mips_instruction *ip, int *frame_size)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ unsigned short tmp;
+
+ /*
+ * addiusp -imm
+ * addius5 sp,-imm
+ * addiu32 sp,sp,-imm
+ * jraddiusp - NOT SUPPORTED
+ *
+ * microMIPS is not more fun...
+ */
+ if (mm_insn_16bit(ip->word >> 16)) {
+ if (ip->mm16_r3_format.opcode == mm_pool16d_op &&
+ ip->mm16_r3_format.simmediate & mm_addiusp_func) {
+ tmp = ip->mm_b0_format.simmediate >> 1;
+ tmp = ((tmp & 0x1ff) ^ 0x100) - 0x100;
+ if ((tmp + 2) < 4) /* 0x0,0x1,0x1fe,0x1ff are special */
+ tmp ^= 0x100;
+ *frame_size = -(signed short)(tmp << 2);
+ return 1;
+ }
+ if (ip->mm16_r5_format.opcode == mm_pool16d_op &&
+ ip->mm16_r5_format.rt == 29) {
+ tmp = ip->mm16_r5_format.imm >> 1;
+ *frame_size = -(signed short)(tmp & 0xf);
+ return 1;
+ }
+ return 0;
+ }
+
+ if (ip->mm_i_format.opcode == mm_addiu32_op &&
+ ip->mm_i_format.rt == 29 && ip->mm_i_format.rs == 29) {
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
+#else
+ /* addiu/daddiu sp,sp,-imm */
+ if (ip->i_format.rs != 29 || ip->i_format.rt != 29)
+ return 0;
+
+ if (ip->i_format.opcode == addiu_op ||
+ ip->i_format.opcode == daddiu_op) {
+ *frame_size = -ip->i_format.simmediate;
+ return 1;
+ }
+#endif
+ return 0;
+}
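+
+/*
+ * Example: the classic prologue instruction "addiu sp,sp,-32" is an
+ * i_format word with rs == rt == 29 and simmediate == -32, so
+ * is_sp_move_ins() stores frame_size = 32.
+ */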
+
+static int get_frame_info(struct mips_frame_info *info)
+{
+ bool is_mmips = IS_ENABLED(CONFIG_CPU_MICROMIPS);
+ union mips_instruction insn, *ip, *ip_end;
+ unsigned int last_insn_size = 0;
+ bool saw_jump = false;
+
+ info->pc_offset = -1;
+ info->frame_size = 0;
+
+ ip = (void *)msk_isa16_mode((ulong)info->func);
+ if (!ip)
+ goto err;
+
+ ip_end = (void *)ip + (info->func_size ? info->func_size : 512);
+
+ while (ip < ip_end) {
+ ip = (void *)ip + last_insn_size;
+
+ if (is_mmips && mm_insn_16bit(ip->halfword[0])) {
+ insn.word = ip->halfword[0] << 16;
+ last_insn_size = 2;
+ } else if (is_mmips) {
+ insn.word = ip->halfword[0] << 16 | ip->halfword[1];
+ last_insn_size = 4;
+ } else {
+ insn.word = ip->word;
+ last_insn_size = 4;
+ }
+
+ if (is_jr_ra_ins(ip)) {
+ break;
+ } else if (!info->frame_size) {
+ is_sp_move_ins(&insn, &info->frame_size);
+ continue;
+ } else if (!saw_jump && is_jump_ins(ip)) {
+ /*
+ * If we see a jump instruction, we are finished
+ * with the frame save.
+ *
+ * Some functions can have a shortcut return at
+ * the beginning of the function, so don't start
+ * looking for jump instruction until we see the
+ * frame setup.
+ *
+ * The RA save instruction can get put into the
+ * delay slot of the jump instruction, so look
+ * at the next instruction, too.
+ */
+ saw_jump = true;
+ continue;
+ }
+ if (info->pc_offset == -1 &&
+ is_ra_save_ins(&insn, &info->pc_offset))
+ break;
+ if (saw_jump)
+ break;
+ }
+ if (info->frame_size && info->pc_offset >= 0) /* nested */
+ return 0;
+ if (info->pc_offset < 0) /* leaf */
+ return 1;
+ /* prologue seems bogus... */
+err:
+ return -1;
+}
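+
+/*
+ * Illustrative sketch: for a typical non-leaf prologue such as
+ *
+ *	addiu	sp,sp,-32
+ *	sw	ra,28(sp)
+ *
+ * get_frame_info() returns 0 with frame_size = 32 and
+ * pc_offset = 28 / sizeof(ulong); for a leaf function that never saves
+ * $ra it returns 1 instead.
+ */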
+
+static struct mips_frame_info schedule_mfi __read_mostly;
+
+#ifdef CONFIG_KALLSYMS
+static unsigned long get___schedule_addr(void)
+{
+ return kallsyms_lookup_name("__schedule");
+}
+#else
+static unsigned long get___schedule_addr(void)
+{
+ union mips_instruction *ip = (void *)schedule;
+ int max_insns = 8;
+ int i;
+
+ for (i = 0; i < max_insns; i++, ip++) {
+ if (ip->j_format.opcode == j_op)
+ return J_TARGET(ip, ip->j_format.target);
+ }
+ return 0;
+}
+#endif
+
+static int __init frame_info_init(void)
+{
+ unsigned long size = 0;
+#ifdef CONFIG_KALLSYMS
+ unsigned long ofs;
+#endif
+ unsigned long addr;
+
+ addr = get___schedule_addr();
+ if (!addr)
+ addr = (unsigned long)schedule;
+
+#ifdef CONFIG_KALLSYMS
+ kallsyms_lookup_size_offset(addr, &size, &ofs);
+#endif
+ schedule_mfi.func = (void *)addr;
+ schedule_mfi.func_size = size;
+
+ get_frame_info(&schedule_mfi);
+
+ /*
+ * Without schedule() frame info, result given by
+ * thread_saved_pc() and __get_wchan() are not reliable.
+ */
+ if (schedule_mfi.pc_offset < 0)
+ printk("Can't analyze schedule() prologue at %p\n", schedule);
+
+ return 0;
+}
+
+arch_initcall(frame_info_init);
+
+/*
+ * Return saved PC of a blocked thread.
+ */
+static unsigned long thread_saved_pc(struct task_struct *tsk)
+{
+ struct thread_struct *t = &tsk->thread;
+
+ /* Newborn processes are a special case */
+ if (t->reg31 == (unsigned long) ret_from_fork)
+ return t->reg31;
+ if (schedule_mfi.pc_offset < 0)
+ return 0;
+ return ((unsigned long *)t->reg29)[schedule_mfi.pc_offset];
+}
+
+
+#ifdef CONFIG_KALLSYMS
+/* generic stack unwinding function */
+unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
+ unsigned long *sp,
+ unsigned long pc,
+ unsigned long *ra)
+{
+ unsigned long low, high, irq_stack_high;
+ struct mips_frame_info info;
+ unsigned long size, ofs;
+ struct pt_regs *regs;
+ int leaf;
+
+ if (!stack_page)
+ return 0;
+
+ /*
+ * IRQ stacks start at IRQ_STACK_START
+ * task stacks at THREAD_SIZE - 32
+ */
+ low = stack_page;
+ if (!preemptible() && on_irq_stack(raw_smp_processor_id(), *sp)) {
+ high = stack_page + IRQ_STACK_START;
+ irq_stack_high = high;
+ } else {
+ high = stack_page + THREAD_SIZE - 32;
+ irq_stack_high = 0;
+ }
+
+ /*
+ * If we reached the top of the interrupt stack, start unwinding
+ * the interrupted task stack.
+ */
+ if (unlikely(*sp == irq_stack_high)) {
+ unsigned long task_sp = *(unsigned long *)*sp;
+
+ /*
+ * Check that the pointer saved in the IRQ stack head points to
+ * something within the stack of the current task
+ */
+ if (!object_is_on_stack((void *)task_sp))
+ return 0;
+
+ /*
+ * Follow the pointer to the task's kernel stack frame where the
+ * interrupted state was saved.
+ */
+ regs = (struct pt_regs *)task_sp;
+ pc = regs->cp0_epc;
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
+ *sp = regs->regs[29];
+ *ra = regs->regs[31];
+ return pc;
+ }
+ return 0;
+ }
+ if (!kallsyms_lookup_size_offset(pc, &size, &ofs))
+ return 0;
+ /*
+ * Return ra if an exception occurred at the first instruction
+ */
+ if (unlikely(ofs == 0)) {
+ pc = *ra;
+ *ra = 0;
+ return pc;
+ }
+
+ info.func = (void *)(pc - ofs);
+ info.func_size = ofs; /* analyze from start to ofs */
+ leaf = get_frame_info(&info);
+ if (leaf < 0)
+ return 0;
+
+ if (*sp < low || *sp + info.frame_size > high)
+ return 0;
+
+ if (leaf)
+ /*
+ * In some extreme cases, get_frame_info() can wrongly
+ * consider a nested function to be a leaf one. In those
+ * cases, avoid always returning the same value.
+ */
+ pc = pc != *ra ? *ra : 0;
+ else
+ pc = ((unsigned long *)(*sp))[info.pc_offset];
+
+ *sp += info.frame_size;
+ *ra = 0;
+ return __kernel_text_address(pc) ? pc : 0;
+}
+EXPORT_SYMBOL(unwind_stack_by_address);
+
+/* used by show_backtrace() */
+unsigned long unwind_stack(struct task_struct *task, unsigned long *sp,
+ unsigned long pc, unsigned long *ra)
+{
+ unsigned long stack_page = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu) {
+ if (on_irq_stack(cpu, *sp)) {
+ stack_page = (unsigned long)irq_stack[cpu];
+ break;
+ }
+ }
+
+ if (!stack_page)
+ stack_page = (unsigned long)task_stack_page(task);
+
+ return unwind_stack_by_address(stack_page, sp, pc, ra);
+}
+#endif
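+
+/*
+ * Illustrative sketch (not part of this patch): callers iterate until
+ * unwind_stack() returns 0, roughly as show_backtrace() does:
+ *
+ *	pc = regs->cp0_epc;
+ *	sp = regs->regs[29];
+ *	ra = regs->regs[31];
+ *	do {
+ *		print_ip_sym(KERN_DEFAULT, pc);
+ *		pc = unwind_stack(task, &sp, pc, &ra);
+ *	} while (pc);
+ */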
+
+/*
+ * __get_wchan - a maintenance nightmare^W^Wpain in the ass ...
+ */
+unsigned long __get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+#ifdef CONFIG_KALLSYMS
+ unsigned long sp;
+ unsigned long ra = 0;
+#endif
+
+ if (!task_stack_page(task))
+ goto out;
+
+ pc = thread_saved_pc(task);
+
+#ifdef CONFIG_KALLSYMS
+ sp = task->thread.reg29 + schedule_mfi.frame_size;
+
+ while (in_sched_functions(pc))
+ pc = unwind_stack(task, &sp, pc, &ra);
+#endif
+
+out:
+ return pc;
+}
+
+unsigned long mips_stack_top(void)
+{
+ unsigned long top = TASK_SIZE & PAGE_MASK;
+
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* One page for branch delay slot "emulation" */
+ top -= PAGE_SIZE;
+ }
+
+ /* Space for the VDSO, data page & GIC user page */
+ top -= PAGE_ALIGN(current->thread.abi->vdso->size);
+ top -= PAGE_SIZE;
+ top -= mips_gic_present() ? PAGE_SIZE : 0;
+
+ /* Space for cache colour alignment */
+ if (cpu_has_dc_aliases)
+ top -= shm_align_mask + 1;
+
+ /* Space to randomize the VDSO base */
+ if (current->flags & PF_RANDOMIZE)
+ top -= VDSO_RANDOMIZE_SIZE;
+
+ return top;
+}
+
+/*
+ * Don't forget that the stack pointer must be aligned on an 8-byte
+ * boundary for the 32-bit ABIs and on a 16-byte boundary for the
+ * 64-bit ABIs.
+ */
+unsigned long arch_align_stack(unsigned long sp)
+{
+ if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
+ sp -= prandom_u32_max(PAGE_SIZE);
+
+ return sp & ALMASK;
+}
+
+static struct cpumask backtrace_csd_busy;
+
+static void handle_backtrace(void *info)
+{
+ nmi_cpu_backtrace(get_irq_regs());
+ cpumask_clear_cpu(smp_processor_id(), &backtrace_csd_busy);
+}
+
+static DEFINE_PER_CPU(call_single_data_t, backtrace_csd) =
+ CSD_INIT(handle_backtrace, NULL);
+
+static void raise_backtrace(cpumask_t *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ /*
+ * If we previously sent an IPI to the target CPU & it hasn't
+ * cleared its bit in the busy cpumask then it didn't handle
+ * our previous IPI & it's not safe for us to reuse the
+ * call_single_data_t.
+ */
+ if (cpumask_test_and_set_cpu(cpu, &backtrace_csd_busy)) {
+ pr_warn("Unable to send backtrace IPI to CPU%u - perhaps it hung?\n",
+ cpu);
+ continue;
+ }
+
+ csd = &per_cpu(backtrace_csd, cpu);
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
+{
+ nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace);
+}
+
+int mips_get_process_fp_mode(struct task_struct *task)
+{
+ int value = 0;
+
+ if (!test_tsk_thread_flag(task, TIF_32BIT_FPREGS))
+ value |= PR_FP_MODE_FR;
+ if (test_tsk_thread_flag(task, TIF_HYBRID_FPREGS))
+ value |= PR_FP_MODE_FRE;
+
+ return value;
+}
+
+static long prepare_for_fp_mode_switch(void *unused)
+{
+ /*
+ * This is icky, but we use this to simply ensure that all CPUs have
+ * context switched, regardless of whether they were previously running
+ * kernel or user code. This ensures that no CPU that a mode-switching
+ * program may execute on keeps its FPU enabled (& in the old mode)
+ * throughout the mode switch.
+ */
+ return 0;
+}
+
+int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
+{
+ const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
+ struct task_struct *t;
+ struct cpumask process_cpus;
+ int cpu;
+
+ /* If nothing to change, return right away, successfully. */
+ if (value == mips_get_process_fp_mode(task))
+ return 0;
+
+ /* Only accept a mode change if 64-bit FP enabled for o32. */
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
+ return -EOPNOTSUPP;
+
+ /* And only for o32 tasks. */
+ if (IS_ENABLED(CONFIG_64BIT) && !test_thread_flag(TIF_32BIT_REGS))
+ return -EOPNOTSUPP;
+
+ /* Check the value is valid */
+ if (value & ~known_bits)
+ return -EOPNOTSUPP;
+
+ /* Setting FRE without FR is not supported. */
+ if ((value & (PR_FP_MODE_FR | PR_FP_MODE_FRE)) == PR_FP_MODE_FRE)
+ return -EOPNOTSUPP;
+
+ /* Avoid inadvertently triggering emulation */
+ if ((value & PR_FP_MODE_FR) && raw_cpu_has_fpu &&
+ !(raw_current_cpu_data.fpu_id & MIPS_FPIR_F64))
+ return -EOPNOTSUPP;
+ if ((value & PR_FP_MODE_FRE) && raw_cpu_has_fpu && !cpu_has_fre)
+ return -EOPNOTSUPP;
+
+ /* FR = 0 not supported in MIPS R6 */
+ if (!(value & PR_FP_MODE_FR) && raw_cpu_has_fpu && cpu_has_mips_r6)
+ return -EOPNOTSUPP;
+
+ /* Indicate the new FP mode in each thread */
+ for_each_thread(task, t) {
+ /* Update desired FP register width */
+ if (value & PR_FP_MODE_FR) {
+ clear_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+ } else {
+ set_tsk_thread_flag(t, TIF_32BIT_FPREGS);
+ clear_tsk_thread_flag(t, TIF_MSA_CTX_LIVE);
+ }
+
+ /* Update desired FP single layout */
+ if (value & PR_FP_MODE_FRE)
+ set_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+ else
+ clear_tsk_thread_flag(t, TIF_HYBRID_FPREGS);
+ }
+
+ /*
+ * We need to ensure that all threads in the process have switched mode
+ * before returning, in order to allow userland to not worry about
+ * races. We can do this by forcing all CPUs that any thread in the
+ * process may be running on to schedule something else - in this case
+ * prepare_for_fp_mode_switch().
+ *
+ * We begin by generating a mask of all CPUs that any thread in the
+ * process may be running on.
+ */
+ cpumask_clear(&process_cpus);
+ for_each_thread(task, t)
+ cpumask_set_cpu(task_cpu(t), &process_cpus);
+
+ /*
+ * Now we schedule prepare_for_fp_mode_switch() on each of those CPUs.
+ *
+ * The CPUs may have rescheduled already since we switched mode or
+ * generated the cpumask, but that doesn't matter. If the task in this
+ * process is scheduled out then our scheduling
+ * prepare_for_fp_mode_switch() will simply be redundant. If it's
+ * scheduled in then it will already have picked up the new FP mode
+ * whilst doing so.
+ */
+ cpus_read_lock();
+ for_each_cpu_and(cpu, &process_cpus, cpu_online_mask)
+ work_on_cpu(cpu, prepare_for_fp_mode_switch, NULL);
+ cpus_read_unlock();
+
+ return 0;
+}
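+
+/*
+ * Illustrative sketch (userspace, not part of this patch): this function
+ * backs prctl(PR_SET_FP_MODE), so an o32 program switches FP modes with
+ * something like:
+ *
+ *	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR | PR_FP_MODE_FRE) != 0)
+ *		perror("PR_SET_FP_MODE");
+ */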
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+void mips_dump_regs32(u32 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = MIPS32_EF_R1; i <= MIPS32_EF_R31; i++) {
+ /* k0/k1 are copied as zero. */
+ if (i == MIPS32_EF_R26 || i == MIPS32_EF_R27)
+ uregs[i] = 0;
+ else
+ uregs[i] = regs->regs[i - MIPS32_EF_R0];
+ }
+
+ uregs[MIPS32_EF_LO] = regs->lo;
+ uregs[MIPS32_EF_HI] = regs->hi;
+ uregs[MIPS32_EF_CP0_EPC] = regs->cp0_epc;
+ uregs[MIPS32_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+ uregs[MIPS32_EF_CP0_STATUS] = regs->cp0_status;
+ uregs[MIPS32_EF_CP0_CAUSE] = regs->cp0_cause;
+}
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+void mips_dump_regs64(u64 *uregs, const struct pt_regs *regs)
+{
+ unsigned int i;
+
+ for (i = MIPS64_EF_R1; i <= MIPS64_EF_R31; i++) {
+ /* k0/k1 are copied as zero. */
+ if (i == MIPS64_EF_R26 || i == MIPS64_EF_R27)
+ uregs[i] = 0;
+ else
+ uregs[i] = regs->regs[i - MIPS64_EF_R0];
+ }
+
+ uregs[MIPS64_EF_LO] = regs->lo;
+ uregs[MIPS64_EF_HI] = regs->hi;
+ uregs[MIPS64_EF_CP0_EPC] = regs->cp0_epc;
+ uregs[MIPS64_EF_CP0_BADVADDR] = regs->cp0_badvaddr;
+ uregs[MIPS64_EF_CP0_STATUS] = regs->cp0_status;
+ uregs[MIPS64_EF_CP0_CAUSE] = regs->cp0_cause;
+}
+#endif /* CONFIG_64BIT */
diff --git a/arch/mips/kernel/prom.c b/arch/mips/kernel/prom.c
new file mode 100644
index 000000000..f88ce78e1
--- /dev/null
+++ b/arch/mips/kernel/prom.c
@@ -0,0 +1,72 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ */
+
+#include <linux/init.h>
+#include <linux/export.h>
+#include <linux/errno.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/debugfs.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+
+#include <asm/bootinfo.h>
+#include <asm/page.h>
+#include <asm/prom.h>
+
+static char mips_machine_name[64] = "Unknown";
+
+__init void mips_set_machine_name(const char *name)
+{
+ if (name == NULL)
+ return;
+
+ strscpy(mips_machine_name, name, sizeof(mips_machine_name));
+ pr_info("MIPS: machine is %s\n", mips_get_machine_name());
+}
+
+char *mips_get_machine_name(void)
+{
+ return mips_machine_name;
+}
+
+#ifdef CONFIG_USE_OF
+
+void __init __dt_setup_arch(void *bph)
+{
+ if (!early_init_dt_scan(bph))
+ return;
+
+ mips_set_machine_name(of_flat_dt_get_machine_name());
+}
+
+int __init __dt_register_buses(const char *bus0, const char *bus1)
+{
+ static struct of_device_id of_ids[3];
+
+ if (!of_have_populated_dt())
+ panic("device tree not present");
+
+ strscpy(of_ids[0].compatible, bus0, sizeof(of_ids[0].compatible));
+ if (bus1) {
+ strscpy(of_ids[1].compatible, bus1,
+ sizeof(of_ids[1].compatible));
+ }
+
+ if (of_platform_populate(NULL, of_ids, NULL, NULL))
+ panic("failed to populate DT");
+
+ return 0;
+}
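+
+/*
+ * Illustrative sketch (not part of this patch): platform setup code
+ * typically calls this from an arch_initcall, e.g.
+ *
+ *	static int __init plat_of_setup(void)
+ *	{
+ *		return __dt_register_buses("simple-bus", NULL);
+ *	}
+ *	arch_initcall(plat_of_setup);
+ *
+ * "plat_of_setup" and the compatible string are examples only.
+ */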
+
+void __weak __init device_tree_init(void)
+{
+ unflatten_and_copy_device_tree();
+}
+
+#endif
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
new file mode 100644
index 000000000..567aec4ab
--- /dev/null
+++ b/arch/mips/kernel/ptrace.c
@@ -0,0 +1,1381 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ *
+ * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
+ * binaries.
+ */
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/smp.h>
+#include <linux/security.h>
+#include <linux/stddef.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/ftrace.h>
+
+#include <asm/byteorder.h>
+#include <asm/cpu.h>
+#include <asm/cpu-info.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/page.h>
+#include <asm/processor.h>
+#include <asm/syscall.h>
+#include <linux/uaccess.h>
+#include <asm/bootinfo.h>
+#include <asm/reg.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+/*
+ * Called by kernel/ptrace.c when detaching.
+ *
+ * Make sure single step bits etc are not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ /* Don't load the watchpoint registers for the ex-child. */
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+}
+
+/*
+ * Read a general register set. We always use the 64-bit format, even
+ * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
+ * Registers are sign extended to fill the available space.
+ */
+int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
+{
+ struct pt_regs *regs;
+ int i;
+
+ if (!access_ok(data, 38 * 8))
+ return -EIO;
+
+ regs = task_pt_regs(child);
+
+ for (i = 0; i < 32; i++)
+ __put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
+ __put_user((long)regs->lo, (__s64 __user *)&data->lo);
+ __put_user((long)regs->hi, (__s64 __user *)&data->hi);
+ __put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
+ __put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
+ __put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
+ __put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);
+
+ return 0;
+}
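+
+/*
+ * Example: with the sign extension above, a 32-bit tracee whose $v0
+ * (regs[2]) holds 0x80000000 reads back as 0xffffffff80000000 on a
+ * 64-bit kernel.
+ */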
+
+/*
+ * Write a general register set. As for PTRACE_GETREGS, we always use
+ * the 64-bit format. On a 32-bit kernel only the lower order half
+ * (according to endianness) will be used.
+ */
+int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
+{
+ struct pt_regs *regs;
+ int i;
+
+ if (!access_ok(data, 38 * 8))
+ return -EIO;
+
+ regs = task_pt_regs(child);
+
+ for (i = 0; i < 32; i++)
+ __get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
+ __get_user(regs->lo, (__s64 __user *)&data->lo);
+ __get_user(regs->hi, (__s64 __user *)&data->hi);
+ __get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
+
+ /* badvaddr, status, and cause may not be written. */
+
+ /* System call number may have been changed */
+ mips_syscall_update_nr(child, regs);
+
+ return 0;
+}
+
+int ptrace_get_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ enum pt_watch_style style;
+ int i;
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+
+#ifdef CONFIG_32BIT
+ style = pt_watch_style_mips32;
+#define WATCH_STYLE mips32
+#else
+ style = pt_watch_style_mips64;
+#define WATCH_STYLE mips64
+#endif
+
+ __put_user(style, &addr->style);
+ __put_user(boot_cpu_data.watch_reg_use_cnt,
+ &addr->WATCH_STYLE.num_valid);
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __put_user(child->thread.watch.mips3264.watchlo[i],
+ &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(child->thread.watch.mips3264.watchhi[i] &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
+ &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(boot_cpu_data.watch_reg_masks[i],
+ &addr->WATCH_STYLE.watch_masks[i]);
+ }
+ for (; i < 8; i++) {
+ __put_user(0, &addr->WATCH_STYLE.watchlo[i]);
+ __put_user(0, &addr->WATCH_STYLE.watchhi[i]);
+ __put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
+ }
+
+ return 0;
+}
+
+int ptrace_set_watch_regs(struct task_struct *child,
+ struct pt_watch_regs __user *addr)
+{
+ int i;
+ int watch_active = 0;
+ unsigned long lt[NUM_WATCH_REGS];
+ u16 ht[NUM_WATCH_REGS];
+
+ if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
+ return -EIO;
+ if (!access_ok(addr, sizeof(struct pt_watch_regs)))
+ return -EIO;
+ /* Check the values. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ __get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
+#ifdef CONFIG_32BIT
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+#else
+ if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
+ if (lt[i] & 0xffffffff80000000UL)
+ return -EINVAL;
+ } else {
+ if (lt[i] & __UA_LIMIT)
+ return -EINVAL;
+ }
+#endif
+ __get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
+ if (ht[i] & ~MIPS_WATCHHI_MASK)
+ return -EINVAL;
+ }
+ /* Install them. */
+ for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
+ if (lt[i] & MIPS_WATCHLO_IRW)
+ watch_active = 1;
+ child->thread.watch.mips3264.watchlo[i] = lt[i];
+ /* Set the G bit. */
+ child->thread.watch.mips3264.watchhi[i] = ht[i];
+ }
+
+ if (watch_active)
+ set_tsk_thread_flag(child, TIF_LOAD_WATCH);
+ else
+ clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
+
+ return 0;
+}
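+
+/*
+ * Illustrative sketch (debugger side, not part of this patch): a tracer
+ * arms a write watchpoint on "addr" roughly as follows, with the union
+ * member chosen per the reported pt_watch_style (mips32 on 32-bit
+ * kernels):
+ *
+ *	struct pt_watch_regs wr = { };
+ *
+ *	ptrace(PTRACE_GET_WATCH_REGS, pid, &wr, NULL);
+ *	wr.mips64.watchlo[0] = (addr & ~7UL) | 1;	(1 == W enable bit)
+ *	ptrace(PTRACE_SET_WATCH_REGS, pid, &wr, NULL);
+ */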
+
+/* regset get/set implementations */
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+static int gpr32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG] = {};
+
+ mips_dump_regs32(uregs, regs);
+ return membuf_write(&to, uregs, sizeof(uregs));
+}
+
+static int gpr32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u32 uregs[ELF_NGREG];
+ unsigned start, num_regs, i;
+ int err;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > ELF_NGREG)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++) {
+ /*
+ * Cast all values to signed here so that if this is a 64-bit
+ * kernel, the supplied 32-bit values will be sign extended.
+ */
+ switch (i) {
+ case MIPS32_EF_R1 ... MIPS32_EF_R25:
+ /* k0/k1 are ignored. */
+ case MIPS32_EF_R28 ... MIPS32_EF_R31:
+ regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
+ break;
+ case MIPS32_EF_LO:
+ regs->lo = (s32)uregs[i];
+ break;
+ case MIPS32_EF_HI:
+ regs->hi = (s32)uregs[i];
+ break;
+ case MIPS32_EF_CP0_EPC:
+ regs->cp0_epc = (s32)uregs[i];
+ break;
+ }
+ }
+
+ /* System call number may have been changed */
+ mips_syscall_update_nr(target, regs);
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+static int gpr64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u64 uregs[ELF_NGREG] = {};
+
+ mips_dump_regs64(uregs, regs);
+ return membuf_write(&to, uregs, sizeof(uregs));
+}
+
+static int gpr64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ u64 uregs[ELF_NGREG];
+ unsigned start, num_regs, i;
+ int err;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > ELF_NGREG)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
+ sizeof(uregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++) {
+ switch (i) {
+ case MIPS64_EF_R1 ... MIPS64_EF_R25:
+ /* k0/k1 are ignored. */
+ case MIPS64_EF_R28 ... MIPS64_EF_R31:
+ regs->regs[i - MIPS64_EF_R0] = uregs[i];
+ break;
+ case MIPS64_EF_LO:
+ regs->lo = uregs[i];
+ break;
+ case MIPS64_EF_HI:
+ regs->hi = uregs[i];
+ break;
+ case MIPS64_EF_CP0_EPC:
+ regs->cp0_epc = uregs[i];
+ break;
+ }
+ }
+
+ /* System call number may have been changed */
+ mips_syscall_update_nr(target, regs);
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Poke at FCSR according to its mask. Set the Cause bits even
+ * if a corresponding Enable bit is set. This will be noticed at
+ * the time the thread is switched to and SIGFPE thrown accordingly.
+ */
+static void ptrace_setfcr31(struct task_struct *child, u32 value)
+{
+ u32 fcr31;
+ u32 mask;
+
+ fcr31 = child->thread.fpu.fcr31;
+ mask = boot_cpu_data.fpu_msk31;
+ child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+}
+
+int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
+{
+ int i;
+
+ if (!access_ok(data, 33 * 8))
+ return -EIO;
+
+ if (tsk_used_math(child)) {
+ union fpureg *fregs = get_fpu_regs(child);
+ for (i = 0; i < 32; i++)
+ __put_user(get_fpr64(&fregs[i], 0),
+ i + (__u64 __user *)data);
+ } else {
+ for (i = 0; i < 32; i++)
+ __put_user((__u64) -1, i + (__u64 __user *) data);
+ }
+
+ __put_user(child->thread.fpu.fcr31, data + 64);
+ __put_user(boot_cpu_data.fpu_id, data + 65);
+
+ return 0;
+}
+
+int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
+{
+ union fpureg *fregs;
+ u64 fpr_val;
+ u32 value;
+ int i;
+
+ if (!access_ok(data, 33 * 8))
+ return -EIO;
+
+ init_fp_ctx(child);
+ fregs = get_fpu_regs(child);
+
+ for (i = 0; i < 32; i++) {
+ __get_user(fpr_val, i + (__u64 __user *)data);
+ set_fpr64(&fregs[i], 0, fpr_val);
+ }
+
+ __get_user(value, data + 64);
+ ptrace_setfcr31(child, value);
+
+ /* FIR may not be written. */
+
+ return 0;
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * !CONFIG_CPU_HAS_MSA variant. FP context's general register slots
+ * correspond 1:1 to buffer slots. Only general registers are copied.
+ */
+static void fpr_get_fpa(struct task_struct *target,
+ struct membuf *to)
+{
+ membuf_write(to, &target->thread.fpu,
+ NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer,
+ * CONFIG_CPU_HAS_MSA variant. Only lower 64 bits of FP context's
+ * general register slots are copied to buffer slots. Only general
+ * registers are copied.
+ */
+static void fpr_get_msa(struct task_struct *target, struct membuf *to)
+{
+ unsigned int i;
+
+ BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS; i++)
+ membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
+}
+
+/*
+ * Copy the floating-point context to the supplied NT_PRFPREG buffer.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR and FIR registers separately.
+ */
+static int fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ fpr_get_fpa(target, &to);
+ else
+ fpr_get_msa(target, &to);
+
+ membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
+ membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
+ return 0;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * !CONFIG_CPU_HAS_MSA variant. Buffer slots correspond 1:1 to FP
+ * context's general register slots. Only general registers are copied.
+ */
+static int fpr_set_fpa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ return user_regset_copyin(pos, count, kbuf, ubuf,
+ &target->thread.fpu,
+ 0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context,
+ * CONFIG_CPU_HAS_MSA variant. Buffer slots are copied to lower 64
+ * bits only of FP context's general register slots. Only general
+ * registers are copied.
+ */
+static int fpr_set_msa(struct task_struct *target,
+ unsigned int *pos, unsigned int *count,
+ const void **kbuf, const void __user **ubuf)
+{
+ unsigned int i;
+ u64 fpr_val;
+ int err;
+
+ BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
+ for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
+ err = user_regset_copyin(pos, count, kbuf, ubuf,
+ &fpr_val, i * sizeof(elf_fpreg_t),
+ (i + 1) * sizeof(elf_fpreg_t));
+ if (err)
+ return err;
+ set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
+ }
+
+ return 0;
+}
+
+/*
+ * Copy the supplied NT_PRFPREG buffer to the floating-point context.
+ * Choose the appropriate helper for general registers, and then copy
+ * the FCSR register separately. Ignore the incoming FIR register
+ * contents though, as the register is read-only.
+ *
+ * We optimize for the case where `count % sizeof(elf_fpreg_t) == 0',
+ * which is supposed to have been guaranteed by the kernel before
+ * calling us, e.g. in `ptrace_regset'. We enforce that requirement,
+ * so that we can safely avoid preinitializing temporaries for
+ * partial register writes.
+ */
+static int fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
+ const int fir_pos = fcr31_pos + sizeof(u32);
+ u32 fcr31;
+ int err;
+
+ BUG_ON(count % sizeof(elf_fpreg_t));
+
+ if (pos + count > sizeof(elf_fpregset_t))
+ return -EIO;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
+ err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
+ else
+ err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
+ if (err)
+ return err;
+
+ if (count > 0) {
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &fcr31,
+ fcr31_pos, fcr31_pos + sizeof(u32));
+ if (err)
+ return err;
+
+ ptrace_setfcr31(target, fcr31);
+ }
+
+ if (count > 0)
+ err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ fir_pos,
+ fir_pos + sizeof(u32));
+
+ return err;
+}
+
+/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
+static int fp_mode_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_store(&to, (int)mips_get_process_fp_mode(target));
+}
+
+/*
+ * Copy the supplied NT_MIPS_FP_MODE buffer to the FP mode setting.
+ *
+ * We optimize for the case where `count % sizeof(int) == 0', which
+ * is supposed to have been guaranteed by the kernel before calling
+ * us, e.g. in `ptrace_regset'. We enforce that requirement, so
+ * that we can safely avoid preinitializing temporaries for partial
+ * mode writes.
+ */
+static int fp_mode_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int fp_mode;
+ int err;
+
+ BUG_ON(count % sizeof(int));
+
+ if (pos + count > sizeof(fp_mode))
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
+ sizeof(fp_mode));
+ if (err)
+ return err;
+
+ if (count > 0)
+ err = mips_set_process_fp_mode(target, fp_mode);
+
+ return err;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+struct msa_control_regs {
+ unsigned int fir;
+ unsigned int fcsr;
+ unsigned int msair;
+ unsigned int msacsr;
+};
+
+static void copy_pad_fprs(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf *to,
+ unsigned int live_sz)
+{
+ int i, j;
+ unsigned long long fill = ~0ull;
+ unsigned int cp_sz, pad_sz;
+
+ cp_sz = min(regset->size, live_sz);
+ pad_sz = regset->size - cp_sz;
+ WARN_ON(pad_sz % sizeof(fill));
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
+ for (j = 0; j < (pad_sz / sizeof(fill)); j++)
+ membuf_store(to, fill);
+ }
+}
+
+static int msa_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ const struct msa_control_regs ctrl_regs = {
+ .fir = boot_cpu_data.fpu_id,
+ .fcsr = target->thread.fpu.fcr31,
+ .msair = boot_cpu_data.msa_id,
+ .msacsr = target->thread.fpu.msacsr,
+ };
+
+ if (!tsk_used_math(target)) {
+ /* The task hasn't used FP or MSA, fill with 0xff */
+ copy_pad_fprs(target, regset, &to, 0);
+ } else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
+ /* Copy scalar FP context, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to, 8);
+ } else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ membuf_write(&to, &target->thread.fpu.fpr, wr_size);
+ } else {
+ /* Copy as much context as possible, fill the rest with 0xff */
+ copy_pad_fprs(target, regset, &to,
+ sizeof(target->thread.fpu.fpr[0]));
+ }
+
+ return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
+}
+
+static int msa_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ const unsigned int wr_size = NUM_FPU_REGS * regset->size;
+ struct msa_control_regs ctrl_regs;
+ unsigned int cp_sz;
+ int i, err, start;
+
+ init_fp_ctx(target);
+
+ if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
+ /* Trivially copy the vector registers */
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr,
+ 0, wr_size);
+ } else {
+ /* Copy as much context as possible */
+ cp_sz = min_t(unsigned int, regset->size,
+ sizeof(target->thread.fpu.fpr[0]));
+
+ i = start = err = 0;
+ for (; i < NUM_FPU_REGS; i++, start += regset->size) {
+ err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
+ &target->thread.fpu.fpr[i],
+ start, start + cp_sz);
+ }
+ }
+
+ if (!err)
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
+ wr_size, wr_size + sizeof(ctrl_regs));
+ if (!err) {
+ target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
+ target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
+ }
+
+ return err;
+}
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+/*
+ * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
+ */
+static int dsp32_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ u32 dspregs[NUM_DSP_REGS + 1];
+ unsigned int i;
+
+ BUG_ON(to.left % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ for (i = 0; i < NUM_DSP_REGS; i++)
+ dspregs[i] = target->thread.dsp.dspr[i];
+ dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
+ return membuf_write(&to, dspregs, sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp32_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u32 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u32));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u32);
+ num_regs = count / sizeof(u32);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = (s32)dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = (s32)dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
+ */
+static int dsp64_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ u64 dspregs[NUM_DSP_REGS + 1];
+ unsigned int i;
+
+ BUG_ON(to.left % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ for (i = 0; i < NUM_DSP_REGS; i++)
+ dspregs[i] = target->thread.dsp.dspr[i];
+ dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
+ return membuf_write(&to, dspregs, sizeof(dspregs));
+}
+
+/*
+ * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
+ */
+static int dsp64_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ unsigned int start, num_regs, i;
+ u64 dspregs[NUM_DSP_REGS + 1];
+ int err;
+
+ BUG_ON(count % sizeof(u64));
+
+ if (!cpu_has_dsp)
+ return -EIO;
+
+ start = pos / sizeof(u64);
+ num_regs = count / sizeof(u64);
+
+ if (start + num_regs > NUM_DSP_REGS + 1)
+ return -EIO;
+
+ err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
+ sizeof(dspregs));
+ if (err)
+ return err;
+
+ for (i = start; i < num_regs; i++)
+ switch (i) {
+ case 0 ... NUM_DSP_REGS - 1:
+ target->thread.dsp.dspr[i] = dspregs[i];
+ break;
+ case NUM_DSP_REGS:
+ target->thread.dsp.dspcontrol = dspregs[i];
+ break;
+ }
+
+ return 0;
+}
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * Determine whether the DSP context is present.
+ */
+static int dsp_active(struct task_struct *target,
+ const struct user_regset *regset)
+{
+ return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
+}
+
+enum mips_regset {
+ REGSET_GPR,
+ REGSET_DSP,
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ REGSET_FPR,
+ REGSET_FP_MODE,
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ REGSET_MSA,
+#endif
+};
+
+struct pt_regs_offset {
+ const char *name;
+ int offset;
+};
+
+#define REG_OFFSET_NAME(reg, r) { \
+ .name = #reg, \
+ .offset = offsetof(struct pt_regs, r) \
+}
+
+#define REG_OFFSET_END { \
+ .name = NULL, \
+ .offset = 0 \
+}
+
+static const struct pt_regs_offset regoffset_table[] = {
+ REG_OFFSET_NAME(r0, regs[0]),
+ REG_OFFSET_NAME(r1, regs[1]),
+ REG_OFFSET_NAME(r2, regs[2]),
+ REG_OFFSET_NAME(r3, regs[3]),
+ REG_OFFSET_NAME(r4, regs[4]),
+ REG_OFFSET_NAME(r5, regs[5]),
+ REG_OFFSET_NAME(r6, regs[6]),
+ REG_OFFSET_NAME(r7, regs[7]),
+ REG_OFFSET_NAME(r8, regs[8]),
+ REG_OFFSET_NAME(r9, regs[9]),
+ REG_OFFSET_NAME(r10, regs[10]),
+ REG_OFFSET_NAME(r11, regs[11]),
+ REG_OFFSET_NAME(r12, regs[12]),
+ REG_OFFSET_NAME(r13, regs[13]),
+ REG_OFFSET_NAME(r14, regs[14]),
+ REG_OFFSET_NAME(r15, regs[15]),
+ REG_OFFSET_NAME(r16, regs[16]),
+ REG_OFFSET_NAME(r17, regs[17]),
+ REG_OFFSET_NAME(r18, regs[18]),
+ REG_OFFSET_NAME(r19, regs[19]),
+ REG_OFFSET_NAME(r20, regs[20]),
+ REG_OFFSET_NAME(r21, regs[21]),
+ REG_OFFSET_NAME(r22, regs[22]),
+ REG_OFFSET_NAME(r23, regs[23]),
+ REG_OFFSET_NAME(r24, regs[24]),
+ REG_OFFSET_NAME(r25, regs[25]),
+ REG_OFFSET_NAME(r26, regs[26]),
+ REG_OFFSET_NAME(r27, regs[27]),
+ REG_OFFSET_NAME(r28, regs[28]),
+ REG_OFFSET_NAME(r29, regs[29]),
+ REG_OFFSET_NAME(r30, regs[30]),
+ REG_OFFSET_NAME(r31, regs[31]),
+ REG_OFFSET_NAME(c0_status, cp0_status),
+ REG_OFFSET_NAME(hi, hi),
+ REG_OFFSET_NAME(lo, lo),
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ REG_OFFSET_NAME(acx, acx),
+#endif
+ REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
+ REG_OFFSET_NAME(c0_cause, cp0_cause),
+ REG_OFFSET_NAME(c0_epc, cp0_epc),
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ REG_OFFSET_NAME(mpl0, mpl[0]),
+ REG_OFFSET_NAME(mpl1, mpl[1]),
+ REG_OFFSET_NAME(mpl2, mpl[2]),
+ REG_OFFSET_NAME(mtp0, mtp[0]),
+ REG_OFFSET_NAME(mtp1, mtp[1]),
+ REG_OFFSET_NAME(mtp2, mtp[2]),
+#endif
+ REG_OFFSET_END,
+};
+
+/**
+ * regs_query_register_offset() - query register offset from its name
+ * @name: the name of a register
+ *
+ * regs_query_register_offset() returns the offset of a register in struct
+ * pt_regs from its name. If the name is invalid, it returns -EINVAL.
+ */
+int regs_query_register_offset(const char *name)
+{
+ const struct pt_regs_offset *roff;
+ for (roff = regoffset_table; roff->name != NULL; roff++)
+ if (!strcmp(roff->name, name))
+ return roff->offset;
+ return -EINVAL;
+}
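
A minimal in-kernel sketch (commentary, not part of this patch) of how this lookup is typically paired with regs_get_register(), the asm/ptrace.h accessor available because MIPS selects HAVE_REGS_AND_STACK_ACCESS_API; the helper name is illustrative.

/* E.g. from a kprobes/tracing handler. */
static unsigned long fetch_reg_by_name(struct pt_regs *regs, const char *name)
{
	int offs = regs_query_register_offset(name);	/* e.g. "r29" (sp) */

	if (offs < 0)
		return 0;	/* unknown register name */

	return regs_get_register(regs, offs);
}
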
+
+#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)
+
+static const struct user_regset mips_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned int),
+ .align = sizeof(unsigned int),
+ .regset_get = gpr32_get,
+ .set = gpr32_set,
+ },
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u32),
+ .align = sizeof(u32),
+ .regset_get = dsp32_get,
+ .set = dsp32_set,
+ .active = dsp_active,
+ },
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .regset_get = fp_mode_get,
+ .set = fp_mode_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ [REGSET_MSA] = {
+ .core_note_type = NT_MIPS_MSA,
+ .n = NUM_FPU_REGS + 1,
+ .size = 16,
+ .align = 16,
+ .regset_get = msa_get,
+ .set = msa_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_mips_view = {
+ .name = "mips",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips_regsets,
+ .n = ARRAY_SIZE(mips_regsets),
+};
+
+#endif /* CONFIG_32BIT || CONFIG_MIPS32_O32 */
+
+#ifdef CONFIG_64BIT
+
+static const struct user_regset mips64_regsets[] = {
+ [REGSET_GPR] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .regset_get = gpr64_get,
+ .set = gpr64_set,
+ },
+ [REGSET_DSP] = {
+ .core_note_type = NT_MIPS_DSP,
+ .n = NUM_DSP_REGS + 1,
+ .size = sizeof(u64),
+ .align = sizeof(u64),
+ .regset_get = dsp64_get,
+ .set = dsp64_set,
+ .active = dsp_active,
+ },
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ [REGSET_FP_MODE] = {
+ .core_note_type = NT_MIPS_FP_MODE,
+ .n = 1,
+ .size = sizeof(int),
+ .align = sizeof(int),
+ .regset_get = fp_mode_get,
+ .set = fp_mode_set,
+ },
+ [REGSET_FPR] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = fpr_get,
+ .set = fpr_set,
+ },
+#endif
+#ifdef CONFIG_CPU_HAS_MSA
+ [REGSET_MSA] = {
+ .core_note_type = NT_MIPS_MSA,
+ .n = NUM_FPU_REGS + 1,
+ .size = 16,
+ .align = 16,
+ .regset_get = msa_get,
+ .set = msa_set,
+ },
+#endif
+};
+
+static const struct user_regset_view user_mips64_view = {
+ .name = "mips64",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#ifdef CONFIG_MIPS32_N32
+
+static const struct user_regset_view user_mipsn32_view = {
+ .name = "mipsn32",
+ .e_flags = EF_MIPS_ABI2,
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = mips64_regsets,
+ .n = ARRAY_SIZE(mips64_regsets),
+};
+
+#endif /* CONFIG_MIPS32_N32 */
+
+#endif /* CONFIG_64BIT */
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+#ifdef CONFIG_32BIT
+ return &user_mips_view;
+#else
+#ifdef CONFIG_MIPS32_O32
+ if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
+ return &user_mips_view;
+#endif
+#ifdef CONFIG_MIPS32_N32
+ if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
+ return &user_mipsn32_view;
+#endif
+ return &user_mips64_view;
+#endif
+}
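
The view returned here is what the generic PTRACE_GETREGSET/PTRACE_SETREGSET code consults, so a 32-bit tracee under a 64-bit kernel transparently gets the 32-bit layouts. A hypothetical tracer-side sketch, assuming <sys/procfs.h> supplies elf_gregset_t for the tracee's ABI:

#include <elf.h>
#include <sys/procfs.h>
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>

static long read_gprs(pid_t pid, elf_gregset_t *gregs)
{
	struct iovec iov = {
		.iov_base = gregs,
		.iov_len  = sizeof(*gregs),
	};

	/* NT_PRSTATUS selects REGSET_GPR from the regset arrays above. */
	return ptrace(PTRACE_GETREGSET, pid, NT_PRSTATUS, &iov);
}
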
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int ret;
+ void __user *addrp = (void __user *) addr;
+ void __user *datavp = (void __user *) data;
+ unsigned long __user *datalp = (void __user *) data;
+
+ switch (request) {
+ /* When I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ ret = generic_ptrace_peekdata(child, addr, data);
+ break;
+
+ /* Read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ struct pt_regs *regs;
+ unsigned long tmp = 0;
+
+ regs = task_pt_regs(child);
+ ret = 0; /* Default return value. */
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = regs->regs[addr];
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs;
+
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
+
+#ifdef CONFIG_32BIT
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
+ }
+#endif
+ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ break;
+ }
+ case FPC_CSR:
+ tmp = child->thread.fpu.fcr31;
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = boot_cpu_data.fpu_id;
+ break;
+#endif
+ case PC:
+ tmp = regs->cp0_epc;
+ break;
+ case CAUSE:
+ tmp = regs->cp0_cause;
+ break;
+ case BADVADDR:
+ tmp = regs->cp0_badvaddr;
+ break;
+ case MMHI:
+ tmp = regs->hi;
+ break;
+ case MMLO:
+ tmp = regs->lo;
+ break;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ case ACX:
+ tmp = regs->acx;
+ break;
+#endif
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
+ default:
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ ret = put_user(tmp, datalp);
+ break;
+ }
+
+ /* When I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ ret = generic_ptrace_pokedata(child, addr, data);
+ break;
+
+ case PTRACE_POKEUSR: {
+ struct pt_regs *regs;
+ ret = 0;
+ regs = task_pt_regs(child);
+
+ switch (addr) {
+ case 0 ... 31:
+ regs->regs[addr] = data;
+ /* System call number may have been changed */
+ if (addr == 2)
+ mips_syscall_update_nr(child, regs);
+ else if (addr == 4 &&
+ mips_syscall_is_indirect(child, regs))
+ mips_syscall_update_nr(child, regs);
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs = get_fpu_regs(child);
+
+ init_fp_ctx(child);
+#ifdef CONFIG_32BIT
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
+ }
+#endif
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
+ break;
+ }
+ case FPC_CSR:
+ init_fp_ctx(child);
+ ptrace_setfcr31(child, data);
+ break;
+#endif
+ case PC:
+ regs->cp0_epc = data;
+ break;
+ case MMHI:
+ regs->hi = data;
+ break;
+ case MMLO:
+ regs->lo = data;
+ break;
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ case ACX:
+ regs->acx = data;
+ break;
+#endif
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
+ default:
+ /* The rest are not allowed. */
+ ret = -EIO;
+ break;
+ }
+ break;
+ }
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child, datavp);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child, datavp);
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs(child, datavp);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs(child, datavp);
+ break;
+#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value, datalp);
+ break;
+
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child, addrp);
+ break;
+
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child, addrp);
+ break;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+ out:
+ return ret;
+}
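
A hypothetical tracer-side sketch of the PTRACE_PEEKUSR path above, reading the tracee's program counter. PC is the USER-area index handled by the "case PC:" branch; the value 64 is assumed from the MIPS uapi asm/ptrace.h, hence the guarded local define.

#include <errno.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PC
#define PC 64	/* USER-area index of cp0_epc, per MIPS uapi asm/ptrace.h */
#endif

static long peek_pc(pid_t pid)
{
	long val;

	errno = 0;	/* -1 is a valid peeked value, so check errno too */
	val = ptrace(PTRACE_PEEKUSER, pid, PC, 0);
	if (val == -1 && errno)
		return -1;	/* e.g. EIO for an index the kernel rejects */

	return val;	/* the tracee's cp0_epc */
}
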
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
+{
+ user_exit();
+
+ current_thread_info()->syscall = syscall;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE)) {
+ if (ptrace_report_syscall_entry(regs))
+ return -1;
+ syscall = current_thread_info()->syscall;
+ }
+
+#ifdef CONFIG_SECCOMP
+ if (unlikely(test_thread_flag(TIF_SECCOMP))) {
+ int ret, i;
+ struct seccomp_data sd;
+ unsigned long args[6];
+
+ sd.nr = syscall;
+ sd.arch = syscall_get_arch(current);
+ syscall_get_arguments(current, regs, args);
+ for (i = 0; i < 6; i++)
+ sd.args[i] = args[i];
+ sd.instruction_pointer = KSTK_EIP(current);
+
+ ret = __secure_computing(&sd);
+ if (ret == -1)
+ return ret;
+ syscall = current_thread_info()->syscall;
+ }
+#endif
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_enter(regs, regs->regs[2]);
+
+ audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
+ regs->regs[6], regs->regs[7]);
+
+ /*
+ * Negative syscall numbers are mistaken for rejected syscalls, but
+ * won't have had the return value set appropriately, so we do so now.
+ */
+ if (syscall < 0)
+ syscall_set_return_value(current, regs, -ENOSYS, 0);
+ return syscall;
+}
+
+/*
+ * Notification of system call entry/exit
+ * - triggered by current->work.syscall_trace
+ */
+asmlinkage void syscall_trace_leave(struct pt_regs *regs)
+{
+ /*
+ * We may come here right after calling schedule_user()
+ * or do_notify_resume(), in which case we can be in RCU
+ * user mode.
+ */
+ user_exit();
+
+ audit_syscall_exit(regs);
+
+ if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
+ trace_sys_exit(regs, regs_return_value(regs));
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ptrace_report_syscall_exit(regs, 0);
+
+ user_enter();
+}
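
A hypothetical tracer sketch: under PTRACE_SYSCALL the child stops once in syscall_trace_enter() on the way into a system call and once in syscall_trace_leave() on the way out, so each stop observed below corresponds to one of the two hooks.

#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/wait.h>

static void trace_syscalls(pid_t pid)
{
	int status;

	/* Assumes the child is attached and currently stopped. */
	while (ptrace(PTRACE_SYSCALL, pid, 0, 0) == 0) {
		if (waitpid(pid, &status, 0) != pid || !WIFSTOPPED(status))
			break;	/* child exited or was killed */
		/* Syscall-entry or -exit stop: inspect registers here. */
	}
}
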
diff --git a/arch/mips/kernel/ptrace32.c b/arch/mips/kernel/ptrace32.c
new file mode 100644
index 000000000..afcf27a87
--- /dev/null
+++ b/arch/mips/kernel/ptrace32.c
@@ -0,0 +1,317 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ * Copyright (C) 2000 Ulf Carlsson
+ *
+ * At this time Linux/MIPS64 only supports syscall tracing, even for 32-bit
+ * binaries.
+ */
+#include <linux/compiler.h>
+#include <linux/compat.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/smp.h>
+#include <linux/security.h>
+
+#include <asm/cpu.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/page.h>
+#include <asm/reg.h>
+#include <asm/syscall.h>
+#include <linux/uaccess.h>
+#include <asm/bootinfo.h>
+
+/*
+ * Tracing a 32-bit process with a 64-bit strace and vice versa will not
+ * work. I don't know how to fix this.
+ */
+long compat_arch_ptrace(struct task_struct *child, compat_long_t request,
+ compat_ulong_t caddr, compat_ulong_t cdata)
+{
+ int addr = caddr;
+ int data = cdata;
+ int ret;
+
+ switch (request) {
+
+ /*
+ * Read 4 bytes of the other process' storage.
+ * data is a pointer specifying where the user wants the
+ * 4 bytes copied to.
+ * addr is a pointer in the user's storage that contains an
+ * 8-byte address in the other process identifying the 4 bytes
+ * to be read.
+ * (This is run in a 32-bit process looking at a 64-bit process.)
+ * When I and D space are separate, these will need to be fixed.
+ */
+ case PTRACE_PEEKTEXT_3264:
+ case PTRACE_PEEKDATA_3264: {
+ u32 tmp;
+ int copied;
+ u32 __user * addrOthers;
+
+ ret = -EIO;
+
+ /* Get the addr in the other process that we want to read */
+ if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
+ break;
+
+ copied = ptrace_access_vm(child, (u64)addrOthers, &tmp,
+ sizeof(tmp), FOLL_FORCE);
+ if (copied != sizeof(tmp))
+ break;
+ ret = put_user(tmp, (u32 __user *) (unsigned long) data);
+ break;
+ }
+
+ /* Read the word at location addr in the USER area. */
+ case PTRACE_PEEKUSR: {
+ struct pt_regs *regs;
+ unsigned int tmp;
+
+ regs = task_pt_regs(child);
+ ret = 0; /* Default return value. */
+
+ switch (addr) {
+ case 0 ... 31:
+ tmp = regs->regs[addr];
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs;
+
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ tmp = -1;
+ break;
+ }
+ fregs = get_fpu_regs(child);
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1);
+ break;
+ }
+ tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
+ break;
+ }
+ case FPC_CSR:
+ tmp = child->thread.fpu.fcr31;
+ break;
+ case FPC_EIR:
+ /* implementation / version register */
+ tmp = boot_cpu_data.fpu_id;
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ case PC:
+ tmp = regs->cp0_epc;
+ break;
+ case CAUSE:
+ tmp = regs->cp0_cause;
+ break;
+ case BADVADDR:
+ tmp = regs->cp0_badvaddr;
+ break;
+ case MMHI:
+ tmp = regs->hi;
+ break;
+ case MMLO:
+ tmp = regs->lo;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ dregs = __get_dsp_regs(child);
+ tmp = dregs[addr - DSP_BASE];
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ tmp = child->thread.dsp.dspcontrol;
+ break;
+ default:
+ tmp = 0;
+ ret = -EIO;
+ goto out;
+ }
+ ret = put_user(tmp, (unsigned __user *) (unsigned long) data);
+ break;
+ }
+
+ /*
+ * Write 4 bytes into the other process' storage.
+ * data is the 4 bytes that the user wants written.
+ * addr is a pointer in the user's storage that contains an
+ * 8-byte address in the other process where the 4 bytes
+ * are to be written.
+ * (This is run in a 32-bit process looking at a 64-bit process.)
+ * When I and D space are separate, these will need to be fixed.
+ */
+ case PTRACE_POKETEXT_3264:
+ case PTRACE_POKEDATA_3264: {
+ u32 __user * addrOthers;
+
+ /* Get the addr in the other process that we want to write into */
+ ret = -EIO;
+ if (get_user(addrOthers, (u32 __user * __user *) (unsigned long) addr) != 0)
+ break;
+ ret = 0;
+ if (ptrace_access_vm(child, (u64)addrOthers, &data,
+ sizeof(data),
+ FOLL_FORCE | FOLL_WRITE) == sizeof(data))
+ break;
+ ret = -EIO;
+ break;
+ }
+
+ case PTRACE_POKEUSR: {
+ struct pt_regs *regs;
+ ret = 0;
+ regs = task_pt_regs(child);
+
+ switch (addr) {
+ case 0 ... 31:
+ regs->regs[addr] = data;
+ /* System call number may have been changed */
+ if (addr == 2)
+ mips_syscall_update_nr(child, regs);
+ else if (addr == 4 &&
+ mips_syscall_is_indirect(child, regs))
+ mips_syscall_update_nr(child, regs);
+ break;
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case FPR_BASE ... FPR_BASE + 31: {
+ union fpureg *fregs = get_fpu_regs(child);
+
+ if (!tsk_used_math(child)) {
+ /* FP not yet used */
+ memset(&child->thread.fpu, ~0,
+ sizeof(child->thread.fpu));
+ child->thread.fpu.fcr31 = 0;
+ }
+ if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
+ /*
+ * The odd registers are actually the high
+ * order bits of the values stored in the even
+ * registers.
+ */
+ set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
+ addr & 1, data);
+ break;
+ }
+ set_fpr64(&fregs[addr - FPR_BASE], 0, data);
+ break;
+ }
+ case FPC_CSR:
+ child->thread.fpu.fcr31 = data;
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+ case PC:
+ regs->cp0_epc = data;
+ break;
+ case MMHI:
+ regs->hi = data;
+ break;
+ case MMLO:
+ regs->lo = data;
+ break;
+ case DSP_BASE ... DSP_BASE + 5: {
+ dspreg_t *dregs;
+
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+
+ dregs = __get_dsp_regs(child);
+ dregs[addr - DSP_BASE] = data;
+ break;
+ }
+ case DSP_CONTROL:
+ if (!cpu_has_dsp) {
+ ret = -EIO;
+ break;
+ }
+ child->thread.dsp.dspcontrol = data;
+ break;
+ default:
+ /* The rest are not allowed. */
+ ret = -EIO;
+ break;
+ }
+ break;
+ }
+
+ case PTRACE_GETREGS:
+ ret = ptrace_getregs(child,
+ (struct user_pt_regs __user *) (__u64) data);
+ break;
+
+ case PTRACE_SETREGS:
+ ret = ptrace_setregs(child,
+ (struct user_pt_regs __user *) (__u64) data);
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case PTRACE_GETFPREGS:
+ ret = ptrace_getfpregs(child, (__u32 __user *) (__u64) data);
+ break;
+
+ case PTRACE_SETFPREGS:
+ ret = ptrace_setfpregs(child, (__u32 __user *) (__u64) data);
+ break;
+#endif
+ case PTRACE_GET_THREAD_AREA:
+ ret = put_user(task_thread_info(child)->tp_value,
+ (unsigned int __user *) (unsigned long) data);
+ break;
+
+ case PTRACE_GET_THREAD_AREA_3264:
+ ret = put_user(task_thread_info(child)->tp_value,
+ (unsigned long __user *) (unsigned long) data);
+ break;
+
+ case PTRACE_GET_WATCH_REGS:
+ ret = ptrace_get_watch_regs(child,
+ (struct pt_watch_regs __user *) (unsigned long) addr);
+ break;
+
+ case PTRACE_SET_WATCH_REGS:
+ ret = ptrace_set_watch_regs(child,
+ (struct pt_watch_regs __user *) (unsigned long) addr);
+ break;
+
+ default:
+ ret = compat_ptrace_request(child, request, addr, data);
+ break;
+ }
+out:
+ return ret;
+}
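
A hypothetical 32-bit tracer sketch of the 3264 peek protocol implemented above: addr points at the tracee-side 8-byte target address, data at the 4-byte result buffer. The request value is assumed from the MIPS uapi asm/ptrace.h, hence the guarded define.

#include <stdint.h>
#include <sys/ptrace.h>
#include <sys/types.h>

#ifndef PTRACE_PEEKDATA_3264
#define PTRACE_PEEKDATA_3264 0xc1	/* per MIPS uapi asm/ptrace.h */
#endif

static int peek32_of_64(pid_t pid, uint64_t where, uint32_t *out)
{
	/* The kernel reads the 64-bit address from &where and
	 * writes the 4 bytes found there to *out. */
	return ptrace(PTRACE_PEEKDATA_3264, pid, &where, out) < 0 ? -1 : 0;
}
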
diff --git a/arch/mips/kernel/r2300_fpu.S b/arch/mips/kernel/r2300_fpu.S
new file mode 100644
index 000000000..2748c5582
--- /dev/null
+++ b/arch/mips/kernel/r2300_fpu.S
@@ -0,0 +1,130 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1998 by Ralf Baechle
+ *
+ * Multi-arch abstraction and asm macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * Further modifications to make this work:
+ * Copyright (c) 1998 Harald Koerfgen
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+#define EX(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
+ PTR_WD 9b,fault; \
+ .previous
+
+#define EX2(a,b) \
+9: a,##b; \
+ .section __ex_table,"a"; \
+ PTR_WD 9b,fault; \
+ PTR_WD 9b+4,fault; \
+ .previous
+
+ .set mips1
+
+/*
+ * Save a thread's fp context.
+ */
+LEAF(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+ fpu_save_single a0, t1 # clobbers t1
+ jr ra
+ END(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+LEAF(_restore_fp)
+ fpu_restore_single a0, t1 # clobbers t1
+ jr ra
+ END(_restore_fp)
+
+ .set noreorder
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
+LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
+ li v0, 0 # assume success
+ cfc1 t1, fcr31
+ EX2(s.d $f0, 0(a0))
+ EX2(s.d $f2, 16(a0))
+ EX2(s.d $f4, 32(a0))
+ EX2(s.d $f6, 48(a0))
+ EX2(s.d $f8, 64(a0))
+ EX2(s.d $f10, 80(a0))
+ EX2(s.d $f12, 96(a0))
+ EX2(s.d $f14, 112(a0))
+ EX2(s.d $f16, 128(a0))
+ EX2(s.d $f18, 144(a0))
+ EX2(s.d $f20, 160(a0))
+ EX2(s.d $f22, 176(a0))
+ EX2(s.d $f24, 192(a0))
+ EX2(s.d $f26, 208(a0))
+ EX2(s.d $f28, 224(a0))
+ EX2(s.d $f30, 240(a0))
+ jr ra
+ EX(sw t1, (a1))
+ .set pop
+ END(_save_fp_context)
+
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
+ */
+LEAF(_restore_fp_context)
+ .set push
+ SET_HARDFLOAT
+ li v0, 0 # assume success
+ EX(lw t0, (a1))
+ EX2(l.d $f0, 0(a0))
+ EX2(l.d $f2, 16(a0))
+ EX2(l.d $f4, 32(a0))
+ EX2(l.d $f6, 48(a0))
+ EX2(l.d $f8, 64(a0))
+ EX2(l.d $f10, 80(a0))
+ EX2(l.d $f12, 96(a0))
+ EX2(l.d $f14, 112(a0))
+ EX2(l.d $f16, 128(a0))
+ EX2(l.d $f18, 144(a0))
+ EX2(l.d $f20, 160(a0))
+ EX2(l.d $f22, 176(a0))
+ EX2(l.d $f24, 192(a0))
+ EX2(l.d $f26, 208(a0))
+ EX2(l.d $f28, 224(a0))
+ EX2(l.d $f30, 240(a0))
+ jr ra
+ ctc1 t0, fcr31
+ .set pop
+ END(_restore_fp_context)
+ .set reorder
+
+ .type fault, @function
+ .ent fault
+fault: li v0, -EFAULT
+ jr ra
+ .end fault
diff --git a/arch/mips/kernel/r2300_switch.S b/arch/mips/kernel/r2300_switch.S
new file mode 100644
index 000000000..71b1aafae
--- /dev/null
+++ b/arch/mips/kernel/r2300_switch.S
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * r2300_switch.S: R2300 specific task switching code.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1999 by Ralf Baechle
+ * Copyright (C) 1994, 1995, 1996 by Andreas Busse
+ *
+ * Multi-cpu abstraction and macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * Further modifications to make this work:
+ * Copyright (c) 1998-2000 Harald Koerfgen
+ */
+#include <asm/asm.h>
+#include <asm/cachectl.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+#include <asm/asmmacro.h>
+
+ .set mips1
+ .align 5
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+LEAF(resume)
+ mfc0 t1, CP0_STATUS
+ sw t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ sw ra, THREAD_REG31(a0)
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ addiu t1, $28, _THREAD_SIZE - 32
+ sw t1, kernelsp
+
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ lw a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
diff --git a/arch/mips/kernel/r4k-bugs64.c b/arch/mips/kernel/r4k-bugs64.c
new file mode 100644
index 000000000..6ffefb2c6
--- /dev/null
+++ b/arch/mips/kernel/r4k-bugs64.c
@@ -0,0 +1,325 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003, 2004, 2007 Maciej W. Rozycki
+ */
+#include <linux/context_tracking.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/bugs.h>
+#include <asm/compiler.h>
+#include <asm/cpu.h>
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/setup.h>
+
+static char bug64hit[] __initdata =
+ "reliable operation impossible!\n%s";
+static char nowar[] __initdata =
+ "Please report to <linux-mips@vger.kernel.org>.";
+static char r4kwar[] __initdata =
+ "Enable CPU_R4000_WORKAROUNDS to rectify.";
+static char daddiwar[] __initdata =
+ "Enable CPU_DADDI_WORKAROUNDS to rectify.";
+
+static __always_inline __init
+void align_mod(const int align, const int mod)
+{
+ asm volatile(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".balign %0\n\t"
+ ".rept %1\n\t"
+ "nop\n\t"
+ ".endr\n\t"
+ ".set pop"
+ :
+ : "n"(align), "n"(mod));
+}
+
+static __always_inline __init
+void mult_sh_align_mod(long *v1, long *v2, long *w,
+ const int align, const int mod)
+{
+ unsigned long flags;
+ int m1, m2;
+ long p, s, lv1, lv2, lw;
+
+ /*
+ * We want the multiply and the shift to be isolated from the
+ * rest of the code to disable gcc optimizations. Hence the
+ * asm statements that execute nothing, but make gcc not know
+ * what the values of m1, m2 and s are and what lv2 and p are
+ * used for.
+ */
+
+ local_irq_save(flags);
+ /*
+ * The following code leads to a wrong result of the first
+ * dsll32 when executed on R4000 rev. 2.2 or 3.0 (PRId
+ * 00000422 or 00000430, respectively).
+ *
+ * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
+ * 3.0" by MIPS Technologies, Inc., errata #16 and #28 for
+ * details. I got no permission to duplicate them here,
+ * sigh... --macro
+ */
+ asm volatile(
+ ""
+ : "=r" (m1), "=r" (m2), "=r" (s)
+ : "0" (5), "1" (8), "2" (5));
+ align_mod(align, mod);
+ /*
+ * The trailing nop is needed to fulfill the two-instruction
+ * requirement between reading hi/lo and starting a mult/div.
+ * Leaving it out may cause gas to insert a nop itself, breaking
+ * the desired alignment of the next chunk.
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set noreorder\n\t"
+ ".set nomacro\n\t"
+ "mult %2, %3\n\t"
+ "dsll32 %0, %4, %5\n\t"
+ "mflo $0\n\t"
+ "dsll32 %1, %4, %5\n\t"
+ "nop\n\t"
+ ".set pop"
+ : "=&r" (lv1), "=r" (lw)
+ : "r" (m1), "r" (m2), "r" (s), "I" (0)
+ : "hi", "lo", "$0");
+ /*
+ * We have to use single integers for m1 and m2 and a double
+ * one for p to be sure gcc's mulsidi3 RTL multiplication
+ * instruction has the workaround applied. Older versions of
+ * gcc have correct umulsi3 and mulsi3, but other
+ * multiplication variants lack the workaround.
+ */
+ asm volatile(
+ ""
+ : "=r" (m1), "=r" (m2), "=r" (s)
+ : "0" (m1), "1" (m2), "2" (s));
+ align_mod(align, mod);
+ p = m1 * m2;
+ lv2 = s << 32;
+ asm volatile(
+ ""
+ : "=r" (lv2)
+ : "0" (lv2), "r" (p));
+ local_irq_restore(flags);
+
+ *v1 = lv1;
+ *v2 = lv2;
+ *w = lw;
+}
+
+static __always_inline __init void check_mult_sh(void)
+{
+ long v1[8], v2[8], w[8];
+ int bug, fix, i;
+
+ printk("Checking for the multiply/shift bug... ");
+
+ /*
+ * Testing discovered false negatives for certain code offsets
+ * into cache lines. Hence we test all possible offsets for
+ * the worst assumption of an R4000 I-cache line width of 32
+ * bytes.
+ *
+ * We can't use a loop as alignment directives need to be
+ * immediates.
+ */
+ mult_sh_align_mod(&v1[0], &v2[0], &w[0], 32, 0);
+ mult_sh_align_mod(&v1[1], &v2[1], &w[1], 32, 1);
+ mult_sh_align_mod(&v1[2], &v2[2], &w[2], 32, 2);
+ mult_sh_align_mod(&v1[3], &v2[3], &w[3], 32, 3);
+ mult_sh_align_mod(&v1[4], &v2[4], &w[4], 32, 4);
+ mult_sh_align_mod(&v1[5], &v2[5], &w[5], 32, 5);
+ mult_sh_align_mod(&v1[6], &v2[6], &w[6], 32, 6);
+ mult_sh_align_mod(&v1[7], &v2[7], &w[7], 32, 7);
+
+ bug = 0;
+ for (i = 0; i < 8; i++)
+ if (v1[i] != w[i])
+ bug = 1;
+
+ if (bug == 0) {
+ pr_cont("no.\n");
+ return;
+ }
+
+ pr_cont("yes, workaround... ");
+
+ fix = 1;
+ for (i = 0; i < 8; i++)
+ if (v2[i] != w[i])
+ fix = 0;
+
+ if (fix == 1) {
+ pr_cont("yes.\n");
+ return;
+ }
+
+ pr_cont("no.\n");
+ panic(bug64hit,
+ IS_ENABLED(CONFIG_CPU_R4000_WORKAROUNDS) ? nowar : r4kwar);
+}
+
+static volatile int daddi_ov;
+
+asmlinkage void __init do_daddi_ov(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ daddi_ov = 1;
+ regs->cp0_epc += 4;
+ exception_exit(prev_state);
+}
+
+static __init void check_daddi(void)
+{
+ extern asmlinkage void handle_daddi_ov(void);
+ unsigned long flags;
+ void *handler;
+ long v, tmp;
+
+ printk("Checking for the daddi bug... ");
+
+ local_irq_save(flags);
+ handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
+ /*
+ * The following code fails to trigger an overflow exception
+ * when executed on R4000 rev. 2.2 or 3.0 (PRId 00000422 or
+ * 00000430, respectively).
+ *
+ * See "MIPS R4000PC/SC Errata, Processor Revision 2.2 and
+ * 3.0" by MIPS Technologies, Inc., erratum #23 for details.
+ * I got no permission to duplicate it here, sigh... --macro
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set noreorder\n\t"
+ ".set nomacro\n\t"
+ "addiu %1, $0, %2\n\t"
+ "dsrl %1, %1, 1\n\t"
+#ifdef HAVE_AS_SET_DADDI
+ ".set daddi\n\t"
+#endif
+ "daddi %0, %1, %3\n\t"
+ ".set pop"
+ : "=r" (v), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+ set_except_vector(EXCCODE_OV, handler);
+ local_irq_restore(flags);
+
+ if (daddi_ov) {
+ pr_cont("no.\n");
+ return;
+ }
+
+ pr_cont("yes, workaround... ");
+
+ local_irq_save(flags);
+ handler = set_except_vector(EXCCODE_OV, handle_daddi_ov);
+ asm volatile(
+ "addiu %1, $0, %2\n\t"
+ "dsrl %1, %1, 1\n\t"
+ "daddi %0, %1, %3"
+ : "=r" (v), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+ set_except_vector(EXCCODE_OV, handler);
+ local_irq_restore(flags);
+
+ if (daddi_ov) {
+ pr_cont("yes.\n");
+ return;
+ }
+
+ pr_cont("no.\n");
+ panic(bug64hit,
+ IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
+}
+
+int daddiu_bug = -1;
+
+static __init void check_daddiu(void)
+{
+ long v, w, tmp;
+
+ printk("Checking for the daddiu bug... ");
+
+ /*
+ * The following code leads to a wrong result of daddiu when
+ * executed on R4400 rev. 1.0 (PRId 00000440).
+ *
+ * See "MIPS R4400PC/SC Errata, Processor Revision 1.0" by
+ * MIPS Technologies, Inc., erratum #7 for details.
+ *
+ * According to "MIPS R4000PC/SC Errata, Processor Revision
+ * 2.2 and 3.0" by MIPS Technologies, Inc., erratum #41 this
+ * problem affects R4000 rev. 2.2 and 3.0 (PRId 00000422 and
+ * 00000430, respectively), too. Testing failed to trigger it
+ * so far.
+ *
+ * I got no permission to duplicate the errata here, sigh...
+ * --macro
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set noat\n\t"
+ ".set noreorder\n\t"
+ ".set nomacro\n\t"
+ "addiu %2, $0, %3\n\t"
+ "dsrl %2, %2, 1\n\t"
+#ifdef HAVE_AS_SET_DADDI
+ ".set daddi\n\t"
+#endif
+ "daddiu %0, %2, %4\n\t"
+ "addiu %1, $0, %4\n\t"
+ "daddu %1, %2\n\t"
+ ".set pop"
+ : "=&r" (v), "=&r" (w), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+
+ daddiu_bug = v != w;
+
+ if (!daddiu_bug) {
+ pr_cont("no.\n");
+ return;
+ }
+
+ pr_cont("yes, workaround... ");
+
+ asm volatile(
+ "addiu %2, $0, %3\n\t"
+ "dsrl %2, %2, 1\n\t"
+ "daddiu %0, %2, %4\n\t"
+ "addiu %1, $0, %4\n\t"
+ "daddu %1, %2"
+ : "=&r" (v), "=&r" (w), "=&r" (tmp)
+ : "I" (0xffffffffffffdb9aUL), "I" (0x1234));
+
+ if (v == w) {
+ pr_cont("yes.\n");
+ return;
+ }
+
+ pr_cont("no.\n");
+ panic(bug64hit,
+ IS_ENABLED(CONFIG_CPU_DADDI_WORKAROUNDS) ? nowar : daddiwar);
+}
+
+void __init check_bugs64_early(void)
+{
+ check_mult_sh();
+ check_daddiu();
+}
+
+void __init check_bugs64(void)
+{
+ check_daddi();
+}
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
new file mode 100644
index 000000000..2e687c60b
--- /dev/null
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -0,0 +1,417 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 98, 99, 2000, 01 Ralf Baechle
+ *
+ * Multi-arch abstraction and asm macros for easier reading:
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ *
+ * Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * Copyright (C) 1999, 2001 Silicon Graphics, Inc.
+ */
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/errno.h>
+#include <asm/export.h>
+#include <asm/fpregdef.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+
+/* The preprocessor otherwise replaces the fp in ".set fp=64" with $30 */
+#undef fp
+
+ .macro EX insn, reg, src
+ .set push
+ SET_HARDFLOAT
+ .set nomacro
+.ex\@: \insn \reg, \src
+ .set pop
+ .section __ex_table,"a"
+ PTR_WD .ex\@, fault
+ .previous
+ .endm
+
+/*
+ * Save a thread's fp context.
+ */
+LEAF(_save_fp)
+EXPORT_SYMBOL(_save_fp)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_save_double a0 t0 t1 # clobbers t1
+ jr ra
+ END(_save_fp)
+
+/*
+ * Restore a thread's fp context.
+ */
+LEAF(_restore_fp)
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ mfc0 t0, CP0_STATUS
+#endif
+ fpu_restore_double a0 t0 t1 # clobbers t1
+ jr ra
+ END(_restore_fp)
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+/*
+ * Save a thread's MSA vector context.
+ */
+LEAF(_save_msa)
+EXPORT_SYMBOL(_save_msa)
+ msa_save_all a0
+ jr ra
+ END(_save_msa)
+
+/*
+ * Restore a thread's MSA vector context.
+ */
+LEAF(_restore_msa)
+ msa_restore_all a0
+ jr ra
+ END(_restore_msa)
+
+LEAF(_init_msa_upper)
+ msa_init_all_upper
+ jr ra
+ END(_init_msa_upper)
+
+#endif
+
+ .set noreorder
+
+/**
+ * _save_fp_context() - save FP context from the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Save FP context, including the 32 FP data registers and the FP
+ * control & status register, from the FPU to signal context.
+ */
+LEAF(_save_fp_context)
+ .set push
+ SET_HARDFLOAT
+ cfc1 t1, fcr31
+ .set pop
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ .set push
+ SET_HARDFLOAT
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5)
+ .set mips32r2
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip storing odd if FR=0
+ nop
+#endif
+ /* Store the 16 odd double precision registers */
+ EX sdc1 $f1, 8(a0)
+ EX sdc1 $f3, 24(a0)
+ EX sdc1 $f5, 40(a0)
+ EX sdc1 $f7, 56(a0)
+ EX sdc1 $f9, 72(a0)
+ EX sdc1 $f11, 88(a0)
+ EX sdc1 $f13, 104(a0)
+ EX sdc1 $f15, 120(a0)
+ EX sdc1 $f17, 136(a0)
+ EX sdc1 $f19, 152(a0)
+ EX sdc1 $f21, 168(a0)
+ EX sdc1 $f23, 184(a0)
+ EX sdc1 $f25, 200(a0)
+ EX sdc1 $f27, 216(a0)
+ EX sdc1 $f29, 232(a0)
+ EX sdc1 $f31, 248(a0)
+1: .set pop
+#endif
+
+ .set push
+ SET_HARDFLOAT
+ /* Store the 16 even double precision registers */
+ EX sdc1 $f0, 0(a0)
+ EX sdc1 $f2, 16(a0)
+ EX sdc1 $f4, 32(a0)
+ EX sdc1 $f6, 48(a0)
+ EX sdc1 $f8, 64(a0)
+ EX sdc1 $f10, 80(a0)
+ EX sdc1 $f12, 96(a0)
+ EX sdc1 $f14, 112(a0)
+ EX sdc1 $f16, 128(a0)
+ EX sdc1 $f18, 144(a0)
+ EX sdc1 $f20, 160(a0)
+ EX sdc1 $f22, 176(a0)
+ EX sdc1 $f24, 192(a0)
+ EX sdc1 $f26, 208(a0)
+ EX sdc1 $f28, 224(a0)
+ EX sdc1 $f30, 240(a0)
+ EX sw t1, 0(a1)
+ jr ra
+ li v0, 0 # success
+ .set pop
+ END(_save_fp_context)
+
+/**
+ * _restore_fp_context() - restore FP context to the FPU
+ * @a0 - pointer to fpregs field of sigcontext
+ * @a1 - pointer to fpc_csr field of sigcontext
+ *
+ * Restore FP context, including the 32 FP data registers and the FP
+ * control & status register, from signal context to the FPU.
+ */
+LEAF(_restore_fp_context)
+ EX lw t1, 0(a1)
+
+#if defined(CONFIG_64BIT) || defined(CONFIG_CPU_MIPSR2) || \
+ defined(CONFIG_CPU_MIPSR5) || defined(CONFIG_CPU_MIPSR6)
+ .set push
+ SET_HARDFLOAT
+#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR5)
+ .set mips32r2
+ .set fp=64
+ mfc0 t0, CP0_STATUS
+ sll t0, t0, 5
+ bgez t0, 1f # skip loading odd if FR=0
+ nop
+#endif
+ EX ldc1 $f1, 8(a0)
+ EX ldc1 $f3, 24(a0)
+ EX ldc1 $f5, 40(a0)
+ EX ldc1 $f7, 56(a0)
+ EX ldc1 $f9, 72(a0)
+ EX ldc1 $f11, 88(a0)
+ EX ldc1 $f13, 104(a0)
+ EX ldc1 $f15, 120(a0)
+ EX ldc1 $f17, 136(a0)
+ EX ldc1 $f19, 152(a0)
+ EX ldc1 $f21, 168(a0)
+ EX ldc1 $f23, 184(a0)
+ EX ldc1 $f25, 200(a0)
+ EX ldc1 $f27, 216(a0)
+ EX ldc1 $f29, 232(a0)
+ EX ldc1 $f31, 248(a0)
+1: .set pop
+#endif
+ .set push
+ SET_HARDFLOAT
+ EX ldc1 $f0, 0(a0)
+ EX ldc1 $f2, 16(a0)
+ EX ldc1 $f4, 32(a0)
+ EX ldc1 $f6, 48(a0)
+ EX ldc1 $f8, 64(a0)
+ EX ldc1 $f10, 80(a0)
+ EX ldc1 $f12, 96(a0)
+ EX ldc1 $f14, 112(a0)
+ EX ldc1 $f16, 128(a0)
+ EX ldc1 $f18, 144(a0)
+ EX ldc1 $f20, 160(a0)
+ EX ldc1 $f22, 176(a0)
+ EX ldc1 $f24, 192(a0)
+ EX ldc1 $f26, 208(a0)
+ EX ldc1 $f28, 224(a0)
+ EX ldc1 $f30, 240(a0)
+ ctc1 t1, fcr31
+ .set pop
+ jr ra
+ li v0, 0 # success
+ END(_restore_fp_context)
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro op_one_wr op, idx, base
+ .align 4
+\idx: \op \idx, 0, \base
+ jr ra
+ nop
+ .endm
+
+ .macro op_msa_wr name, op
+LEAF(\name)
+ .set push
+ .set noreorder
+ sll t0, a0, 4
+ PTR_LA t1, 0f
+ PTR_ADDU t0, t0, t1
+ jr t0
+ nop
+ op_one_wr \op, 0, a1
+ op_one_wr \op, 1, a1
+ op_one_wr \op, 2, a1
+ op_one_wr \op, 3, a1
+ op_one_wr \op, 4, a1
+ op_one_wr \op, 5, a1
+ op_one_wr \op, 6, a1
+ op_one_wr \op, 7, a1
+ op_one_wr \op, 8, a1
+ op_one_wr \op, 9, a1
+ op_one_wr \op, 10, a1
+ op_one_wr \op, 11, a1
+ op_one_wr \op, 12, a1
+ op_one_wr \op, 13, a1
+ op_one_wr \op, 14, a1
+ op_one_wr \op, 15, a1
+ op_one_wr \op, 16, a1
+ op_one_wr \op, 17, a1
+ op_one_wr \op, 18, a1
+ op_one_wr \op, 19, a1
+ op_one_wr \op, 20, a1
+ op_one_wr \op, 21, a1
+ op_one_wr \op, 22, a1
+ op_one_wr \op, 23, a1
+ op_one_wr \op, 24, a1
+ op_one_wr \op, 25, a1
+ op_one_wr \op, 26, a1
+ op_one_wr \op, 27, a1
+ op_one_wr \op, 28, a1
+ op_one_wr \op, 29, a1
+ op_one_wr \op, 30, a1
+ op_one_wr \op, 31, a1
+ .set pop
+ END(\name)
+ .endm
+
+ op_msa_wr read_msa_wr_b, st_b
+ op_msa_wr read_msa_wr_h, st_h
+ op_msa_wr read_msa_wr_w, st_w
+ op_msa_wr read_msa_wr_d, st_d
+
+ op_msa_wr write_msa_wr_b, ld_b
+ op_msa_wr write_msa_wr_h, ld_h
+ op_msa_wr write_msa_wr_w, ld_w
+ op_msa_wr write_msa_wr_d, ld_d
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ .macro save_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ copy_s_d \wr, 1
+ EX sd $1, \off(\base)
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ copy_s_w \wr, 2
+ EX sw $1, \off(\base)
+ copy_s_w \wr, 3
+ EX sw $1, (\off+4)(\base)
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ copy_s_w \wr, 2
+ EX sw $1, (\off+4)(\base)
+ copy_s_w \wr, 3
+ EX sw $1, \off(\base)
+#endif
+ .set pop
+ .endm
+
+LEAF(_save_msa_all_upper)
+ save_msa_upper 0, 0x00, a0
+ save_msa_upper 1, 0x08, a0
+ save_msa_upper 2, 0x10, a0
+ save_msa_upper 3, 0x18, a0
+ save_msa_upper 4, 0x20, a0
+ save_msa_upper 5, 0x28, a0
+ save_msa_upper 6, 0x30, a0
+ save_msa_upper 7, 0x38, a0
+ save_msa_upper 8, 0x40, a0
+ save_msa_upper 9, 0x48, a0
+ save_msa_upper 10, 0x50, a0
+ save_msa_upper 11, 0x58, a0
+ save_msa_upper 12, 0x60, a0
+ save_msa_upper 13, 0x68, a0
+ save_msa_upper 14, 0x70, a0
+ save_msa_upper 15, 0x78, a0
+ save_msa_upper 16, 0x80, a0
+ save_msa_upper 17, 0x88, a0
+ save_msa_upper 18, 0x90, a0
+ save_msa_upper 19, 0x98, a0
+ save_msa_upper 20, 0xa0, a0
+ save_msa_upper 21, 0xa8, a0
+ save_msa_upper 22, 0xb0, a0
+ save_msa_upper 23, 0xb8, a0
+ save_msa_upper 24, 0xc0, a0
+ save_msa_upper 25, 0xc8, a0
+ save_msa_upper 26, 0xd0, a0
+ save_msa_upper 27, 0xd8, a0
+ save_msa_upper 28, 0xe0, a0
+ save_msa_upper 29, 0xe8, a0
+ save_msa_upper 30, 0xf0, a0
+ save_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_save_msa_all_upper)
+
+ .macro restore_msa_upper wr, off, base
+ .set push
+ .set noat
+#ifdef CONFIG_64BIT
+ EX ld $1, \off(\base)
+ insert_d \wr, 1
+#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
+ EX lw $1, \off(\base)
+ insert_w \wr, 2
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 3
+#else /* CONFIG_CPU_BIG_ENDIAN */
+ EX lw $1, (\off+4)(\base)
+ insert_w \wr, 2
+ EX lw $1, \off(\base)
+ insert_w \wr, 3
+#endif
+ .set pop
+ .endm
+
+LEAF(_restore_msa_all_upper)
+ restore_msa_upper 0, 0x00, a0
+ restore_msa_upper 1, 0x08, a0
+ restore_msa_upper 2, 0x10, a0
+ restore_msa_upper 3, 0x18, a0
+ restore_msa_upper 4, 0x20, a0
+ restore_msa_upper 5, 0x28, a0
+ restore_msa_upper 6, 0x30, a0
+ restore_msa_upper 7, 0x38, a0
+ restore_msa_upper 8, 0x40, a0
+ restore_msa_upper 9, 0x48, a0
+ restore_msa_upper 10, 0x50, a0
+ restore_msa_upper 11, 0x58, a0
+ restore_msa_upper 12, 0x60, a0
+ restore_msa_upper 13, 0x68, a0
+ restore_msa_upper 14, 0x70, a0
+ restore_msa_upper 15, 0x78, a0
+ restore_msa_upper 16, 0x80, a0
+ restore_msa_upper 17, 0x88, a0
+ restore_msa_upper 18, 0x90, a0
+ restore_msa_upper 19, 0x98, a0
+ restore_msa_upper 20, 0xa0, a0
+ restore_msa_upper 21, 0xa8, a0
+ restore_msa_upper 22, 0xb0, a0
+ restore_msa_upper 23, 0xb8, a0
+ restore_msa_upper 24, 0xc0, a0
+ restore_msa_upper 25, 0xc8, a0
+ restore_msa_upper 26, 0xd0, a0
+ restore_msa_upper 27, 0xd8, a0
+ restore_msa_upper 28, 0xe0, a0
+ restore_msa_upper 29, 0xe8, a0
+ restore_msa_upper 30, 0xf0, a0
+ restore_msa_upper 31, 0xf8, a0
+ jr ra
+ li v0, 0
+ END(_restore_msa_all_upper)
+
+#endif /* CONFIG_CPU_HAS_MSA */
+
+ .set reorder
+
+ .type fault, @function
+ .ent fault
+fault: li v0, -EFAULT # failure
+ jr ra
+ .end fault
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
new file mode 100644
index 000000000..58232ae6c
--- /dev/null
+++ b/arch/mips/kernel/r4k_switch.S
@@ -0,0 +1,59 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994, 1995, 1996, 1998, 1999, 2002, 2003 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
+ * Copyright (C) 1994, 1995, 1996, by Andreas Busse
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000 MIPS Technologies, Inc.
+ * written by Carsten Langgaard, carstenl@mips.com
+ */
+#include <asm/asm.h>
+#include <asm/cachectl.h>
+#include <asm/mipsregs.h>
+#include <asm/asm-offsets.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+
+#include <asm/asmmacro.h>
+
+/*
+ * task_struct *resume(task_struct *prev, task_struct *next,
+ * struct thread_info *next_ti)
+ */
+ .align 5
+ LEAF(resume)
+ mfc0 t1, CP0_STATUS
+ LONG_S t1, THREAD_STATUS(a0)
+ cpu_save_nonscratch a0
+ LONG_S ra, THREAD_REG31(a0)
+
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+ PTR_LA t8, __stack_chk_guard
+ LONG_L t9, TASK_STACK_CANARY(a1)
+ LONG_S t9, 0(t8)
+#endif
+
+ /*
+ * The order of restoring the registers takes care of the race
+ * updating $28, $29 and kernelsp without disabling ints.
+ */
+ move $28, a2
+ cpu_restore_nonscratch a1
+
+ PTR_ADDU t0, $28, _THREAD_SIZE - 32
+ set_saved_sp t0, t1, t2
+ mfc0 t1, CP0_STATUS /* Do we really need this? */
+ li a3, 0xff01
+ and t1, a3
+ LONG_L a2, THREAD_STATUS(a1)
+ nor a3, $0, a3
+ and a2, a3
+ or a2, t1
+ mtc0 a2, CP0_STATUS
+ move v0, a0
+ jr ra
+ END(resume)
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
new file mode 100644
index 000000000..58fc8d089
--- /dev/null
+++ b/arch/mips/kernel/relocate.c
@@ -0,0 +1,466 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2015, Imagination Technologies Ltd.
+ * Authors: Matt Redfearn (matt.redfearn@mips.com)
+ */
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/timex.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+#include <linux/panic_notifier.h>
+#include <linux/sched/task.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+
+#define RELOCATED(x) ((void *)((long)x + offset))
+
+extern u32 _relocation_start[]; /* End kernel image / start relocation table */
+extern u32 _relocation_end[]; /* End relocation table */
+
+extern long __start___ex_table; /* Start exception table */
+extern long __stop___ex_table; /* End exception table */
+
+extern void __weak plat_fdt_relocated(void *new_location);
+
+/*
+ * This function may be defined for a platform to perform any post-relocation
+ * fixup necessary.
+ * Return non-zero to abort relocation.
+ */
+int __weak plat_post_relocation(long offset)
+{
+ return 0;
+}
+
+static inline u32 __init get_synci_step(void)
+{
+ u32 res;
+
+ __asm__("rdhwr %0, $1" : "=r" (res));
+
+ return res;
+}
+
+static void __init sync_icache(void *kbase, unsigned long kernel_length)
+{
+ void *kend = kbase + kernel_length;
+ u32 step = get_synci_step();
+
+ do {
+ __asm__ __volatile__(
+ "synci 0(%0)"
+ : /* no output */
+ : "r" (kbase));
+
+ kbase += step;
+ } while (step && kbase < kend);
+
+ /* Completion barrier */
+ __sync();
+}
+
+static void __init apply_r_mips_64_rel(u32 *loc_new, long offset)
+{
+ *(u64 *)loc_new += offset;
+}
+
+static void __init apply_r_mips_32_rel(u32 *loc_new, long offset)
+{
+ *loc_new += offset;
+}
+
+static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ unsigned long target_addr = (*loc_orig) & 0x03ffffff;
+
+ if (offset % 4) {
+ pr_err("Dangerous R_MIPS_26 REL relocation\n");
+ return -ENOEXEC;
+ }
+
+ /* Original target address */
+ target_addr <<= 2;
+ target_addr += (unsigned long)loc_orig & 0xf0000000;
+
+ /* Get the new target address */
+ target_addr += offset;
+
+ if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) {
+ pr_err("R_MIPS_26 REL relocation overflow\n");
+ return -ENOEXEC;
+ }
+
+ target_addr -= (unsigned long)loc_new & 0xf0000000;
+ target_addr >>= 2;
+
+ *loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff);
+
+ return 0;
+}
+
+
+static void __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new,
+ long offset)
+{
+ unsigned long insn = *loc_orig;
+ unsigned long target = (insn & 0xffff) << 16; /* high 16bits of target */
+
+ target += offset;
+
+ *loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
+}
+
+static int __init reloc_handler(u32 type, u32 *loc_orig, u32 *loc_new,
+ long offset)
+{
+ switch (type) {
+ case R_MIPS_64:
+ apply_r_mips_64_rel(loc_new, offset);
+ break;
+ case R_MIPS_32:
+ apply_r_mips_32_rel(loc_new, offset);
+ break;
+ case R_MIPS_26:
+ return apply_r_mips_26_rel(loc_orig, loc_new, offset);
+ case R_MIPS_HI16:
+ apply_r_mips_hi16_rel(loc_orig, loc_new, offset);
+ break;
+ default:
+ pr_err("Unhandled relocation type %d at 0x%pK\n", type,
+ loc_orig);
+ return -ENOEXEC;
+ }
+
+ return 0;
+}
+
+static int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
+{
+ u32 *r;
+ u32 *loc_orig;
+ u32 *loc_new;
+ int type;
+ int res;
+
+ for (r = _relocation_start; r < _relocation_end; r++) {
+ /* Sentinel for last relocation */
+ if (*r == 0)
+ break;
+
+ type = (*r >> 24) & 0xff;
+ loc_orig = kbase_old + ((*r & 0x00ffffff) << 2);
+ loc_new = RELOCATED(loc_orig);
+
+ res = reloc_handler(type, loc_orig, loc_new, offset);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
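
An illustrative decode (commentary, not part of this patch) of the packed entry format consumed above: the relocs tool emits one u32 per relocation, with the relocation type in the top byte and a kernel-relative word index in the low 24 bits, so byte offsets of up to 64 MiB from _text can be expressed.

#include <stdint.h>
#include <stdio.h>

static void decode_reloc(uint32_t r, uintptr_t kbase)
{
	unsigned int type = (r >> 24) & 0xff;	/* e.g. R_MIPS_26 */
	uintptr_t loc = kbase + ((r & 0x00ffffff) << 2);	/* word index -> byte address */

	printf("reloc type %u at %#lx\n", type, (unsigned long)loc);
}
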
+
+/*
+ * The exception table is filled in by the relocs tool after vmlinux is linked.
+ * It must be relocated separately since there will not be any relocation
+ * information for it filled in by the linker.
+ */
+static int __init relocate_exception_table(long offset)
+{
+ unsigned long *etable_start, *etable_end, *e;
+
+ etable_start = RELOCATED(&__start___ex_table);
+ etable_end = RELOCATED(&__stop___ex_table);
+
+ for (e = etable_start; e < etable_end; e++)
+ *e += offset;
+
+ return 0;
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
+{
+ const typeof(hash) *ptr = PTR_ALIGN(area, sizeof(hash));
+ size_t diff, i;
+
+ diff = (void *)ptr - area;
+ if (unlikely(size < diff + sizeof(hash)))
+ return hash;
+
+ size = ALIGN_DOWN(size - diff, sizeof(hash));
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long entropy = random_get_entropy();
+ unsigned long hash = 0;
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+#if defined(CONFIG_USE_OF)
+ /* Get any additional entropy passed in device tree */
+ if (initial_boot_params) {
+ int node, len;
+ u64 *prop;
+
+ node = fdt_path_offset(initial_boot_params, "/chosen");
+ if (node >= 0) {
+ prop = fdt_getprop_w(initial_boot_params, node,
+ "kaslr-seed", &len);
+ if (prop && (len == sizeof(u64)))
+ hash = rotate_xor(hash, prop, sizeof(*prop));
+ }
+ }
+#endif /* CONFIG_USE_OF */
+
+ return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+
+#if defined(CONFIG_CMDLINE_BOOL)
+ const char *builtin_cmdline = CONFIG_CMDLINE;
+
+ str = strstr(builtin_cmdline, "nokaslr");
+ if (str == builtin_cmdline ||
+ (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+#endif
+ str = strstr(arcs_cmdline, "nokaslr");
+ if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
+static inline void __init *determine_relocation_address(void)
+{
+ /* Choose a new address for the kernel */
+ unsigned long kernel_length;
+ void *dest = &_text;
+ unsigned long offset;
+
+ if (kaslr_disabled())
+ return dest;
+
+ kernel_length = (long)_end - (long)(&_text);
+
+ offset = get_random_boot() << 16;
+ offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (offset < kernel_length)
+ offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED(dest);
+}
+
+#else
+
+static inline void __init *determine_relocation_address(void)
+{
+ /*
+ * Choose a new address for the kernel.
+ * For now we hard-code the destination.
+ */
+ return (void *)0xffffffff81000000;
+}
+
+#endif
+
+static inline int __init relocation_addr_valid(void *loc_new)
+{
+ if ((unsigned long)loc_new & 0x0000ffff) {
+ /* Inappropriately aligned new location */
+ return 0;
+ }
+ if ((unsigned long)loc_new < (unsigned long)&_end) {
+ /* New location overlaps original kernel */
+ return 0;
+ }
+ return 1;
+}
+
+static inline void __init update_kaslr_offset(unsigned long *addr, long offset)
+{
+ unsigned long *new_addr = (unsigned long *)RELOCATED(addr);
+
+ *new_addr = (unsigned long)offset;
+}
+
+#if defined(CONFIG_USE_OF)
+void __weak *plat_get_fdt(void)
+{
+ return NULL;
+}
+#endif
+
+void *__init relocate_kernel(void)
+{
+ void *loc_new;
+ unsigned long kernel_length;
+ unsigned long bss_length;
+ long offset = 0;
+ int res = 1;
+ /* Default to original kernel entry point */
+ void *kernel_entry = start_kernel;
+ void *fdt = NULL;
+
+ /* Get the command line */
+ fw_init_cmdline();
+#if defined(CONFIG_USE_OF)
+ /* Deal with the device tree */
+ fdt = plat_get_fdt();
+ early_init_dt_scan(fdt);
+ if (boot_command_line[0]) {
+ /* Boot command line was passed in device tree */
+ strscpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ }
+#endif /* CONFIG_USE_OF */
+
+ kernel_length = (long)(&_relocation_start) - (long)(&_text);
+ bss_length = (long)&__bss_stop - (long)&__bss_start;
+
+ loc_new = determine_relocation_address();
+
+ /* Sanity check relocation address */
+ if (relocation_addr_valid(loc_new))
+ offset = (unsigned long)loc_new - (unsigned long)(&_text);
+
+ /* Reset the command line now so we don't end up with a duplicate */
+ arcs_cmdline[0] = '\0';
+
+ if (offset) {
+ void (*fdt_relocated_)(void *) = NULL;
+#if defined(CONFIG_USE_OF)
+ unsigned long fdt_phys = virt_to_phys(fdt);
+
+ /*
+ * If a built-in dtb is used then it will have been relocated
+ * during kernel _text relocation. If an appended DTB is used
+ * then it will not be relocated, but it should remain
+ * intact in the original location. If the dtb is loaded by
+ * the bootloader then it may need to be moved if it crosses
+ * the target memory area.
+ */
+
+ if (fdt_phys >= virt_to_phys(RELOCATED(&_text)) &&
+ fdt_phys <= virt_to_phys(RELOCATED(&_end))) {
+ void *fdt_relocated =
+ RELOCATED(ALIGN((long)&_end, PAGE_SIZE));
+ memcpy(fdt_relocated, fdt, fdt_totalsize(fdt));
+ fdt = fdt_relocated;
+ fdt_relocated_ = RELOCATED(&plat_fdt_relocated);
+ }
+#endif /* CONFIG_USE_OF */
+
+ /* Copy the kernel to its new location */
+ memcpy(loc_new, &_text, kernel_length);
+
+ /* Perform relocations on the new kernel */
+ res = do_relocations(&_text, loc_new, offset);
+ if (res < 0)
+ goto out;
+
+ /* Sync the caches ready for execution of new kernel */
+ sync_icache(loc_new, kernel_length);
+
+ res = relocate_exception_table(offset);
+ if (res < 0)
+ goto out;
+
+ /*
+ * The original .bss has already been cleared, and
+ * some variables such as command line parameters have been
+ * stored to it, so make a copy in the new location.
+ */
+ memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+ /*
+ * If fdt was stored outside of the kernel image and
+ * had to be moved then update the platform's state data
+ * with the new fdt location.
+ */
+ if (fdt_relocated_)
+ fdt_relocated_(fdt);
+
+ /*
+ * Last chance for the platform to abort relocation.
+ * This may also be used by the platform to perform any
+ * initialisation required now that the new kernel is
+ * resident in memory and ready to be executed.
+ */
+ if (plat_post_relocation(offset))
+ goto out;
+
+ /* The current thread is now within the relocated image */
+ __current_thread_info = RELOCATED(&init_thread_union);
+
+ /* Return the new kernel's entry point */
+ kernel_entry = RELOCATED(start_kernel);
+
+ /* Errors may occur earlier, so update the KASLR offset last */
+ update_kaslr_offset(&__kaslr_offset, offset);
+ }
+out:
+ return kernel_entry;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+static void show_kernel_relocation(const char *level)
+{
+ if (__kaslr_offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated by 0x%pK\n", (void *)__kaslr_offset);
+ pr_cont(" .text @ 0x%pK\n", _text);
+ pr_cont(" .data @ 0x%pK\n", _sdata);
+ pr_cont(" .bss @ 0x%pK\n", __bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
diff --git a/arch/mips/kernel/relocate_kernel.S b/arch/mips/kernel/relocate_kernel.S
new file mode 100644
index 000000000..f5b2ef979
--- /dev/null
+++ b/arch/mips/kernel/relocate_kernel.S
@@ -0,0 +1,184 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * relocate_kernel.S for kexec
+ * Created by <nschichan@corp.free.fr> on Thu Oct 12 17:49:57 2006
+ */
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+
+#include <kernel-entry-init.h>
+
+LEAF(relocate_new_kernel)
+ PTR_L a0, arg0
+ PTR_L a1, arg1
+ PTR_L a2, arg2
+ PTR_L a3, arg3
+
+ PTR_L s0, kexec_indirection_page
+ PTR_L s1, kexec_start_address
+
+process_entry:
+ PTR_L s2, (s0)
+ PTR_ADDIU s0, s0, SZREG
+
+ /*
+ * In case of a kdump/crash kernel, the indirection page is not
+ * populated as the kernel is directly copied to a reserved location
+ */
+ beqz s2, done
+
+ /* destination page */
+ and s3, s2, 0x1
+ beq s3, zero, 1f
+ and s4, s2, ~0x1 /* store destination addr in s4 */
+ b process_entry
+
+1:
+ /* indirection page, update s0 */
+ and s3, s2, 0x2
+ beq s3, zero, 1f
+ and s0, s2, ~0x2
+ b process_entry
+
+1:
+ /* done page */
+ and s3, s2, 0x4
+ beq s3, zero, 1f
+ b done
+1:
+ /* source page */
+ and s3, s2, 0x8
+ beq s3, zero, process_entry
+ and s2, s2, ~0x8
+ li s6, (1 << _PAGE_SHIFT) / SZREG
+
+copy_word:
+ /* copy page word by word */
+ REG_L s5, (s2)
+ REG_S s5, (s4)
+ PTR_ADDIU s4, s4, SZREG
+ PTR_ADDIU s2, s2, SZREG
+ LONG_ADDIU s6, s6, -1
+ beq s6, zero, process_entry
+ b copy_word
+ b process_entry
+
+done:
+#ifdef CONFIG_SMP
+ /*
+ * Resetting kexec_flag signals the other CPUs that the kernel
+ * has been moved to its new location. Note: we need the
+ * relocated address of kexec_flag.
+ */
+
+ bal 1f
+ 1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+ LONG_S zero,(t0)
+#endif
+
+#ifdef CONFIG_CPU_CAVIUM_OCTEON
+ /* We need to flush the I-cache before jumping to the new kernel.
+ * Unfortunately, this code is CPU-specific.
+ */
+ .set push
+ .set noreorder
+ syncw
+ syncw
+ synci 0($0)
+ .set pop
+#else
+ sync
+#endif
+ /* jump to kexec_start_address */
+ j s1
+ END(relocate_new_kernel)
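
An illustrative C rendering (commentary, not part of this patch) of the indirection-page walk performed in assembly above. The IND_* flag values match include/linux/kexec.h; the generic semantics are that the low bits of each entry are flags and the remainder is a page address.

#include <stdint.h>
#include <string.h>

#define IND_DESTINATION	0x1	/* entry sets the next destination page */
#define IND_INDIRECTION	0x2	/* entry points at the next entry array */
#define IND_DONE	0x4	/* end of the list */
#define IND_SOURCE	0x8	/* entry is a source page to copy */

static void process_entries(uintptr_t *ind, size_t page_size)
{
	uintptr_t dest = 0, entry;

	/* A zero entry (the kdump case) also terminates the real loop. */
	while ((entry = *ind++) && !(entry & IND_DONE)) {
		if (entry & IND_DESTINATION)
			dest = entry & ~(uintptr_t)0xf;
		else if (entry & IND_INDIRECTION)
			ind = (uintptr_t *)(entry & ~(uintptr_t)0xf);
		else if (entry & IND_SOURCE) {
			memcpy((void *)dest,
			       (void *)(entry & ~(uintptr_t)0xf), page_size);
			dest += page_size;
		}
	}
}
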
+
+#ifdef CONFIG_SMP
+/*
+ * Other CPUs should wait until the kernel has been relocated and
+ * then jump to the new kernel's entry point.
+ */
+LEAF(kexec_smp_wait)
+ PTR_L a0, s_arg0
+ PTR_L a1, s_arg1
+ PTR_L a2, s_arg2
+ PTR_L a3, s_arg3
+ PTR_L s1, kexec_start_address
+
+ /* Non-relocated addresses work for the args and kexec_start_address
+ * (the old kernel is not overwritten), but we need the relocated
+ * address of kexec_flag.
+ */
+
+ bal 1f
+1: move t1,ra;
+ PTR_LA t2,1b
+ PTR_LA t0,kexec_flag
+ PTR_SUB t0,t0,t2;
+ PTR_ADD t0,t1,t0;
+
+1: LONG_L s0, (t0)
+ bne s0, zero,1b
+
+#ifdef USE_KEXEC_SMP_WAIT_FINAL
+ kexec_smp_wait_final
+#else
+ sync
+#endif
+ j s1
+ END(kexec_smp_wait)
+#endif
+
+#ifdef __mips64
+ /* all PTRs must be aligned to 8 bytes in 64-bit mode */
+ .align 3
+#endif
+
+/* All parameters to the new kernel are passed in registers a0-a3.
+ * kexec_args[0..3] are used to prepare register values.
+ */
+
+EXPORT(kexec_args)
+arg0: PTR_WD 0x0
+arg1: PTR_WD 0x0
+arg2: PTR_WD 0x0
+arg3: PTR_WD 0x0
+ .size kexec_args,PTRSIZE*4
+
+#ifdef CONFIG_SMP
+/*
+ * Secondary CPUs may have different kernel parameters in
+ * their registers a0-a3. secondary_kexec_args[0..3] are used
+ * to prepare register values.
+ */
+EXPORT(secondary_kexec_args)
+s_arg0: PTR_WD 0x0
+s_arg1: PTR_WD 0x0
+s_arg2: PTR_WD 0x0
+s_arg3: PTR_WD 0x0
+ .size secondary_kexec_args,PTRSIZE*4
+kexec_flag:
+ LONG 0x1
+
+#endif
+
+EXPORT(kexec_start_address)
+ PTR_WD 0x0
+ .size kexec_start_address, PTRSIZE
+
+EXPORT(kexec_indirection_page)
+ PTR_WD 0
+ .size kexec_indirection_page, PTRSIZE
+
+relocate_new_kernel_end:
+
+EXPORT(relocate_new_kernel_size)
+ PTR_WD relocate_new_kernel_end - relocate_new_kernel
+ .size relocate_new_kernel_size, PTRSIZE
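The bit tests in process_entry above walk the generic kexec indirection list; the flag values 0x1, 0x2, 0x4 and 0x8 match IND_DESTINATION, IND_INDIRECTION, IND_DONE and IND_SOURCE from <linux/kexec.h>. A userspace C sketch of the same state machine, assuming a 4 KiB page size and page-aligned demo buffers:

    #include <string.h>

    #define IND_DESTINATION 0x1     /* entry & ~0x1: where pages go next  */
    #define IND_INDIRECTION 0x2     /* entry & ~0x2: next page of entries */
    #define IND_DONE        0x4     /* end of the whole list              */
    #define IND_SOURCE      0x8     /* entry & ~0x8: page to copy from    */
    #define KEXEC_PAGE_SIZE 4096UL

    /* Equivalent of the process_entry/copy_word loops: a null entry or
     * IND_DONE terminates (the null case is the kdump one noted above). */
    static void walk_kexec_list(unsigned long *entry)
    {
            unsigned long dest = 0;
            unsigned long e;

            while ((e = *entry++) != 0 && !(e & IND_DONE)) {
                    if (e & IND_DESTINATION)
                            dest = e & ~IND_DESTINATION;
                    else if (e & IND_INDIRECTION)
                            entry = (unsigned long *)(e & ~IND_INDIRECTION);
                    else if (e & IND_SOURCE) {
                            memcpy((void *)dest, (void *)(e & ~IND_SOURCE),
                                   KEXEC_PAGE_SIZE);
                            dest += KEXEC_PAGE_SIZE;
                    }
            }
    }

    int main(void)
    {
            static _Alignas(4096) unsigned char src[KEXEC_PAGE_SIZE] = "hello";
            static _Alignas(4096) unsigned char dst[KEXEC_PAGE_SIZE];
            unsigned long list[3];

            list[0] = (unsigned long)dst | IND_DESTINATION;
            list[1] = (unsigned long)src | IND_SOURCE;
            list[2] = IND_DONE;

            walk_kexec_list(list);
            return dst[0] == 'h' ? 0 : 1;   /* 0: page was copied */
    }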
diff --git a/arch/mips/kernel/reset.c b/arch/mips/kernel/reset.c
new file mode 100644
index 000000000..e7ce07b3e
--- /dev/null
+++ b/arch/mips/kernel/reset.c
@@ -0,0 +1,124 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2001, 06 by Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/export.h>
+#include <linux/pm.h>
+#include <linux/types.h>
+#include <linux/reboot.h>
+#include <linux/delay.h>
+
+#include <asm/compiler.h>
+#include <asm/idle.h>
+#include <asm/mipsregs.h>
+#include <asm/reboot.h>
+
+/*
+ * Urgs ... Too many MIPS machines to handle this in a generic way.
+ * So handle them all using function pointers to machine-specific
+ * functions.
+ */
+void (*_machine_restart)(char *command);
+void (*_machine_halt)(void);
+void (*pm_power_off)(void);
+
+EXPORT_SYMBOL(pm_power_off);
+
+static void machine_hang(void)
+{
+ /*
+ * We're hanging the system so we don't want to be interrupted anymore.
+ * Any interrupt handlers that ran would at best be useless & at worst
+ * go awry because the system isn't in a functional state.
+ */
+ local_irq_disable();
+
+ /*
+ * Mask all interrupts, giving us a better chance of remaining in the
+ * low power wait state.
+ */
+ clear_c0_status(ST0_IM);
+
+ while (true) {
+ if (cpu_has_mips_r) {
+ /*
+ * We know that the wait instruction is supported so
+ * make use of it directly, leaving interrupts
+ * disabled.
+ */
+ asm volatile(
+ ".set push\n\t"
+ ".set " MIPS_ISA_ARCH_LEVEL "\n\t"
+ "wait\n\t"
+ ".set pop");
+ } else if (cpu_wait) {
+ /*
+ * Try the cpu_wait() callback. This isn't ideal since
+ * it'll re-enable interrupts, but that ought to be
+ * harmless given that they're all masked.
+ */
+ cpu_wait();
+ local_irq_disable();
+ } else {
+ /*
+ * We're going to burn some power running round the
+ * loop, but we don't really have a choice. This isn't
+ * a path we should expect to run for long during
+ * typical use anyway.
+ */
+ }
+
+ /*
+ * In most modern MIPS CPUs interrupts will cause the wait
+ * instruction to graduate even when disabled, and in some
+ * cases even when masked. In order to prevent a timer
+ * interrupt from continuously taking us out of the low power
+ * wait state, we clear any pending timer interrupt here.
+ */
+ if (cpu_has_counter)
+ write_c0_compare(0);
+ }
+}
+
+void machine_restart(char *command)
+{
+ if (_machine_restart)
+ _machine_restart(command);
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ do_kernel_restart(command);
+ mdelay(1000);
+ pr_emerg("Reboot failed -- System halted\n");
+ machine_hang();
+}
+
+void machine_halt(void)
+{
+ if (_machine_halt)
+ _machine_halt();
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ machine_hang();
+}
+
+void machine_power_off(void)
+{
+ do_kernel_power_off();
+
+#ifdef CONFIG_SMP
+ preempt_disable();
+ smp_send_stop();
+#endif
+ machine_hang();
+}
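Boards hook into this file by assigning the function pointers before the generic machine_restart()/machine_halt() paths run. A hypothetical board-support sketch (myboard_* and the KSEG1 register address are invented for illustration):

    #include <linux/init.h>
    #include <linux/io.h>
    #include <asm/reboot.h>

    /* Assumed SoC reset register, mapped in KSEG1 (uncached). */
    #define MYBOARD_RESET_REG ((void __iomem *)0xbf000000)

    static void myboard_restart(char *command)
    {
            __raw_writel(1, MYBOARD_RESET_REG);
    }

    static int __init myboard_reboot_setup(void)
    {
            _machine_restart = myboard_restart;
            return 0;
    }
    arch_initcall(myboard_reboot_setup);

If the register write fails to reset the machine, machine_restart() above falls through to machine_hang().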
diff --git a/arch/mips/kernel/rtlx-cmp.c b/arch/mips/kernel/rtlx-cmp.c
new file mode 100644
index 000000000..d26dcc4b4
--- /dev/null
+++ b/arch/mips/kernel/rtlx-cmp.c
@@ -0,0 +1,122 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_interrupt(void)
+{
+ int i;
+ struct rtlx_info *info;
+ struct rtlx_info **p = vpe_get_shared(aprp_cpu_index());
+
+ if (p == NULL || *p == NULL)
+ return;
+
+ info = *p;
+
+ if (info->ap_int_pending == 1 && smp_processor_id() == 0) {
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ wake_up(&channel_wqs[i].lx_queue);
+ wake_up(&channel_wqs[i].rt_queue);
+ }
+ info->ap_int_pending = 0;
+ }
+}
+
+void _interrupt_sp(void)
+{
+ smp_send_reschedule(aprp_cpu_index());
+}
+
+int __init rtlx_module_init(void)
+{
+ struct device *dev;
+ int i, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+ "Pass maxcpus=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+ if (major < 0) {
+ pr_err("rtlx_module_init: unable to register device\n");
+ return major;
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+ "%s%d", RTLX_MODULE_NAME, i);
+ if (IS_ERR(dev)) {
+ while (i--)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ }
+
+ /* set up notifiers */
+ rtlx_notify.start = rtlx_starting;
+ rtlx_notify.stop = rtlx_stopping;
+ vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+ if (cpu_has_vint) {
+ aprp_hook = rtlx_interrupt;
+ } else {
+ pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+ err = -ENODEV;
+ goto out_class;
+ }
+
+ return 0;
+
+out_class:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
+}
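The `while (i--)` loop in the device_create() error path above is the usual partial-failure unwind: if creation fails at index i, devices 0..i-1 are destroyed in reverse order before the error is propagated. A generic sketch of the idiom (create_all and the "chan%d" name are hypothetical):

    #include <linux/device.h>
    #include <linux/err.h>
    #include <linux/kdev_t.h>

    static int create_all(struct class *cls, int major, int n)
    {
            int i;

            for (i = 0; i < n; i++) {
                    struct device *dev = device_create(cls, NULL,
                                                       MKDEV(major, i), NULL,
                                                       "chan%d", i);
                    if (IS_ERR(dev)) {
                            while (i--)     /* undo devices 0..i-1 */
                                    device_destroy(cls, MKDEV(major, i));
                            return PTR_ERR(dev);
                    }
            }
            return 0;
    }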
diff --git a/arch/mips/kernel/rtlx-mt.c b/arch/mips/kernel/rtlx-mt.c
new file mode 100644
index 000000000..38c6925a1
--- /dev/null
+++ b/arch/mips/kernel/rtlx-mt.c
@@ -0,0 +1,147 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/err.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+#include <asm/rtlx.h>
+
+static int major;
+
+static void rtlx_dispatch(void)
+{
+ if (read_c0_cause() & read_c0_status() & C_SW0)
+ do_IRQ(MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ);
+}
+
+/*
+ * The interrupt handler may be called before rtlx_init has
+ * otherwise had a chance to run.
+ */
+static irqreturn_t rtlx_interrupt(int irq, void *dev_id)
+{
+ unsigned int vpeflags;
+ unsigned long flags;
+ int i;
+
+ local_irq_save(flags);
+ vpeflags = dvpe();
+ set_c0_status(0x100 << MIPS_CPU_RTLX_IRQ);
+ irq_enable_hazard();
+ evpe(vpeflags);
+ local_irq_restore(flags);
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ wake_up(&channel_wqs[i].lx_queue);
+ wake_up(&channel_wqs[i].rt_queue);
+ }
+
+ return IRQ_HANDLED;
+}
+
+static int rtlx_irq_num = MIPS_CPU_IRQ_BASE + MIPS_CPU_RTLX_IRQ;
+
+void _interrupt_sp(void)
+{
+ unsigned long flags;
+
+ local_irq_save(flags);
+ dvpe();
+ settc(1);
+ write_vpe_c0_cause(read_vpe_c0_cause() | C_SW0);
+ evpe(EVPE_ENABLE);
+ local_irq_restore(flags);
+}
+
+int __init rtlx_module_init(void)
+{
+ struct device *dev;
+ int i, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (aprp_cpu_index() == 0) {
+ pr_warn("No TCs reserved for AP/SP, not initializing RTLX.\n"
+ "Pass maxtcs=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, RTLX_MODULE_NAME, &rtlx_fops);
+ if (major < 0) {
+ pr_err("rtlx_module_init: unable to register device\n");
+ return major;
+ }
+
+ /* initialise the wait queues */
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ init_waitqueue_head(&channel_wqs[i].rt_queue);
+ init_waitqueue_head(&channel_wqs[i].lx_queue);
+ atomic_set(&channel_wqs[i].in_open, 0);
+ mutex_init(&channel_wqs[i].mutex);
+
+ dev = device_create(mt_class, NULL, MKDEV(major, i), NULL,
+ "%s%d", RTLX_MODULE_NAME, i);
+ if (IS_ERR(dev)) {
+ while (i--)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ err = PTR_ERR(dev);
+ goto out_chrdev;
+ }
+ }
+
+ /* set up notifiers */
+ rtlx_notify.start = rtlx_starting;
+ rtlx_notify.stop = rtlx_stopping;
+ vpe_notify(aprp_cpu_index(), &rtlx_notify);
+
+ if (cpu_has_vint) {
+ aprp_hook = rtlx_dispatch;
+ } else {
+ pr_err("APRP RTLX init on non-vectored-interrupt processor\n");
+ err = -ENODEV;
+ goto out_class;
+ }
+
+ err = request_irq(rtlx_irq_num, rtlx_interrupt, 0, "RTLX", rtlx);
+ if (err)
+ goto out_class;
+
+ return 0;
+
+out_class:
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+out_chrdev:
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ return err;
+}
+
+void __exit rtlx_module_exit(void)
+{
+ int i;
+
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ device_destroy(mt_class, MKDEV(major, i));
+
+ unregister_chrdev(major, RTLX_MODULE_NAME);
+
+ aprp_hook = NULL;
+}
diff --git a/arch/mips/kernel/rtlx.c b/arch/mips/kernel/rtlx.c
new file mode 100644
index 000000000..18c509c59
--- /dev/null
+++ b/arch/mips/kernel/rtlx.c
@@ -0,0 +1,409 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2005, 06 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
+#include <linux/atomic.h>
+#include <linux/sched/signal.h>
+
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/processor.h>
+#include <asm/rtlx.h>
+#include <asm/setup.h>
+#include <asm/vpe.h>
+
+static int sp_stopping;
+struct rtlx_info *rtlx;
+struct chan_waitqueues channel_wqs[RTLX_CHANNELS];
+struct vpe_notifications rtlx_notify;
+void (*aprp_hook)(void) = NULL;
+EXPORT_SYMBOL(aprp_hook);
+
+static void __used dump_rtlx(void)
+{
+ int i;
+
+ pr_info("id 0x%lx state %d\n", rtlx->id, rtlx->state);
+
+ for (i = 0; i < RTLX_CHANNELS; i++) {
+ struct rtlx_channel *chan = &rtlx->channel[i];
+
+ pr_info(" rt_state %d lx_state %d buffer_size %d\n",
+ chan->rt_state, chan->lx_state, chan->buffer_size);
+
+ pr_info(" rt_read %d rt_write %d\n",
+ chan->rt_read, chan->rt_write);
+
+ pr_info(" lx_read %d lx_write %d\n",
+ chan->lx_read, chan->lx_write);
+
+ pr_info(" rt_buffer <%s>\n", chan->rt_buffer);
+ pr_info(" lx_buffer <%s>\n", chan->lx_buffer);
+ }
+}
+
+/* Called once we have the address of the shared structure from the SP side. */
+static int rtlx_init(struct rtlx_info *rtlxi)
+{
+ if (rtlxi->id != RTLX_ID) {
+ pr_err("no valid RTLX id at 0x%p 0x%lx\n", rtlxi, rtlxi->id);
+ return -ENOEXEC;
+ }
+
+ rtlx = rtlxi;
+
+ return 0;
+}
+
+/* notifications */
+void rtlx_starting(int vpe)
+{
+ int i;
+ sp_stopping = 0;
+
+ /* force a reload of rtlx */
+ rtlx = NULL;
+
+ /* wake up any sleeping rtlx_open's */
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ wake_up_interruptible(&channel_wqs[i].lx_queue);
+}
+
+void rtlx_stopping(int vpe)
+{
+ int i;
+
+ sp_stopping = 1;
+ for (i = 0; i < RTLX_CHANNELS; i++)
+ wake_up_interruptible(&channel_wqs[i].lx_queue);
+}
+
+
+int rtlx_open(int index, int can_sleep)
+{
+ struct rtlx_info **p;
+ struct rtlx_channel *chan;
+ enum rtlx_state state;
+ int ret = 0;
+
+ if (index >= RTLX_CHANNELS) {
+ pr_debug("rtlx_open index out of range\n");
+ return -ENOSYS;
+ }
+
+ if (atomic_inc_return(&channel_wqs[index].in_open) > 1) {
+ pr_debug("rtlx_open channel %d already opened\n", index);
+ ret = -EBUSY;
+ goto out_fail;
+ }
+
+ if (rtlx == NULL) {
+ p = vpe_get_shared(aprp_cpu_index());
+ if (p == NULL) {
+ if (can_sleep) {
+ ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (p = vpe_get_shared(aprp_cpu_index())));
+ if (ret)
+ goto out_fail;
+ } else {
+ pr_debug("No SP program loaded, and device opened with O_NONBLOCK\n");
+ ret = -ENOSYS;
+ goto out_fail;
+ }
+ }
+
+ smp_rmb();
+ if (*p == NULL) {
+ if (can_sleep) {
+ DEFINE_WAIT(wait);
+
+ for (;;) {
+ prepare_to_wait(
+ &channel_wqs[index].lx_queue,
+ &wait, TASK_INTERRUPTIBLE);
+ smp_rmb();
+ if (*p != NULL)
+ break;
+ if (!signal_pending(current)) {
+ schedule();
+ continue;
+ }
+ ret = -ERESTARTSYS;
+ goto out_fail;
+ }
+ finish_wait(&channel_wqs[index].lx_queue,
+ &wait);
+ } else {
+ pr_err(" *vpe_get_shared is NULL. Has an SP program been loaded?\n");
+ ret = -ENOSYS;
+ goto out_fail;
+ }
+ }
+
+ if ((unsigned int)*p < KSEG0) {
+ pr_warn("vpe_get_shared returned an invalid pointer maybe an error code %d\n",
+ (int)*p);
+ ret = -ENOSYS;
+ goto out_fail;
+ }
+
+ ret = rtlx_init(*p);
+ if (ret < 0)
+ goto out_ret;
+ }
+
+ chan = &rtlx->channel[index];
+
+ state = xchg(&chan->lx_state, RTLX_STATE_OPENED);
+ if (state == RTLX_STATE_OPENED) {
+ ret = -EBUSY;
+ goto out_fail;
+ }
+
+out_fail:
+ smp_mb();
+ atomic_dec(&channel_wqs[index].in_open);
+ smp_mb();
+
+out_ret:
+ return ret;
+}
+
+int rtlx_release(int index)
+{
+ if (rtlx == NULL) {
+ pr_err("rtlx_release() with null rtlx\n");
+ return 0;
+ }
+ rtlx->channel[index].lx_state = RTLX_STATE_UNUSED;
+ return 0;
+}
+
+unsigned int rtlx_read_poll(int index, int can_sleep)
+{
+ struct rtlx_channel *chan;
+
+ if (rtlx == NULL)
+ return 0;
+
+ chan = &rtlx->channel[index];
+
+ /* data available to read? */
+ if (chan->lx_read == chan->lx_write) {
+ if (can_sleep) {
+ int ret = __wait_event_interruptible(
+ channel_wqs[index].lx_queue,
+ (chan->lx_read != chan->lx_write) ||
+ sp_stopping);
+ if (ret)
+ return ret;
+
+ if (sp_stopping)
+ return 0;
+ } else
+ return 0;
+ }
+
+ return (chan->lx_write + chan->buffer_size - chan->lx_read)
+ % chan->buffer_size;
+}
+
+static inline int write_spacefree(int read, int write, int size)
+{
+ if (read == write) {
+ /*
+ * Never fill the buffer completely; that way the indexes are
+ * equal if and only if the buffer is empty, and unequal
+ * whenever data is available.
+ */
+ return size - 1;
+ }
+
+ return ((read + size - write) % size) - 1;
+}
+
+unsigned int rtlx_write_poll(int index)
+{
+ struct rtlx_channel *chan = &rtlx->channel[index];
+
+ return write_spacefree(chan->rt_read, chan->rt_write,
+ chan->buffer_size);
+}
+
+ssize_t rtlx_read(int index, void __user *buff, size_t count)
+{
+ size_t lx_write, fl = 0L;
+ struct rtlx_channel *lx;
+ unsigned long failed;
+
+ if (rtlx == NULL)
+ return -ENOSYS;
+
+ lx = &rtlx->channel[index];
+
+ mutex_lock(&channel_wqs[index].mutex);
+ smp_rmb();
+ lx_write = lx->lx_write;
+
+ /* find out how much in total */
+ count = min(count,
+ (size_t)(lx_write + lx->buffer_size - lx->lx_read)
+ % lx->buffer_size);
+
+ /* then how much from the read pointer onwards */
+ fl = min(count, (size_t)lx->buffer_size - lx->lx_read);
+
+ failed = copy_to_user(buff, lx->lx_buffer + lx->lx_read, fl);
+ if (failed)
+ goto out;
+
+ /* and if there is anything left at the beginning of the buffer */
+ if (count - fl)
+ failed = copy_to_user(buff + fl, lx->lx_buffer, count - fl);
+
+out:
+ count -= failed;
+
+ smp_wmb();
+ lx->lx_read = (lx->lx_read + count) % lx->buffer_size;
+ smp_wmb();
+ mutex_unlock(&channel_wqs[index].mutex);
+
+ return count;
+}
+
+ssize_t rtlx_write(int index, const void __user *buffer, size_t count)
+{
+ struct rtlx_channel *rt;
+ unsigned long failed;
+ size_t rt_read;
+ size_t fl;
+
+ if (rtlx == NULL)
+ return -ENOSYS;
+
+ rt = &rtlx->channel[index];
+
+ mutex_lock(&channel_wqs[index].mutex);
+ smp_rmb();
+ rt_read = rt->rt_read;
+
+ /* total number of bytes to copy */
+ count = min_t(size_t, count, write_spacefree(rt_read, rt->rt_write,
+ rt->buffer_size));
+
+ /* first bit from write pointer to the end of the buffer, or count */
+ fl = min(count, (size_t) rt->buffer_size - rt->rt_write);
+
+ failed = copy_from_user(rt->rt_buffer + rt->rt_write, buffer, fl);
+ if (failed)
+ goto out;
+
+ /* if there's any left copy to the beginning of the buffer */
+ if (count - fl)
+ failed = copy_from_user(rt->rt_buffer, buffer + fl, count - fl);
+
+out:
+ count -= failed;
+
+ smp_wmb();
+ rt->rt_write = (rt->rt_write + count) % rt->buffer_size;
+ smp_wmb();
+ mutex_unlock(&channel_wqs[index].mutex);
+
+ _interrupt_sp();
+
+ return count;
+}
+
+
+static int file_open(struct inode *inode, struct file *filp)
+{
+ return rtlx_open(iminor(inode), (filp->f_flags & O_NONBLOCK) ? 0 : 1);
+}
+
+static int file_release(struct inode *inode, struct file *filp)
+{
+ return rtlx_release(iminor(inode));
+}
+
+static __poll_t file_poll(struct file *file, poll_table *wait)
+{
+ int minor = iminor(file_inode(file));
+ __poll_t mask = 0;
+
+ poll_wait(file, &channel_wqs[minor].rt_queue, wait);
+ poll_wait(file, &channel_wqs[minor].lx_queue, wait);
+
+ if (rtlx == NULL)
+ return 0;
+
+ /* data available to read? */
+ if (rtlx_read_poll(minor, 0))
+ mask |= EPOLLIN | EPOLLRDNORM;
+
+ /* space to write */
+ if (rtlx_write_poll(minor))
+ mask |= EPOLLOUT | EPOLLWRNORM;
+
+ return mask;
+}
+
+static ssize_t file_read(struct file *file, char __user *buffer, size_t count,
+ loff_t *ppos)
+{
+ int minor = iminor(file_inode(file));
+
+ /* data available? */
+ if (!rtlx_read_poll(minor, (file->f_flags & O_NONBLOCK) ? 0 : 1))
+ return 0; /* -EAGAIN makes 'cat' whine */
+
+ return rtlx_read(minor, buffer, count);
+}
+
+static ssize_t file_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ int minor = iminor(file_inode(file));
+
+ /* any space left... */
+ if (!rtlx_write_poll(minor)) {
+ int ret;
+
+ if (file->f_flags & O_NONBLOCK)
+ return -EAGAIN;
+
+ ret = __wait_event_interruptible(channel_wqs[minor].rt_queue,
+ rtlx_write_poll(minor));
+ if (ret)
+ return ret;
+ }
+
+ return rtlx_write(minor, buffer, count);
+}
+
+const struct file_operations rtlx_fops = {
+ .owner = THIS_MODULE,
+ .open = file_open,
+ .release = file_release,
+ .write = file_write,
+ .read = file_read,
+ .poll = file_poll,
+ .llseek = noop_llseek,
+};
+
+module_init(rtlx_module_init);
+module_exit(rtlx_module_exit);
+
+MODULE_DESCRIPTION("MIPS RTLX");
+MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
+MODULE_LICENSE("GPL");
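write_spacefree() deliberately keeps one slot unused, so read == write can only ever mean "empty". A standalone userspace demonstration of the arithmetic, with made-up index values:

    #include <stdio.h>

    /* Same computation as write_spacefree() in rtlx.c. */
    static int write_spacefree(int read, int write, int size)
    {
            if (read == write)
                    return size - 1;   /* empty, minus the reserved slot */
            return ((read + size - write) % size) - 1;
    }

    int main(void)
    {
            printf("%d\n", write_spacefree(0, 0, 8));  /* empty: 7 */
            printf("%d\n", write_spacefree(2, 5, 8));  /* wrap-around: 4 */
            printf("%d\n", write_spacefree(3, 2, 8));  /* nearly full: 0 */
            return 0;
    }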
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
new file mode 100644
index 000000000..18dc9b345
--- /dev/null
+++ b/arch/mips/kernel/scall32-o32.S
@@ -0,0 +1,222 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995-99, 2000- 02, 06 Ralf Baechle <ralf@linux-mips.org>
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/isadep.h>
+#include <asm/sysmips.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/asm-offsets.h>
+
+ .align 5
+NESTED(handle_sys, PT_SIZE, sp)
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+
+ lw t1, PT_EPC(sp) # skip syscall on return
+
+ addiu t1, 4 # skip to next instruction
+ sw t1, PT_EPC(sp)
+
+ sw a3, PT_R26(sp) # save a3 for syscall restarting
+
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ */
+ lw t0, PT_R29(sp) # get old user stack pointer
+
+ /*
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ addu t4, t0, 32
+ bltz t4, bad_stack # -> sp is bad
+
+ /*
+ * Ok, copy the args from the user stack to the kernel stack.
+ */
+
+ .set push
+ .set noreorder
+ .set nomacro
+
+load_a4: user_lw(t5, 16(t0)) # argument #5 from usp
+load_a5: user_lw(t6, 20(t0)) # argument #6 from usp
+load_a6: user_lw(t7, 24(t0)) # argument #7 from usp
+load_a7: user_lw(t8, 28(t0)) # argument #8 from usp
+loads_done:
+
+ sw t5, 16(sp) # argument #5 to ksp
+ sw t6, 20(sp) # argument #6 to ksp
+ sw t7, 24(sp) # argument #7 to ksp
+ sw t8, 28(sp) # argument #8 to ksp
+ .set pop
+
+ .section __ex_table,"a"
+ PTR_WD load_a4, bad_stack_a4
+ PTR_WD load_a5, bad_stack_a5
+ PTR_WD load_a6, bad_stack_a6
+ PTR_WD load_a7, bad_stack_a7
+ .previous
+
+ lw t0, TI_FLAGS($28) # syscall tracing enabled?
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ and t0, t1
+ bnez t0, syscall_trace_entry # -> yes
+syscall_common:
+ subu v0, v0, __NR_O32_Linux # check syscall number
+ sltiu t0, v0, __NR_O32_Linux_syscalls
+ beqz t0, illegal_syscall
+
+ sll t0, v0, 2
+ la t1, sys_call_table
+ addu t1, t0
+ lw t2, (t1) # syscall routine
+
+ beqz t2, illegal_syscall
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sw t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ lw t1, PT_R2(sp) # syscall number
+ negu v0 # error
+ sw t1, PT_R0(sp) # save it for syscall restarting
+1: sw v0, PT_R2(sp) # result
+
+o32_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+
+ /*
+ * The syscall number is in v0 unless we called syscall(__NR_###),
+ * in which case the real syscall number is in a0.
+ */
+ move a1, v0
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
+ lw a1, PT_R4(sp)
+
+1: jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ lw v0, PT_R2(sp) # Restore syscall (maybe modified)
+ lw a0, PT_R4(sp) # Restore argument registers
+ lw a1, PT_R5(sp)
+ lw a2, PT_R6(sp)
+ lw a3, PT_R7(sp)
+ j syscall_common
+
+1: j syscall_exit
+
+/* ------------------------------------------------------------------------ */
+
+ /*
+ * Our open-coded access area sanity test for the stack pointer
+ * failed. We should probably handle this case a bit more drastically.
+ */
+bad_stack:
+ li v0, EFAULT
+ sw v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sw t0, PT_R7(sp)
+ j o32_syscall_exit
+
+bad_stack_a4:
+ li t5, 0
+ b load_a5
+
+bad_stack_a5:
+ li t6, 0
+ b load_a6
+
+bad_stack_a6:
+ li t7, 0
+ b load_a7
+
+bad_stack_a7:
+ li t8, 0
+ b loads_done
+
+ /*
+ * The system call does not exist in this kernel
+ */
+illegal_syscall:
+ li v0, ENOSYS # error
+ sw v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sw t0, PT_R7(sp)
+ j o32_syscall_exit
+ END(handle_sys)
+
+ LEAF(sys_syscall)
+ subu t0, a0, __NR_O32_Linux # check syscall number
+ sltiu v0, t0, __NR_O32_Linux_syscalls
+ beqz t0, einval # do not recurse
+ sll t1, t0, 2
+ beqz v0, einval
+ lw t2, sys_call_table(t1) # syscall routine
+
+ move a0, a1 # shift argument registers
+ move a1, a2
+ move a2, a3
+ lw a3, 16(sp)
+ lw t4, 20(sp)
+ lw t5, 24(sp)
+ lw t6, 28(sp)
+ sw t4, 16(sp)
+ sw t5, 20(sp)
+ sw t6, 24(sp)
+ jr t2
+ /* Unreached */
+
+einval: li v0, -ENOSYS
+ jr ra
+ END(sys_syscall)
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /*
+ * For FPU affinity scheduling on MIPS MT processors, we need to
+ * intercept sys_sched_xxxaffinity() calls until we get a proper hook
+ * in kernel/sched/core.c. Considered only temporary we only support
+ * these hooks for the 32-bit kernel - there is no MIPS64 MT processor
+ * atm.
+ */
+#define sys_sched_setaffinity mipsmt_sys_sched_setaffinity
+#define sys_sched_getaffinity mipsmt_sys_sched_getaffinity
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, native)
+#define __SYSCALL(nr, entry) PTR_WD entry
+ .align 2
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+#include <asm/syscall_table_o32.h>
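The table emitted at the end of the file is an X-macro expansion: the generated header lists every syscall once, and each consumer redefines __SYSCALL to stamp out its own representation (here, one PTR_WD word per entry). A userspace sketch of the technique with an invented three-entry list:

    #include <stdio.h>

    #define SYSCALL_LIST(X) \
            X(0, sys_read)  \
            X(1, sys_write) \
            X(2, sys_open)

    /* Each expansion of __SYSCALL emits one table slot, just as the
     * PTR_WD definition above emits one pointer-sized word per entry. */
    #define __SYSCALL(nr, entry) [nr] = #entry,
    static const char *sys_call_table[] = {
            SYSCALL_LIST(__SYSCALL)
    };
    #undef __SYSCALL

    int main(void)
    {
            for (unsigned int i = 0; i < 3; i++)
                    printf("%u -> %s\n", i, sys_call_table[i]);
            return 0;
    }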
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
new file mode 100644
index 000000000..97456b2ca
--- /dev/null
+++ b/arch/mips/kernel/scall64-n32.S
@@ -0,0 +1,107 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+#ifndef CONFIG_MIPS32_O32
+/* No O32, so define handle_sys here */
+#define handle_sysn32 handle_sys
+#endif
+
+ .align 5
+NESTED(handle_sysn32, PT_SIZE, sp)
+#ifndef CONFIG_MIPS32_O32
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+#endif
+
+ dsubu t0, v0, __NR_N32_Linux # check syscall number
+ sltiu t0, t0, __NR_N32_Linux_syscalls
+
+#ifndef CONFIG_MIPS32_O32
+ ld t1, PT_EPC(sp) # skip syscall on return
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+#endif
+ beqz t0, not_n32_scall
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, n32_syscall_trace_entry
+
+syscall_common:
+ dsll t0, v0, 3 # offset into table
+ ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+n32_syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+ move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+
+ dsubu t2, v0, __NR_N32_Linux # check (new) syscall number
+ sltiu t0, t2, __NR_N32_Linux_syscalls
+ beqz t0, not_n32_scall
+
+ j syscall_common
+
+1: j syscall_exit
+
+not_n32_scall:
+ /* This is not an n32 compatibility syscall; pass it on to
+ * the n64 syscall handlers. */
+ j handle_sys64
+
+ END(handle_sysn32)
+
+#define __SYSCALL(nr, entry) PTR_WD entry
+ .type sysn32_call_table, @object
+EXPORT(sysn32_call_table)
+#include <asm/syscall_table_n32.h>
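The `li t0, -EMAXERRNO - 1; sltu t0, t0, v0` sequence shared by all these handlers implements the classic MIPS syscall return convention: a3 (PT_R7) comes back as an error flag, and v0 holds either the result or a positive errno. A sketch of how a libc-style wrapper would consume that convention (mips_syscall_ret is a hypothetical name; real C libraries do this in assembly right after the syscall instruction):

    #include <errno.h>

    static long mips_syscall_ret(unsigned long v0, unsigned long a3)
    {
            if (a3) {                /* kernel stored 1 in PT_R7 */
                    errno = (int)v0; /* v0 was negated to a positive errno */
                    return -1;
            }
            return (long)v0;         /* plain result */
    }

    int main(void)
    {
            /* Simulate a failing call: v0 = 9 (EBADF), a3 = 1. */
            return (mips_syscall_ret(9, 1) == -1 && errno == 9) ? 0 : 1;
    }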
diff --git a/arch/mips/kernel/scall64-n64.S b/arch/mips/kernel/scall64-n64.S
new file mode 100644
index 000000000..e6264aa62
--- /dev/null
+++ b/arch/mips/kernel/scall64-n64.S
@@ -0,0 +1,115 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 96, 97, 98, 99, 2000, 01, 02 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/asm-offsets.h>
+#include <asm/sysmips.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+
+#ifndef CONFIG_MIPS32_COMPAT
+/* Neither O32 nor N32, so define handle_sys here */
+#define handle_sys64 handle_sys
+#endif
+
+ .align 5
+NESTED(handle_sys64, PT_SIZE, sp)
+#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
+ /*
+ * When 32-bit compatibility is configured, scall_o32.S
+ * already did this.
+ */
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+#endif
+
+#if !defined(CONFIG_MIPS32_O32) && !defined(CONFIG_MIPS32_N32)
+ ld t1, PT_EPC(sp) # skip syscall on return
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+#endif
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, syscall_trace_entry
+
+syscall_common:
+ dsubu t2, v0, __NR_64_Linux
+ sltiu t0, t2, __NR_64_Linux_syscalls
+ beqz t0, illegal_syscall
+
+ dsll t0, t2, 3 # offset into table
+ dla t2, sys_call_table
+ daddu t0, t2, t0
+ ld t2, (t0) # syscall routine
+ beqz t2, illegal_syscall
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+n64_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+syscall_trace_entry:
+ SAVE_STATIC
+ move a0, sp
+ move a1, v0
+ jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+ j syscall_common
+
+1: j syscall_exit
+
+illegal_syscall:
+ /* This also isn't a 64-bit syscall, throw an error. */
+ li v0, ENOSYS # error
+ sd v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sd t0, PT_R7(sp)
+ j n64_syscall_exit
+ END(handle_sys64)
+
+#define __SYSCALL(nr, entry) PTR_WD entry
+ .align 3
+ .type sys_call_table, @object
+EXPORT(sys_call_table)
+#include <asm/syscall_table_n64.h>
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
new file mode 100644
index 000000000..d3c2616cb
--- /dev/null
+++ b/arch/mips/kernel/scall64-o32.S
@@ -0,0 +1,221 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 - 2000, 2001 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ * Copyright (C) 2004 Thiemo Seufer
+ *
+ * Hairy: the userspace application uses a different argument-passing
+ * convention than the kernel, so we have to translate things from the
+ * o32 to the ABI64 calling convention. 64-bit syscalls are also
+ * processed here for now.
+ */
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/irqflags.h>
+#include <asm/mipsregs.h>
+#include <asm/regdef.h>
+#include <asm/stackframe.h>
+#include <asm/thread_info.h>
+#include <asm/unistd.h>
+#include <asm/sysmips.h>
+
+ .align 5
+NESTED(handle_sys, PT_SIZE, sp)
+ .set noat
+ SAVE_SOME
+ TRACE_IRQS_ON_RELOAD
+ STI
+ .set at
+ ld t1, PT_EPC(sp) # skip syscall on return
+
+ dsubu t0, v0, __NR_O32_Linux # check syscall number
+ sltiu t0, t0, __NR_O32_Linux_syscalls
+ daddiu t1, 4 # skip to next instruction
+ sd t1, PT_EPC(sp)
+ beqz t0, not_o32_scall
+#if 0
+ SAVE_ALL
+ move a1, v0
+ ASM_PRINT("Scall %ld\n")
+ RESTORE_ALL
+#endif
+
+ /* We don't want to stumble over broken sign extensions from
+ userland. O32 never uses the upper half. */
+ sll a0, a0, 0
+ sll a1, a1, 0
+ sll a2, a2, 0
+ sll a3, a3, 0
+
+ sd a3, PT_R26(sp) # save a3 for syscall restarting
+
+ /*
+ * More than four arguments. Try to deal with it by copying the
+ * stack arguments from the user stack to the kernel stack.
+ * This Sucks (TM).
+ *
+ * We intentionally keep the kernel stack a little below the top of
+ * userspace so we don't have to do a slower byte accurate check here.
+ */
+ ld t0, PT_R29(sp) # get old user stack pointer
+ daddu t1, t0, 32
+ bltz t1, bad_stack
+
+load_a4: lw a4, 16(t0) # argument #5 from usp
+load_a5: lw a5, 20(t0) # argument #6 from usp
+load_a6: lw a6, 24(t0) # argument #7 from usp
+load_a7: lw a7, 28(t0) # argument #8 from usp
+loads_done:
+
+ .section __ex_table,"a"
+ PTR_WD load_a4, bad_stack_a4
+ PTR_WD load_a5, bad_stack_a5
+ PTR_WD load_a6, bad_stack_a6
+ PTR_WD load_a7, bad_stack_a7
+ .previous
+
+ li t1, _TIF_WORK_SYSCALL_ENTRY
+ LONG_L t0, TI_FLAGS($28) # syscall tracing enabled?
+ and t0, t1, t0
+ bnez t0, trace_a_syscall
+
+syscall_common:
+ dsll t0, v0, 3 # offset into table
+ ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
+
+ jalr t2 # Do The Real Thing (TM)
+
+ li t0, -EMAXERRNO - 1 # error?
+ sltu t0, t0, v0
+ sd t0, PT_R7(sp) # set error flag
+ beqz t0, 1f
+
+ ld t1, PT_R2(sp) # syscall number
+ dnegu v0 # error
+ sd t1, PT_R0(sp) # save it for syscall restarting
+1: sd v0, PT_R2(sp) # result
+
+o32_syscall_exit:
+ j syscall_exit_partial
+
+/* ------------------------------------------------------------------------ */
+
+trace_a_syscall:
+ SAVE_STATIC
+ sd a4, PT_R8(sp) # Save argument registers
+ sd a5, PT_R9(sp)
+ sd a6, PT_R10(sp)
+ sd a7, PT_R11(sp) # For indirect syscalls
+
+ move a0, sp
+ /*
+ * The absolute syscall number is in v0 unless we called
+ * syscall(__NR_###), in which case the real syscall number is in a0.
+ * Note: NR_syscall is the first O32 syscall, but the macro is only
+ * defined when compiling with -mabi=32 (CONFIG_32BIT); therefore
+ * __NR_O32_Linux (4000) is used.
+ */
+ .set push
+ .set reorder
+ subu t1, v0, __NR_O32_Linux
+ move a1, v0
+ bnez t1, 1f /* __NR_syscall at offset 0 */
+ ld a1, PT_R4(sp) /* Arg1 for __NR_syscall case */
+ .set pop
+
+1: jal syscall_trace_enter
+
+ bltz v0, 1f # seccomp failed? Skip syscall
+
+ RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
+ ld a0, PT_R4(sp) # Restore argument registers
+ ld a1, PT_R5(sp)
+ ld a2, PT_R6(sp)
+ ld a3, PT_R7(sp)
+ ld a4, PT_R8(sp)
+ ld a5, PT_R9(sp)
+ ld a6, PT_R10(sp)
+ ld a7, PT_R11(sp) # For indirect syscalls
+
+ dsubu t0, v0, __NR_O32_Linux # check (new) syscall number
+ sltiu t0, t0, __NR_O32_Linux_syscalls
+ beqz t0, not_o32_scall
+
+ j syscall_common
+
+1: j syscall_exit
+
+/* ------------------------------------------------------------------------ */
+
+ /*
+ * The stack pointer for a call with more than 4 arguments is bad.
+ */
+bad_stack:
+ li v0, EFAULT
+ sd v0, PT_R2(sp)
+ li t0, 1 # set error flag
+ sd t0, PT_R7(sp)
+ j o32_syscall_exit
+
+bad_stack_a4:
+ li a4, 0
+ b load_a5
+
+bad_stack_a5:
+ li a5, 0
+ b load_a6
+
+bad_stack_a6:
+ li a6, 0
+ b load_a7
+
+bad_stack_a7:
+ li a7, 0
+ b loads_done
+
+not_o32_scall:
+ /*
+ * This is not an o32 compatibility syscall; pass it on
+ * to the 64-bit syscall handlers.
+ */
+#ifdef CONFIG_MIPS32_N32
+ j handle_sysn32
+#else
+ j handle_sys64
+#endif
+ END(handle_sys)
+
+LEAF(sys32_syscall)
+ subu t0, a0, __NR_O32_Linux # check syscall number
+ sltiu v0, t0, __NR_O32_Linux_syscalls
+ beqz t0, einval # do not recurse
+ dsll t1, t0, 3
+ beqz v0, einval
+ ld t2, sys32_call_table(t1) # syscall routine
+
+ move a0, a1 # shift argument registers
+ move a1, a2
+ move a2, a3
+ move a3, a4
+ move a4, a5
+ move a5, a6
+ move a6, a7
+ jr t2
+ /* Unreached */
+
+einval: li v0, -ENOSYS
+ jr ra
+ END(sys32_syscall)
+
+#define __SYSCALL_WITH_COMPAT(nr, native, compat) __SYSCALL(nr, compat)
+#define __SYSCALL(nr, entry) PTR_WD entry
+ .align 3
+ .type sys32_call_table,@object
+EXPORT(sys32_call_table)
+#include <asm/syscall_table_o32.h>
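The `sll a0, a0, 0` sequence near the top of handle_sys works because on MIPS64 every 32-bit operation writes its result sign-extended into the full 64-bit register, yielding the canonical form no matter what userland left in the upper half. The C equivalent of that canonicalization:

    #include <stdint.h>
    #include <stdio.h>

    /* Truncate to 32 bits and sign-extend, as sll reg, reg, 0 does. */
    static int64_t canonicalize_o32(uint64_t reg)
    {
            return (int32_t)reg;
    }

    int main(void)
    {
            /* Upper half full of garbage, lower half holding -1. */
            uint64_t reg = 0xdeadbeefffffffffULL;

            printf("%lld\n", (long long)canonicalize_o32(reg)); /* -1 */
            return 0;
    }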
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
new file mode 100644
index 000000000..24560501c
--- /dev/null
+++ b/arch/mips/kernel/segment.c
@@ -0,0 +1,93 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+
+#include <linux/kernel.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <asm/cpu.h>
+#include <asm/debug.h>
+#include <asm/mipsregs.h>
+
+static void build_segment_config(char *str, unsigned int cfg)
+{
+ unsigned int am;
+ static const char * const am_str[] = {
+ "UK", "MK", "MSK", "MUSK", "MUSUK", "USK",
+ "RSRVD", "UUSK"};
+
+ /* Segment access mode. */
+ am = (cfg & MIPS_SEGCFG_AM) >> MIPS_SEGCFG_AM_SHIFT;
+ str += sprintf(str, "%-5s", am_str[am]);
+
+ /*
+ * Access modes MK, MSK and MUSK are mapped segments. Therefore
+ * there is no direct physical address mapping unless the segment
+ * becomes unmapped uncached at error level due to EU.
+ */
+ if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU))
+ str += sprintf(str, " %03lx",
+ ((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ else
+ str += sprintf(str, " UND");
+
+ if ((am == 0) || (am > 3))
+ str += sprintf(str, " %01ld",
+ ((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
+ else
+ str += sprintf(str, " U");
+
+ /* Exception configuration. */
+ str += sprintf(str, " %01ld\n",
+ ((cfg & MIPS_SEGCFG_EU) >> MIPS_SEGCFG_EU_SHIFT));
+}
+
+static int segments_show(struct seq_file *m, void *v)
+{
+ unsigned int segcfg;
+ char str[42];
+
+ seq_puts(m, "Segment Virtual Size Access Mode Physical Caching EU\n");
+ seq_puts(m, "------- ------- ---- ----------- -------- ------- --\n");
+
+ segcfg = read_c0_segctl0();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 0 e0000000 512M %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 1 c0000000 512M %s", str);
+
+ segcfg = read_c0_segctl1();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 2 a0000000 512M %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 3 80000000 512M %s", str);
+
+ segcfg = read_c0_segctl2();
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 4 40000000 1G %s", str);
+
+ segcfg >>= 16;
+ build_segment_config(str, segcfg);
+ seq_printf(m, " 5 00000000 1G %s\n", str);
+
+ return 0;
+}
+DEFINE_SHOW_ATTRIBUTE(segments);
+
+static int __init segments_info(void)
+{
+ if (cpu_has_segments)
+ debugfs_create_file("segments", S_IRUGO, mips_debugfs_dir, NULL,
+ &segments_fops);
+ return 0;
+}
+
+device_initcall(segments_info);
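segments_show() is wired up through DEFINE_SHOW_ATTRIBUTE(), which generates segments_fops around a <name>_show(struct seq_file *, void *) routine for read-only seq_file use. A minimal sketch of the same pattern with hypothetical names:

    #include <linux/debugfs.h>
    #include <linux/init.h>
    #include <linux/seq_file.h>

    static int example_show(struct seq_file *m, void *v)
    {
            seq_puts(m, "hello from debugfs\n");
            return 0;
    }
    /* Generates example_fops (open/read/llseek/release) around example_show. */
    DEFINE_SHOW_ATTRIBUTE(example);

    static int __init example_debugfs_init(void)
    {
            debugfs_create_file("example", 0444, NULL, NULL, &example_fops);
            return 0;
    }
    device_initcall(example_debugfs_init);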
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
new file mode 100644
index 000000000..e46e7ec76
--- /dev/null
+++ b/arch/mips/kernel/setup.c
@@ -0,0 +1,855 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1995 Waldorf Electronics
+ * Copyright (C) 1994, 95, 96, 97, 98, 99, 2000, 01, 02, 03 Ralf Baechle
+ * Copyright (C) 1996 Stoned Elipot
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2002, 2007 Maciej W. Rozycki
+ */
+#include <linux/init.h>
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+#include <linux/screen_info.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/root_dev.h>
+#include <linux/highmem.h>
+#include <linux/console.h>
+#include <linux/pfn.h>
+#include <linux/debugfs.h>
+#include <linux/kexec.h>
+#include <linux/sizes.h>
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/decompress/generic.h>
+#include <linux/of_fdt.h>
+#include <linux/dmi.h>
+#include <linux/crash_dump.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/bugs.h>
+#include <asm/cache.h>
+#include <asm/cdmm.h>
+#include <asm/cpu.h>
+#include <asm/debug.h>
+#include <asm/mmzone.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/smp-ops.h>
+#include <asm/prom.h>
+#include <asm/fw/fw.h>
+
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+char __section(".appended_dtb") __appended_dtb[0x100000];
+#endif /* CONFIG_MIPS_ELF_APPENDED_DTB */
+
+struct cpuinfo_mips cpu_data[NR_CPUS] __read_mostly;
+
+EXPORT_SYMBOL(cpu_data);
+
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
+/*
+ * Setup information
+ *
+ * These are initialized so they are in the .data section
+ */
+unsigned long mips_machtype __read_mostly = MACH_UNKNOWN;
+
+EXPORT_SYMBOL(mips_machtype);
+
+static char __initdata command_line[COMMAND_LINE_SIZE];
+char __initdata arcs_cmdline[COMMAND_LINE_SIZE];
+
+#ifdef CONFIG_CMDLINE_BOOL
+static const char builtin_cmdline[] __initconst = CONFIG_CMDLINE;
+#else
+static const char builtin_cmdline[] __initconst = "";
+#endif
+
+/*
+ * mips_io_port_base is the beginning of the address space to which
+ * x86-style I/O ports are mapped.
+ */
+unsigned long mips_io_port_base = -1;
+EXPORT_SYMBOL(mips_io_port_base);
+
+static struct resource code_resource = { .name = "Kernel code", };
+static struct resource data_resource = { .name = "Kernel data", };
+static struct resource bss_resource = { .name = "Kernel bss", };
+
+unsigned long __kaslr_offset __ro_after_init;
+EXPORT_SYMBOL(__kaslr_offset);
+
+static void *detect_magic __initdata = detect_memory_region;
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+unsigned long ARCH_PFN_OFFSET;
+EXPORT_SYMBOL(ARCH_PFN_OFFSET);
+#endif
+
+void __init detect_memory_region(phys_addr_t start, phys_addr_t sz_min, phys_addr_t sz_max)
+{
+ void *dm = &detect_magic;
+ phys_addr_t size;
+
+ for (size = sz_min; size < sz_max; size <<= 1) {
+ if (!memcmp(dm, dm + size, sizeof(detect_magic)))
+ break;
+ }
+
+ pr_debug("Memory: %lluMB of RAM detected at 0x%llx (min: %lluMB, max: %lluMB)\n",
+ ((unsigned long long) size) / SZ_1M,
+ (unsigned long long) start,
+ ((unsigned long long) sz_min) / SZ_1M,
+ ((unsigned long long) sz_max) / SZ_1M);
+
+ memblock_add(start, size);
+}
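detect_memory_region() sizes RAM by exploiting address aliasing: the magic value written at the region start is compared against ever larger power-of-two offsets, and the first offset at which the contents match back is where the address space wrapped, i.e. the RAM size. A userspace simulation of the probe (the 64 MiB figure and the wrapping bus() model are assumptions of the demo):

    #include <stdio.h>
    #include <string.h>

    #define RAM_SIZE (64u << 20)    /* pretend the board has 64 MiB */

    static unsigned char ram[RAM_SIZE];

    /* Simulated bus: addresses beyond the real RAM wrap back around,
     * which is exactly the aliasing the kernel probe relies on. */
    static void *bus(unsigned long addr)
    {
            return &ram[addr % RAM_SIZE];
    }

    int main(void)
    {
            const unsigned long magic = 0x4d495053;
            unsigned long size;

            memcpy(bus(0), &magic, sizeof(magic));

            for (size = 4u << 20; size < (512u << 20); size <<= 1)
                    if (!memcmp(bus(0), bus(size), sizeof(magic)))
                            break;  /* aliased: size == actual RAM size */

            printf("detected %lu MiB\n", size >> 20);  /* 64 MiB */
            return 0;
    }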
+
+/*
+ * Manage initrd
+ */
+#ifdef CONFIG_BLK_DEV_INITRD
+
+static int __init rd_start_early(char *p)
+{
+ unsigned long start = memparse(p, &p);
+
+#ifdef CONFIG_64BIT
+ /* Guess if the sign extension was forgotten by bootloader */
+ if (start < XKPHYS)
+ start = (int)start;
+#endif
+ initrd_start = start;
+ initrd_end += start;
+ return 0;
+}
+early_param("rd_start", rd_start_early);
+
+static int __init rd_size_early(char *p)
+{
+ initrd_end += memparse(p, &p);
+ return 0;
+}
+early_param("rd_size", rd_size_early);
+
+/* Returns the next free pfn after the initrd. */
+static unsigned long __init init_initrd(void)
+{
+ unsigned long end;
+
+ /*
+ * Board-specific code or the command line parser should have
+ * already set up initrd_start and initrd_end. In that case,
+ * perform sanity checks and use them if all looks good.
+ */
+ if (!initrd_start || initrd_end <= initrd_start)
+ goto disable;
+
+ if (initrd_start & ~PAGE_MASK) {
+ pr_err("initrd start must be page aligned\n");
+ goto disable;
+ }
+
+ /*
+ * Sanitize the initrd addresses. For example, firmware can't
+ * know whether it needs to pass them as 64-bit values when the
+ * kernel has been built purely 32-bit. We also need to switch
+ * from KSEG0 to XKPHYS addresses now, so that the code can
+ * safely use __pa().
+ */
+ end = __pa(initrd_end);
+ initrd_end = (unsigned long)__va(end);
+ initrd_start = (unsigned long)__va(__pa(initrd_start));
+
+ if (initrd_start < PAGE_OFFSET) {
+ pr_err("initrd start < PAGE_OFFSET\n");
+ goto disable;
+ }
+
+ ROOT_DEV = Root_RAM0;
+ return PFN_UP(end);
+disable:
+ initrd_start = 0;
+ initrd_end = 0;
+ return 0;
+}
+
+/* In some conditions (e.g. big endian bootloader with a little endian
+ kernel), the initrd might appear byte swapped. Try to detect this and
+ byte swap it if needed. */
+static void __init maybe_bswap_initrd(void)
+{
+#if defined(CONFIG_CPU_CAVIUM_OCTEON)
+ u64 buf;
+
+ /* Check for CPIO signature */
+ if (!memcmp((void *)initrd_start, "070701", 6))
+ return;
+
+ /* Check for compressed initrd */
+ if (decompress_method((unsigned char *)initrd_start, 8, NULL))
+ return;
+
+ /* Try again with a byte swapped header */
+ buf = swab64p((u64 *)initrd_start);
+ if (!memcmp(&buf, "070701", 6) ||
+ decompress_method((unsigned char *)(&buf), 8, NULL)) {
+ unsigned long i;
+
+ pr_info("Byteswapped initrd detected\n");
+ for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
+ swab64s((u64 *)i);
+ }
+#endif
+}
+
+static void __init finalize_initrd(void)
+{
+ unsigned long size = initrd_end - initrd_start;
+
+ if (size == 0) {
+ printk(KERN_INFO "Initrd not found or empty");
+ goto disable;
+ }
+ if (__pa(initrd_end) > PFN_PHYS(max_low_pfn)) {
+ printk(KERN_ERR "Initrd extends beyond end of memory");
+ goto disable;
+ }
+
+ maybe_bswap_initrd();
+
+ memblock_reserve(__pa(initrd_start), size);
+ initrd_below_start_ok = 1;
+
+ pr_info("Initial ramdisk at: 0x%lx (%lu bytes)\n",
+ initrd_start, size);
+ return;
+disable:
+ printk(KERN_CONT " - disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+}
+
+#else /* !CONFIG_BLK_DEV_INITRD */
+
+static unsigned long __init init_initrd(void)
+{
+ return 0;
+}
+
+#define finalize_initrd() do {} while (0)
+
+#endif
+
+/*
+ * Initialize the bootmem allocator. It also sets up initrd-related
+ * data if needed.
+ */
+#if defined(CONFIG_SGI_IP27) || (defined(CONFIG_CPU_LOONGSON64) && defined(CONFIG_NUMA))
+
+static void __init bootmem_init(void)
+{
+ init_initrd();
+ finalize_initrd();
+}
+
+#else /* !CONFIG_SGI_IP27 */
+
+static void __init bootmem_init(void)
+{
+ phys_addr_t ramstart, ramend;
+ unsigned long start, end;
+ int i;
+
+ ramstart = memblock_start_of_DRAM();
+ ramend = memblock_end_of_DRAM();
+
+ /*
+ * Sanity check any INITRD first. We don't take it into account
+ * for bootmem setup initially, relying on the end of the kernel
+ * code as our memory range starting point. Once bootmem is
+ * initialized we will reserve the area used for the initrd.
+ */
+ init_initrd();
+
+ /* Reserve memory occupied by kernel. */
+ memblock_reserve(__pa_symbol(&_text),
+ __pa_symbol(&_end) - __pa_symbol(&_text));
+
+ /* max_low_pfn is not a number of pages but the end pfn of low mem */
+
+#ifdef CONFIG_MIPS_AUTO_PFN_OFFSET
+ ARCH_PFN_OFFSET = PFN_UP(ramstart);
+#else
+ /*
+ * Reserve any memory between the start of RAM and PHYS_OFFSET
+ */
+ if (ramstart > PHYS_OFFSET)
+ memblock_reserve(PHYS_OFFSET, ramstart - PHYS_OFFSET);
+
+ if (PFN_UP(ramstart) > ARCH_PFN_OFFSET) {
+ pr_info("Wasting %lu bytes for tracking %lu unused pages\n",
+ (unsigned long)((PFN_UP(ramstart) - ARCH_PFN_OFFSET) * sizeof(struct page)),
+ (unsigned long)(PFN_UP(ramstart) - ARCH_PFN_OFFSET));
+ }
+#endif
+
+ min_low_pfn = ARCH_PFN_OFFSET;
+ max_pfn = PFN_DOWN(ramend);
+ for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, NULL) {
+ /*
+ * Skip highmem here so we get an accurate max_low_pfn if low
+ * memory stops short of high memory.
+ * If the region overlaps HIGHMEM_START, end is clipped so
+ * max_pfn excludes the highmem portion.
+ */
+ if (start >= PFN_DOWN(HIGHMEM_START))
+ continue;
+ if (end > PFN_DOWN(HIGHMEM_START))
+ end = PFN_DOWN(HIGHMEM_START);
+ if (end > max_low_pfn)
+ max_low_pfn = end;
+ }
+
+ if (min_low_pfn >= max_low_pfn)
+ panic("Incorrect memory mapping !!!");
+
+ if (max_pfn > PFN_DOWN(HIGHMEM_START)) {
+ max_low_pfn = PFN_DOWN(HIGHMEM_START);
+#ifdef CONFIG_HIGHMEM
+ highstart_pfn = max_low_pfn;
+ highend_pfn = max_pfn;
+#else
+ max_pfn = max_low_pfn;
+#endif
+ }
+
+ /*
+ * Reserve initrd memory if needed.
+ */
+ finalize_initrd();
+}
+
+#endif /* CONFIG_SGI_IP27 */
+
+static int usermem __initdata;
+
+static int __init early_parse_mem(char *p)
+{
+ phys_addr_t start, size;
+
+ if (!p) {
+ pr_err("mem parameter is empty, do nothing\n");
+ return -EINVAL;
+ }
+
+ /*
+ * If a user specifies memory size, we blow away any
+ * automatically generated size.
+ */
+ if (usermem == 0) {
+ usermem = 1;
+ memblock_remove(memblock_start_of_DRAM(),
+ memblock_end_of_DRAM() - memblock_start_of_DRAM());
+ }
+ start = 0;
+ size = memparse(p, &p);
+ if (*p == '@')
+ start = memparse(p + 1, &p);
+
+ if (IS_ENABLED(CONFIG_NUMA))
+ memblock_add_node(start, size, pa_to_nid(start), MEMBLOCK_NONE);
+ else
+ memblock_add(start, size);
+
+ return 0;
+}
+early_param("mem", early_parse_mem);
+
+static int __init early_parse_memmap(char *p)
+{
+ char *oldp;
+ u64 start_at, mem_size;
+
+ if (!p)
+ return -EINVAL;
+
+ if (!strncmp(p, "exactmap", 8)) {
+ pr_err("\"memmap=exactmap\" invalid on MIPS\n");
+ return 0;
+ }
+
+ oldp = p;
+ mem_size = memparse(p, &p);
+ if (p == oldp)
+ return -EINVAL;
+
+ if (*p == '@') {
+ start_at = memparse(p+1, &p);
+ memblock_add(start_at, mem_size);
+ } else if (*p == '#') {
+ pr_err("\"memmap=nn#ss\" (force ACPI data) invalid on MIPS\n");
+ return -EINVAL;
+ } else if (*p == '$') {
+ start_at = memparse(p+1, &p);
+ memblock_add(start_at, mem_size);
+ memblock_reserve(start_at, mem_size);
+ } else {
+ pr_err("\"memmap\" invalid format!\n");
+ return -EINVAL;
+ }
+
+ if (*p == '\0') {
+ usermem = 1;
+ return 0;
+ } else
+ return -EINVAL;
+}
+early_param("memmap", early_parse_memmap);
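Both parsers lean on memparse(), which reads a number with an optional size suffix and returns a pointer past what it consumed, so "64M@256M" yields a size with "@256M" left over for the base address. A userspace sketch of the suffix handling (the real helper also accepts T, P and E suffixes):

    #include <stdio.h>
    #include <stdlib.h>

    static unsigned long long memparse_sketch(const char *p, char **retptr)
    {
            unsigned long long v = strtoull(p, retptr, 0);

            switch (**retptr) {
            case 'G': case 'g':
                    v <<= 10;       /* fall through */
            case 'M': case 'm':
                    v <<= 10;       /* fall through */
            case 'K': case 'k':
                    v <<= 10;
                    (*retptr)++;
            }
            return v;
    }

    int main(void)
    {
            char *rest;
            unsigned long long size = memparse_sketch("64M@256M", &rest);

            printf("size=%llu rest=%s\n", size, rest);
            /* size=67108864 rest=@256M */
            return 0;
    }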
+
+static void __init mips_reserve_vmcore(void)
+{
+#ifdef CONFIG_PROC_VMCORE
+ phys_addr_t start, end;
+ u64 i;
+
+ if (!elfcorehdr_size) {
+ for_each_mem_range(i, &start, &end) {
+ if (elfcorehdr_addr >= start && elfcorehdr_addr < end) {
+ /*
+ * Reserve from the elf core header to the end of
+ * the memory segment; that should all be kdump
+ * reserved memory.
+ */
+ elfcorehdr_size = end - elfcorehdr_addr;
+ break;
+ }
+ }
+ }
+
+ pr_info("Reserving %ldKB of memory at %ldKB for kdump\n",
+ (unsigned long)elfcorehdr_size >> 10, (unsigned long)elfcorehdr_addr >> 10);
+
+ memblock_reserve(elfcorehdr_addr, elfcorehdr_size);
+#endif
+}
+
+#ifdef CONFIG_KEXEC
+
+/* 64M alignment for crash kernel regions */
+#define CRASH_ALIGN SZ_64M
+#define CRASH_ADDR_MAX SZ_512M
+
+static void __init mips_parse_crashkernel(void)
+{
+ unsigned long long total_mem;
+ unsigned long long crash_size, crash_base;
+ int ret;
+
+ total_mem = memblock_phys_mem_size();
+ ret = parse_crashkernel(boot_command_line, total_mem,
+ &crash_size, &crash_base);
+ if (ret != 0 || crash_size <= 0)
+ return;
+
+ if (crash_base <= 0) {
+ crash_base = memblock_phys_alloc_range(crash_size, CRASH_ALIGN,
+ CRASH_ALIGN,
+ CRASH_ADDR_MAX);
+ if (!crash_base) {
+ pr_warn("crashkernel reservation failed - No suitable area found.\n");
+ return;
+ }
+ } else {
+ unsigned long long start;
+
+ start = memblock_phys_alloc_range(crash_size, 1,
+ crash_base,
+ crash_base + crash_size);
+ if (start != crash_base) {
+ pr_warn("Invalid memory region reserved for crash kernel\n");
+ return;
+ }
+ }
+
+ crashk_res.start = crash_base;
+ crashk_res.end = crash_base + crash_size - 1;
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+ int ret;
+
+ if (crashk_res.start == crashk_res.end)
+ return;
+
+ ret = request_resource(res, &crashk_res);
+ if (!ret)
+ pr_info("Reserving %ldMB of memory at %ldMB for crashkernel\n",
+ (unsigned long)(resource_size(&crashk_res) >> 20),
+ (unsigned long)(crashk_res.start >> 20));
+}
+#else /* !defined(CONFIG_KEXEC) */
+static void __init mips_parse_crashkernel(void)
+{
+}
+
+static void __init request_crashkernel(struct resource *res)
+{
+}
+#endif /* !defined(CONFIG_KEXEC) */
+
+static void __init check_kernel_sections_mem(void)
+{
+ phys_addr_t start = __pa_symbol(&_text);
+ phys_addr_t size = __pa_symbol(&_end) - start;
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_info("Kernel sections are not in the memory maps\n");
+ memblock_add(start, size);
+ }
+}
+
+static void __init bootcmdline_append(const char *s, size_t max)
+{
+ if (!s[0] || !max)
+ return;
+
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+
+ strlcat(boot_command_line, s, max);
+}
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+
+static int __init bootcmdline_scan_chosen(unsigned long node, const char *uname,
+ int depth, void *data)
+{
+ bool *dt_bootargs = data;
+ const char *p;
+ int l;
+
+ if (depth != 1 || !data ||
+ (strcmp(uname, "chosen") != 0 && strcmp(uname, "chosen@0") != 0))
+ return 0;
+
+ p = of_get_flat_dt_prop(node, "bootargs", &l);
+ if (p != NULL && l > 0) {
+ bootcmdline_append(p, min(l, COMMAND_LINE_SIZE));
+ *dt_bootargs = true;
+ }
+
+ return 1;
+}
+
+#endif /* CONFIG_OF_EARLY_FLATTREE */
+
+static void __init bootcmdline_init(void)
+{
+ bool dt_bootargs = false;
+
+ /*
+ * If CMDLINE_OVERRIDE is enabled then initializing the command line is
+ * trivial - we simply use the built-in command line unconditionally &
+ * unmodified.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_OVERRIDE)) {
+ strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ return;
+ }
+
+ /*
+ * If the user specified a built-in command line &
+ * MIPS_CMDLINE_BUILTIN_EXTEND, then the built-in command line is
+ * prepended to arguments from the bootloader or DT so we'll copy them
+ * to the start of boot_command_line here. Otherwise, empty
+ * boot_command_line to undo anything early_init_dt_scan_chosen() did.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
+ strscpy(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
+ else
+ boot_command_line[0] = 0;
+
+#ifdef CONFIG_OF_EARLY_FLATTREE
+ /*
+ * If we're configured to take boot arguments from DT, look for those
+ * now.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB) ||
+ IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND))
+ of_scan_flat_dt(bootcmdline_scan_chosen, &dt_bootargs);
+#endif
+
+ /*
+ * If we didn't get any arguments from DT (regardless of whether that's
+ * because we weren't configured to look for them, or because we looked
+ * & found none) then we'll take arguments from the bootloader.
+ * plat_mem_setup() should have filled arcs_cmdline with arguments from
+ * the bootloader.
+ */
+ if (IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND) || !dt_bootargs)
+ bootcmdline_append(arcs_cmdline, COMMAND_LINE_SIZE);
+
+ /*
+ * If the user specified a built-in command line & we didn't already
+ * prepend it, we append it to boot_command_line here.
+ */
+ if (IS_ENABLED(CONFIG_CMDLINE_BOOL) &&
+ !IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND))
+ bootcmdline_append(builtin_cmdline, COMMAND_LINE_SIZE);
+}
+
+/*
+ * arch_mem_init - initialize memory management subsystem
+ *
+ * o plat_mem_setup() detects the memory configuration and will record detected
+ * memory areas using memblock_add.
+ *
+ * At this stage the memory configuration of the system is known to
+ * the kernel, but the generic memory management system is still
+ * entirely uninitialized.
+ *
+ * o bootmem_init()
+ * o sparse_init()
+ * o paging_init()
+ * o dma_contiguous_reserve()
+ *
+ * At this stage the bootmem allocator is ready to use.
+ *
+ * NOTE: historically plat_mem_setup did the entire platform
+ * initialization. This was rather impractical because it meant
+ * plat_mem_setup had to get by without any kind of memory allocator.
+ * To keep old code from breaking, plat_setup was simply renamed to
+ * plat_mem_setup and a second platform initialization hook was
+ * introduced for everything else.
+ */
+static void __init arch_mem_init(char **cmdline_p)
+{
+ /* call board setup routine */
+ plat_mem_setup();
+ memblock_set_bottom_up(true);
+
+ bootcmdline_init();
+ strscpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
+ *cmdline_p = command_line;
+
+ parse_early_param();
+
+ if (usermem)
+ pr_info("User-defined physical RAM map overwrite\n");
+
+ check_kernel_sections_mem();
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+#ifndef CONFIG_NUMA
+ memblock_set_node(0, PHYS_ADDR_MAX, &memblock.memory, 0);
+#endif
+ bootmem_init();
+
+ /*
+ * Prevent memblock from allocating high memory.
+ * This cannot be done before max_low_pfn is detected, so up
+ * to this point it is only possible to reserve physical memory
+ * with memblock_reserve; memblock_alloc* can be used
+ * only after this point.
+ */
+ memblock_set_current_limit(PFN_PHYS(max_low_pfn));
+
+ mips_reserve_vmcore();
+
+ mips_parse_crashkernel();
+ device_tree_init();
+
+ /*
+ * To reduce the possibility of a kernel panic when we fail to get
+ * IO TLB memory under CONFIG_SWIOTLB, it is better to allocate as
+ * little low memory as possible before plat_swiotlb_setup(), so
+ * make sparse_init() use top-down allocation.
+ */
+ memblock_set_bottom_up(false);
+ sparse_init();
+ memblock_set_bottom_up(true);
+
+ plat_swiotlb_setup();
+
+ dma_contiguous_reserve(PFN_PHYS(max_low_pfn));
+
+ /* Reserve for hibernation. */
+ memblock_reserve(__pa_symbol(&__nosave_begin),
+ __pa_symbol(&__nosave_end) - __pa_symbol(&__nosave_begin));
+
+ early_memtest(PFN_PHYS(ARCH_PFN_OFFSET), PFN_PHYS(max_low_pfn));
+}
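One detail worth calling out is the pair of memblock_set_bottom_up() flips around sparse_init(): allocating sparse-section metadata from the top of memory keeps scarce low memory free for the SWIOTLB bounce buffer set up just afterwards. A toy single-range allocator, purely illustrative, shows the placement difference the flag controls.

```c
/*
 * Toy single-range allocator illustrating bottom-up vs top-down
 * placement, the property arch_mem_init() toggles around sparse_init().
 * Addresses and sizes are made up.
 */
#include <stdio.h>

static unsigned long base = 0x1000000, limit = 0x2000000; /* free range */

static unsigned long alloc(unsigned long size, int bottom_up)
{
	unsigned long addr;

	if (bottom_up) {
		addr = base;
		base += size;		/* consume low memory */
	} else {
		limit -= size;		/* consume high memory */
		addr = limit;
	}
	return addr;
}

int main(void)
{
	printf("top-down:  %#lx\n", alloc(0x10000, 0));
	printf("bottom-up: %#lx\n", alloc(0x10000, 1));
	return 0;
}
```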
+
+static void __init resource_init(void)
+{
+ phys_addr_t start, end;
+ u64 i;
+
+ if (UNCAC_BASE != IO_BASE)
+ return;
+
+ code_resource.start = __pa_symbol(&_text);
+ code_resource.end = __pa_symbol(&_etext) - 1;
+ data_resource.start = __pa_symbol(&_etext);
+ data_resource.end = __pa_symbol(&_edata) - 1;
+ bss_resource.start = __pa_symbol(&__bss_start);
+ bss_resource.end = __pa_symbol(&__bss_stop) - 1;
+
+ for_each_mem_range(i, &start, &end) {
+ struct resource *res;
+
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ res->start = start;
+ /*
+ * In memblock, end points to the first byte after the
+ * range while in resources, end points to the last byte in
+ * the range.
+ */
+ res->end = end - 1;
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ res->name = "System RAM";
+
+ request_resource(&iomem_resource, res);
+
+ /*
+ * We don't know which RAM region contains kernel data,
+ * so we try it repeatedly and let the resource manager
+ * test it.
+ */
+ request_resource(res, &code_resource);
+ request_resource(res, &data_resource);
+ request_resource(res, &bss_resource);
+ request_crashkernel(res);
+ }
+}
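The res->end = end - 1 assignment is the whole subtlety of this loop: for_each_mem_range() yields half-open [start, end) intervals, while struct resource stores inclusive [start, end] bounds. A trivial sketch of the conversion (addresses are made up):

```c
/*
 * Illustrative only: converting a half-open [start, end) memblock-style
 * range to the inclusive [start, end] convention of struct resource.
 */
#include <stdio.h>

int main(void)
{
	unsigned long start = 0x80000000UL, end = 0x90000000UL; /* [start, end) */
	unsigned long res_start = start;
	unsigned long res_end = end - 1;	/* last byte in the range */

	printf("resource: [%#lx, %#lx], %lu bytes\n",
	       res_start, res_end, end - start);
	return 0;
}
```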
+
+#ifdef CONFIG_SMP
+static void __init prefill_possible_map(void)
+{
+ int i, possible = num_possible_cpus();
+
+ if (possible > nr_cpu_ids)
+ possible = nr_cpu_ids;
+
+ for (i = 0; i < possible; i++)
+ set_cpu_possible(i, true);
+ for (; i < NR_CPUS; i++)
+ set_cpu_possible(i, false);
+
+ set_nr_cpu_ids(possible);
+}
+#else
+static inline void prefill_possible_map(void) {}
+#endif
+
+static void __init setup_rng_seed(void)
+{
+ char *rng_seed_hex = fw_getenv("rngseed");
+ u8 rng_seed[512];
+ size_t len;
+
+ if (!rng_seed_hex)
+ return;
+
+ len = min(sizeof(rng_seed), strlen(rng_seed_hex) / 2);
+ if (hex2bin(rng_seed, rng_seed_hex, len))
+ return;
+
+ add_bootloader_randomness(rng_seed, len);
+ memzero_explicit(rng_seed, len);
+ memzero_explicit(rng_seed_hex, len * 2);
+}
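setup_rng_seed() leans on the kernel's hex2bin() to decode the firmware's "rngseed" string into raw bytes before feeding them to add_bootloader_randomness(). A stand-alone sketch of that decoding step, with a hand-rolled stand-in for hex2bin() (same name, but not the kernel implementation):

```c
#include <stdio.h>
#include <string.h>

/*
 * Minimal stand-in for the kernel's hex2bin(): decode len bytes from a
 * hex string, returning -1 on any non-hex character.
 */
static int hex2bin(unsigned char *dst, const char *src, size_t len)
{
	for (size_t i = 0; i < len; i++) {
		unsigned int byte;

		if (sscanf(src + 2 * i, "%2x", &byte) != 1)
			return -1;
		dst[i] = (unsigned char)byte;
	}
	return 0;
}

int main(void)
{
	const char *seed_hex = "deadbeef";	/* hypothetical "rngseed" value */
	unsigned char seed[32];
	size_t len = strlen(seed_hex) / 2;

	if (len > sizeof(seed))
		len = sizeof(seed);
	if (hex2bin(seed, seed_hex, len))
		return 1;
	for (size_t i = 0; i < len; i++)
		printf("%02x", seed[i]);
	printf("\n");
	return 0;
}
```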
+
+void __init setup_arch(char **cmdline_p)
+{
+ cpu_probe();
+ mips_cm_probe();
+ prom_init();
+
+ setup_early_fdc_console();
+#ifdef CONFIG_EARLY_PRINTK
+ setup_early_printk();
+#endif
+ cpu_report();
+ check_bugs_early();
+
+#if defined(CONFIG_VT)
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#endif
+#endif
+
+ arch_mem_init(cmdline_p);
+ dmi_setup();
+
+ resource_init();
+ plat_smp_setup();
+ prefill_possible_map();
+
+ cpu_cache_init();
+ paging_init();
+
+ memblock_dump_all();
+
+ setup_rng_seed();
+}
+
+unsigned long kernelsp[NR_CPUS];
+unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *mips_debugfs_dir;
+static int __init debugfs_mips(void)
+{
+ mips_debugfs_dir = debugfs_create_dir("mips", NULL);
+ return 0;
+}
+arch_initcall(debugfs_mips);
+#endif
+
+#ifdef CONFIG_DMA_NONCOHERENT
+static int __init setcoherentio(char *str)
+{
+ dma_default_coherent = true;
+ pr_info("Hardware DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("coherentio", setcoherentio);
+
+static int __init setnocoherentio(char *str)
+{
+ dma_default_coherent = false;
+ pr_info("Software DMA cache coherency (command line)\n");
+ return 0;
+}
+early_param("nocoherentio", setnocoherentio);
+#endif
+
+void __init arch_cpu_finalize_init(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ cpu_data[cpu].udelay_val = loops_per_jiffy;
+ check_bugs32();
+
+ if (IS_ENABLED(CONFIG_CPU_R4X00_BUGS64))
+ check_bugs64();
+}
diff --git a/arch/mips/kernel/signal-common.h b/arch/mips/kernel/signal-common.h
new file mode 100644
index 000000000..f50d48435
--- /dev/null
+++ b/arch/mips/kernel/signal-common.h
@@ -0,0 +1,43 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ */
+
+#ifndef __SIGNAL_COMMON_H
+#define __SIGNAL_COMMON_H
+
+/* #define DEBUG_SIG */
+
+#ifdef DEBUG_SIG
+# define DEBUGP(fmt, args...) printk("%s: " fmt, __func__, ##args)
+#else
+# define DEBUGP(fmt, args...)
+#endif
+
+/*
+ * Determine which stack to use.
+ */
+extern void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ size_t frame_size);
+/* Check and clear pending FPU exceptions in saved CSR */
+extern int fpcsr_pending(unsigned int __user *fpcsr);
+
+/* Make sure we will not lose FPU ownership */
+#define lock_fpu_owner() ({ preempt_disable(); pagefault_disable(); })
+#define unlock_fpu_owner() ({ pagefault_enable(); preempt_enable(); })
+
+/* Assembly functions to move context to/from the FPU */
+extern asmlinkage int
+_save_fp_context(void __user *fpregs, void __user *csr);
+extern asmlinkage int
+_restore_fp_context(void __user *fpregs, void __user *csr);
+
+extern asmlinkage int _save_msa_all_upper(void __user *buf);
+extern asmlinkage int _restore_msa_all_upper(void __user *buf);
+
+#endif /* __SIGNAL_COMMON_H */
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
new file mode 100644
index 000000000..479999b7f
--- /dev/null
+++ b/arch/mips/kernel/signal.c
@@ -0,0 +1,968 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/cache.h>
+#include <linux/context_tracking.h>
+#include <linux/irqflags.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/personality.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/uprobes.h>
+#include <linux/compiler.h>
+#include <linux/syscalls.h>
+#include <linux/uaccess.h>
+#include <linux/resume_user_mode.h>
+
+#include <asm/abi.h>
+#include <asm/asm.h>
+#include <linux/bitops.h>
+#include <asm/cacheflush.h>
+#include <asm/fpu.h>
+#include <asm/sim.h>
+#include <asm/ucontext.h>
+#include <asm/cpu-features.h>
+#include <asm/dsp.h>
+#include <asm/inst.h>
+#include <asm/msa.h>
+
+#include "signal-common.h"
+
+static int (*save_fp_context)(void __user *sc);
+static int (*restore_fp_context)(void __user *sc);
+
+struct sigframe {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+
+ /* Matches struct ucontext from its uc_mcontext field onwards */
+ struct sigcontext sf_sc;
+ sigset_t sf_mask;
+ unsigned long long sf_extcontext[];
+};
+
+struct rt_sigframe {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ struct siginfo rs_info;
+ struct ucontext rs_uc;
+};
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Copy thread-saved context to/from a signal context presumed to be on the
+ * user stack, and therefore accessed with the appropriate macros from uaccess.h.
+ */
+static int copy_fp_to_sigcontext(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |=
+ __put_user(get_fpr64(&current->thread.fpu.fpr[i], 0),
+ &fpregs[i]);
+ }
+ err |= __put_user(current->thread.fpu.fcr31, csr);
+
+ return err;
+}
+
+static int copy_fp_from_sigcontext(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ int i;
+ int err = 0;
+ int inc = test_thread_flag(TIF_32BIT_FPREGS) ? 2 : 1;
+ u64 fpr_val;
+
+ for (i = 0; i < NUM_FPU_REGS; i += inc) {
+ err |= __get_user(fpr_val, &fpregs[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 0, fpr_val);
+ }
+ err |= __get_user(current->thread.fpu.fcr31, csr);
+
+ return err;
+}
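The inc stride reflects the FR=0 register model: when TIF_32BIT_FPREGS is set, odd-numbered FP registers alias the upper halves of even-numbered ones, so only every second 64-bit sigcontext slot carries data. A sketch of the resulting layout (register values are placeholders):

```c
/*
 * Illustrative: with a stride of 2 (FR=0 mode) only even-numbered FP
 * registers are written to the sigcontext; odd slots stay untouched.
 */
#include <stdio.h>

#define NUM_FPU_REGS 32

int main(void)
{
	unsigned long long sc_fpregs[NUM_FPU_REGS] = { 0 };
	int fr_mode_32 = 1;			/* TIF_32BIT_FPREGS set */
	int inc = fr_mode_32 ? 2 : 1;

	for (int i = 0; i < NUM_FPU_REGS; i += inc)
		sc_fpregs[i] = 0x1000 + i;	/* stand-in register value */

	for (int i = 0; i < 8; i++)
		printf("fpr[%d] = %#llx\n", i, sc_fpregs[i]);
	return 0;
}
```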
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int copy_fp_to_sigcontext(void __user *sc)
+{
+ return 0;
+}
+
+static int copy_fp_from_sigcontext(void __user *sc)
+{
+ return 0;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
+/*
+ * Wrappers for the assembly _{save,restore}_fp_context functions.
+ */
+static int save_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _save_fp_context(fpregs, csr);
+}
+
+static int restore_hw_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+
+ return _restore_fp_context(fpregs, csr);
+}
+
+/*
+ * Extended context handling.
+ */
+
+static inline void __user *sc_to_extcontext(void __user *sc)
+{
+ struct ucontext __user *uc;
+
+ /*
+ * We can just pretend the sigcontext is always embedded in a struct
+ * ucontext here, because the offset from sigcontext to extended
+ * context is the same in the struct sigframe case.
+ */
+ uc = container_of(sc, struct ucontext, uc_mcontext);
+ return &uc->uc_extcontext;
+}
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+static int save_msa_extcontext(void __user *buf)
+{
+ struct msa_extcontext __user *msa = buf;
+ uint64_t val;
+ int i, err;
+
+ if (!thread_msa_context_live())
+ return 0;
+
+ /*
+ * Ensure that we can't lose the live MSA context between checking
+ * for it & writing it to memory.
+ */
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be saved to kernel memory
+ * and then copied to user memory. The save to kernel memory
+ * should already have been done when handling scalar FP
+ * context.
+ */
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
+
+ err = __put_user(read_msa_csr(), &msa->csr);
+ err |= _save_msa_all_upper(&msa->wr);
+
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ err = __put_user(current->thread.fpu.msacsr, &msa->csr);
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ val = get_fpr64(&current->thread.fpu.fpr[i], 1);
+ err |= __put_user(val, &msa->wr[i]);
+ }
+ }
+
+ err |= __put_user(MSA_EXTCONTEXT_MAGIC, &msa->ext.magic);
+ err |= __put_user(sizeof(*msa), &msa->ext.size);
+
+ return err ? -EFAULT : sizeof(*msa);
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ struct msa_extcontext __user *msa = buf;
+ unsigned long long val;
+ unsigned int csr;
+ int i, err;
+
+ if (size != sizeof(*msa))
+ return -EINVAL;
+
+ err = get_user(csr, &msa->csr);
+ if (err)
+ return err;
+
+ preempt_disable();
+
+ if (is_msa_enabled()) {
+ /*
+ * There are no EVA versions of the vector register load/store
+ * instructions, so MSA context has to be copied to kernel
+ * memory and later loaded to registers. The same is true of
+ * scalar FP context, so FPU & MSA should have already been
+ * disabled whilst handling scalar FP context.
+ */
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
+
+ write_msa_csr(csr);
+ err |= _restore_msa_all_upper(&msa->wr);
+ preempt_enable();
+ } else {
+ preempt_enable();
+
+ current->thread.fpu.msacsr = csr;
+
+ for (i = 0; i < NUM_FPU_REGS; i++) {
+ err |= __get_user(val, &msa->wr[i]);
+ set_fpr64(&current->thread.fpu.fpr[i], 1, val);
+ }
+ }
+
+ return err;
+}
+
+#else /* !CONFIG_CPU_HAS_MSA */
+
+static int save_msa_extcontext(void __user *buf)
+{
+ return 0;
+}
+
+static int restore_msa_extcontext(void __user *buf, unsigned int size)
+{
+ return SIGSYS;
+}
+
+#endif /* !CONFIG_CPU_HAS_MSA */
+
+static int save_extcontext(void __user *buf)
+{
+ int sz;
+
+ sz = save_msa_extcontext(buf);
+ if (sz < 0)
+ return sz;
+ buf += sz;
+
+ /* If no context was saved then trivially return */
+ if (!sz)
+ return 0;
+
+ /* Write the end marker */
+ if (__put_user(END_EXTCONTEXT_MAGIC, (u32 *)buf))
+ return -EFAULT;
+
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+ return sz;
+}
+
+static int restore_extcontext(void __user *buf)
+{
+ struct extcontext ext;
+ int err;
+
+ while (1) {
+ err = __get_user(ext.magic, (unsigned int *)buf);
+ if (err)
+ return err;
+
+ if (ext.magic == END_EXTCONTEXT_MAGIC)
+ return 0;
+
+ err = __get_user(ext.size, (unsigned int *)(buf
+ + offsetof(struct extcontext, size)));
+ if (err)
+ return err;
+
+ switch (ext.magic) {
+ case MSA_EXTCONTEXT_MAGIC:
+ err = restore_msa_extcontext(buf, ext.size);
+ break;
+
+ default:
+ err = -EINVAL;
+ break;
+ }
+
+ if (err)
+ return err;
+
+ buf += ext.size;
+ }
+}
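Extended context is a flat sequence of (magic, size) records terminated by END_EXTCONTEXT_MAGIC, and restore_extcontext() is essentially a TLV walk over it. A user-space sketch of the same walk; the magic constants here are arbitrary stand-ins, not the kernel's values:

```c
/*
 * Illustrative TLV-style walk over (magic, size) records, terminated by
 * an end marker -- the same shape restore_extcontext() parses. The magic
 * numbers are arbitrary stand-ins.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define MSA_MAGIC 0x784d5341	/* stand-in for MSA_EXTCONTEXT_MAGIC */
#define END_MAGIC 0x78454e44	/* stand-in for END_EXTCONTEXT_MAGIC */

struct ext_hdr { uint32_t magic, size; };

static int walk(const unsigned char *buf)
{
	for (;;) {
		struct ext_hdr hdr;

		memcpy(&hdr, buf, sizeof(hdr));
		if (hdr.magic == END_MAGIC)
			return 0;
		if (hdr.magic != MSA_MAGIC)
			return -1;	/* unknown record type */
		printf("record %#x, %u bytes\n", hdr.magic, hdr.size);
		buf += hdr.size;	/* size covers the whole record */
	}
}

int main(void)
{
	unsigned char buf[64] = { 0 };
	struct ext_hdr msa = { MSA_MAGIC, 16 }, end = { END_MAGIC, 0 };

	memcpy(buf, &msa, sizeof(msa));
	memcpy(buf + 16, &end, sizeof(end));
	return walk(buf);
}
```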
+
+/*
+ * Helper routines
+ */
+int protected_save_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used, ext_sz;
+ int err;
+
+ used = used_math() ? USED_FP : 0;
+ if (!used)
+ goto fp_done;
+
+ if (!test_thread_flag(TIF_32BIT_FPREGS))
+ used |= USED_FR1;
+ if (test_thread_flag(TIF_HYBRID_FPREGS))
+ used |= USED_HYBRID_FPRS;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so
+ * save to the kernel FP context & copy that to userland below.
+ */
+ if (IS_ENABLED(CONFIG_EVA))
+ lose_fpu(1);
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = save_fp_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_to_sigcontext(sc);
+ }
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __put_user(0, &fpregs[0]) |
+ __put_user(0, &fpregs[31]) |
+ __put_user(0, csr);
+ if (err)
+ return err; /* really bad sigcontext */
+ }
+
+fp_done:
+ ext_sz = err = save_extcontext(sc_to_extcontext(sc));
+ if (err < 0)
+ return err;
+ used |= ext_sz ? USED_EXTCONTEXT : 0;
+
+ return __put_user(used, used_math);
+}
+
+int protected_restore_fp_context(void __user *sc)
+{
+ struct mips_abi *abi = current->thread.abi;
+ uint64_t __user *fpregs = sc + abi->off_sc_fpregs;
+ uint32_t __user *csr = sc + abi->off_sc_fpc_csr;
+ uint32_t __user *used_math = sc + abi->off_sc_used_math;
+ unsigned int used;
+ int err, sig = 0, tmp __maybe_unused;
+
+ err = __get_user(used, used_math);
+ conditional_used_math(used & USED_FP);
+
+ /*
+ * The signal handler may have used FPU; give it up if the program
+ * doesn't want it following sigreturn.
+ */
+ if (err || !(used & USED_FP))
+ lose_fpu(0);
+ if (err)
+ return err;
+ if (!(used & USED_FP))
+ goto fp_done;
+
+ err = sig = fpcsr_pending(csr);
+ if (err < 0)
+ return err;
+
+ /*
+ * EVA does not have userland equivalents of ldc1 or sdc1, so we
+ * disable the FPU here such that the code below simply copies to
+ * the kernel FP context.
+ */
+ if (IS_ENABLED(CONFIG_EVA))
+ lose_fpu(0);
+
+ while (1) {
+ lock_fpu_owner();
+ if (is_fpu_owner()) {
+ err = restore_fp_context(sc);
+ unlock_fpu_owner();
+ } else {
+ unlock_fpu_owner();
+ err = copy_fp_from_sigcontext(sc);
+ }
+ if (likely(!err))
+ break;
+ /* touch the sigcontext and try again */
+ err = __get_user(tmp, &fpregs[0]) |
+ __get_user(tmp, &fpregs[31]) |
+ __get_user(tmp, csr);
+ if (err)
+ break; /* really bad sigcontext */
+ }
+
+fp_done:
+ if (!err && (used & USED_EXTCONTEXT))
+ err = restore_extcontext(sc_to_extcontext(sc));
+
+ return err ?: sig;
+}
+
+int setup_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __put_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ }
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static size_t extcontext_max_size(void)
+{
+ size_t sz = 0;
+
+ /*
+ * The assumption here is that between this point & the point at which
+ * the extended context is saved the size of the context should only
+ * ever be able to shrink (if the task is preempted), but never grow.
+ * That is, what this function returns is an upper bound on the size of
+ * the extended context for the current task at the current time.
+ */
+
+ if (thread_msa_context_live())
+ sz += sizeof(struct msa_extcontext);
+
+ /* If any context is saved then we'll append the end marker */
+ if (sz)
+ sz += sizeof(((struct extcontext *)NULL)->magic);
+
+ return sz;
+}
+
+int fpcsr_pending(unsigned int __user *fpcsr)
+{
+ int err, sig = 0;
+ unsigned int csr, enabled;
+
+ err = __get_user(csr, fpcsr);
+ enabled = FPU_CSR_UNI_X | ((csr & FPU_CSR_ALL_E) << 5);
+ /*
+ * If the signal handler set some FPU exceptions, clear them and
+ * send SIGFPE.
+ */
+ if (csr & enabled) {
+ csr &= ~enabled;
+ err |= __put_user(csr, fpcsr);
+ sig = SIGFPE;
+ }
+ return err ?: sig;
+}
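The << 5 is doing real work here: in the FCSR, the Enable field sits in bits 11:7 and the Cause field in bits 16:12, so shifting the enables left by five aligns them with their causes; the unimplemented-operation cause (bit 17) has no enable bit and is always treated as pending. A sketch of the test with those field positions assumed:

```c
/*
 * Illustrative FCSR pending-exception test: Cause bits occupy 16:12 and
 * Enable bits 11:7, so (enables << 5) aligns them with the causes. Bit
 * 17 is the unimplemented-operation cause, which cannot be masked.
 */
#include <stdio.h>

#define FCSR_CAUSE_UNI	(1u << 17)
#define FCSR_ENABLES	0x00000f80u	/* bits 11:7 */

int main(void)
{
	unsigned int csr = (1u << 12) | (1u << 7); /* inexact cause + enable */
	unsigned int enabled = FCSR_CAUSE_UNI | ((csr & FCSR_ENABLES) << 5);

	if (csr & enabled) {
		printf("pending enabled exception: csr=%#x -> SIGFPE\n", csr);
		csr &= ~enabled;		/* clear before re-entry */
	}
	printf("csr after clearing: %#x\n", csr);
	return 0;
}
```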
+
+int restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *sc)
+{
+ unsigned long treg;
+ int err = 0;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ err |= __get_user(regs->acx, &sc->sc_acx);
+#endif
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+#ifdef CONFIG_WAR_ICACHE_REFILLS
+#define SIGMASK ~(cpu_icache_line_size()-1)
+#else
+#define SIGMASK ALMASK
+#endif
+
+void __user *get_sigframe(struct ksignal *ksig, struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long sp;
+
+ /* Leave space for potential extended context */
+ frame_size += extcontext_max_size();
+
+ /* Default to using normal stack */
+ sp = regs->regs[29];
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - frame_size)))
+ return (void __user __force *)(-1UL);
+
+ /*
+ * The FPU emulator may have its own trampoline active just
+ * above the user stack, 16 bytes below the next lowest
+ * 16-byte boundary. Try to avoid trashing it.
+ */
+ sp -= 32;
+
+ sp = sigsp(sp, ksig);
+
+ return (void __user *)((sp - frame_size) & SIGMASK);
+}
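So frame placement is: optionally switch to the alternate stack, step 32 bytes below any FPU-emulator trampoline, make room for the frame, and round down to the alignment mask. A sketch of the arithmetic (the sigsp() step is omitted), assuming a 16-byte ALMASK-style mask and made-up addresses:

```c
/*
 * Illustrative signal-frame placement: drop below a possible emulator
 * trampoline, make room for the frame, then align downwards. The
 * 16-byte mask stands in for ALMASK/SIGMASK.
 */
#include <stdio.h>

int main(void)
{
	unsigned long sp = 0x7fff0008UL;	/* current user $sp */
	unsigned long frame_size = 0x2a8;	/* assumed frame size */
	unsigned long mask = ~(16UL - 1);	/* assumed alignment mask */

	sp -= 32;				/* skip emulator trampoline */
	sp = (sp - frame_size) & mask;		/* aligned frame address */
	printf("frame at %#lx\n", sp);
	return 0;
}
```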
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+#ifdef CONFIG_TRAD_SIGNALS
+SYSCALL_DEFINE1(sigsuspend, sigset_t __user *, uset)
+{
+ return sys_rt_sigsuspend(uset, sizeof(sigset_t));
+}
+#endif
+
+#ifdef CONFIG_TRAD_SIGNALS
+SYSCALL_DEFINE3(sigaction, int, sig, const struct sigaction __user *, act,
+ struct sigaction __user *, oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ int err = 0;
+
+ if (act) {
+ old_sigset_t mask;
+
+ if (!access_ok(act, sizeof(*act)))
+ return -EFAULT;
+ err |= __get_user(new_ka.sa.sa_handler, &act->sa_handler);
+ err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ err |= __get_user(mask, &act->sa_mask.sig[0]);
+ if (err)
+ return -EFAULT;
+
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(oact, sizeof(*oact)))
+ return -EFAULT;
+ err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ err |= __put_user(old_ka.sa.sa_handler, &oact->sa_handler);
+ err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
+ err |= __put_user(0, &oact->sa_mask.sig[1]);
+ err |= __put_user(0, &oact->sa_mask.sig[2]);
+ err |= __put_user(0, &oact->sa_mask.sig[3]);
+ if (err)
+ return -EFAULT;
+ }
+
+ return ret;
+}
+#endif
+
+#ifdef CONFIG_TRAD_SIGNALS
+asmlinkage void sys_sigreturn(void)
+{
+ struct sigframe __user *frame;
+ struct pt_regs *regs;
+ sigset_t blocked;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct sigframe __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&blocked, &frame->sf_mask, sizeof(blocked)))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext(regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+#endif /* CONFIG_TRAD_SIGNALS */
+
+asmlinkage void sys_rt_sigreturn(void)
+{
+ struct rt_sigframe __user *frame;
+ struct pt_regs *regs;
+ sigset_t set;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_from_user(&set, &frame->rs_uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ if (restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+
+#ifdef CONFIG_TRAD_SIGNALS
+static int setup_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext(regs, &frame->sf_sc);
+ err |= __copy_to_user(&frame->sf_mask, set, sizeof(*set));
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+ return 0;
+}
+#endif
+
+static int setup_rt_frame(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe __user *frame;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ if (copy_siginfo_to_user(&frame->rs_info, &ksig->info))
+ return -EFAULT;
+
+ /* Create the ucontext. */
+ if (__put_user(0, &frame->rs_uc.uc_flags))
+ return -EFAULT;
+ if (__put_user(NULL, &frame->rs_uc.uc_link))
+ return -EFAULT;
+ if (__save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]))
+ return -EFAULT;
+ if (setup_sigcontext(regs, &frame->rs_uc.uc_mcontext))
+ return -EFAULT;
+ if (__copy_to_user(&frame->rs_uc.uc_sigmask, set, sizeof(*set)))
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+struct mips_abi mips_abi = {
+#ifdef CONFIG_TRAD_SIGNALS
+ .setup_frame = setup_frame,
+#endif
+ .setup_rt_frame = setup_rt_frame,
+ .restart = __NR_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
+
+ .vdso = &vdso_image,
+};
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+ struct mips_abi *abi = current->thread.abi;
+ void *vdso = current->mm->context.vdso;
+
+ /*
+ * If we were emulating a delay slot instruction, exit that frame such
+ * that addresses in the sigframe are as expected for userland and we
+ * don't have a problem if we reuse the thread's frame for an
+ * instruction within the signal handler.
+ */
+ dsemul_thread_rollback(regs);
+
+ if (regs->regs[0]) {
+ switch(regs->regs[2]) {
+ case ERESTART_RESTARTBLOCK:
+ case ERESTARTNOHAND:
+ regs->regs[2] = EINTR;
+ break;
+ case ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->regs[2] = EINTR;
+ break;
+ }
+ fallthrough;
+ case ERESTARTNOINTR:
+ regs->regs[7] = regs->regs[26];
+ regs->regs[2] = regs->regs[0];
+ regs->cp0_epc -= 4;
+ }
+
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ rseq_signal_deliver(ksig, regs);
+
+ if (sig_uses_siginfo(&ksig->ka, abi))
+ ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
+ ksig, regs, oldset);
+ else
+ ret = abi->setup_frame(vdso + abi->vdso->off_sigreturn,
+ ksig, regs, oldset);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Whee! Actually deliver the signal. */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ if (regs->regs[0]) {
+ switch (regs->regs[2]) {
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ regs->regs[2] = regs->regs[0];
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 4;
+ break;
+
+ case ERESTART_RESTARTBLOCK:
+ regs->regs[2] = current->thread.abi->restart;
+ regs->regs[7] = regs->regs[26];
+ regs->cp0_epc -= 4;
+ break;
+ }
+ regs->regs[0] = 0; /* Don't deal with this again. */
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+	 * back.
+ */
+ restore_saved_sigmask();
+}
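On MIPS a syscall instruction is 4 bytes, so backing cp0_epc up by 4 re-issues it; which of the ERESTART* codes is sitting in v0 (regs[2]) decides whether the same syscall is restarted or restart_syscall() is substituted. A compact sketch of just that decision (the errno values match the generic kernel definitions):

```c
/*
 * Illustrative syscall-restart decision as made on the no-handler path
 * in do_signal(); the values mirror the kernel's ERESTART* codes.
 */
#include <stdio.h>

#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516

static const char *decide(int err)
{
	switch (err) {
	case ERESTARTNOHAND:
	case ERESTARTSYS:
	case ERESTARTNOINTR:
		return "re-issue same syscall (epc -= 4)";
	case ERESTART_RESTARTBLOCK:
		return "re-issue as restart_syscall (epc -= 4)";
	default:
		return "no restart";
	}
}

int main(void)
{
	int codes[] = { ERESTARTSYS, ERESTART_RESTARTBLOCK, 11 };

	for (unsigned int i = 0; i < sizeof(codes) / sizeof(codes[0]); i++)
		printf("%d: %s\n", codes[i], decide(codes[i]));
	return 0;
}
```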
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by the TIF_WORK_MASK flags
+ */
+asmlinkage void do_notify_resume(struct pt_regs *regs, void *unused,
+ __u32 thread_info_flags)
+{
+ local_irq_enable();
+
+ user_exit();
+
+ if (thread_info_flags & _TIF_UPROBE)
+ uprobe_notify_resume(regs);
+
+ /* deal with pending signal delivery */
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ resume_user_mode_work(regs);
+
+ user_enter();
+}
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
+static int smp_save_fp_context(void __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? save_hw_fp_context(sc)
+ : copy_fp_to_sigcontext(sc);
+}
+
+static int smp_restore_fp_context(void __user *sc)
+{
+ return raw_cpu_has_fpu
+ ? restore_hw_fp_context(sc)
+ : copy_fp_from_sigcontext(sc);
+}
+#endif
+
+static int signal_setup(void)
+{
+ /*
+ * The offset from sigcontext to extended context should be the same
+ * regardless of the type of signal, such that userland can always know
+ * where to look if it wishes to find the extended context structures.
+ */
+ BUILD_BUG_ON((offsetof(struct sigframe, sf_extcontext) -
+ offsetof(struct sigframe, sf_sc)) !=
+ (offsetof(struct rt_sigframe, rs_uc.uc_extcontext) -
+ offsetof(struct rt_sigframe, rs_uc.uc_mcontext)));
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MIPS_FP_SUPPORT)
+ /* For now just do the cpu_has_fpu check when the functions are invoked */
+ save_fp_context = smp_save_fp_context;
+ restore_fp_context = smp_restore_fp_context;
+#else
+ if (cpu_has_fpu) {
+ save_fp_context = save_hw_fp_context;
+ restore_fp_context = restore_hw_fp_context;
+ } else {
+ save_fp_context = copy_fp_to_sigcontext;
+ restore_fp_context = copy_fp_from_sigcontext;
+ }
+#endif /* CONFIG_SMP */
+
+ return 0;
+}
+
+arch_initcall(signal_setup);
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
new file mode 100644
index 000000000..59b896543
--- /dev/null
+++ b/arch/mips/kernel/signal32.c
@@ -0,0 +1,78 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compat.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/syscalls.h>
+
+#include <asm/compat-signal.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/* 32-bit compatibility types */
+
+typedef unsigned int __sighandler32_t;
+typedef void (*vfptr_t)(void);
+
+/*
+ * Atomically swap in the new signal mask, and wait for a signal.
+ */
+
+asmlinkage int sys32_sigsuspend(compat_sigset_t __user *uset)
+{
+ return compat_sys_rt_sigsuspend(uset, sizeof(compat_sigset_t));
+}
+
+SYSCALL_DEFINE3(32_sigaction, long, sig, const struct compat_sigaction __user *, act,
+ struct compat_sigaction __user *, oact)
+{
+ struct k_sigaction new_ka, old_ka;
+ int ret;
+ int err = 0;
+
+ if (act) {
+ old_sigset_t mask;
+ s32 handler;
+
+ if (!access_ok(act, sizeof(*act)))
+ return -EFAULT;
+ err |= __get_user(handler, &act->sa_handler);
+ new_ka.sa.sa_handler = (void __user *)(s64)handler;
+ err |= __get_user(new_ka.sa.sa_flags, &act->sa_flags);
+ err |= __get_user(mask, &act->sa_mask.sig[0]);
+ if (err)
+ return -EFAULT;
+
+ siginitset(&new_ka.sa.sa_mask, mask);
+ }
+
+ ret = do_sigaction(sig, act ? &new_ka : NULL, oact ? &old_ka : NULL);
+
+ if (!ret && oact) {
+ if (!access_ok(oact, sizeof(*oact)))
+ return -EFAULT;
+ err |= __put_user(old_ka.sa.sa_flags, &oact->sa_flags);
+ err |= __put_user((u32)(u64)old_ka.sa.sa_handler,
+ &oact->sa_handler);
+ err |= __put_user(old_ka.sa.sa_mask.sig[0], oact->sa_mask.sig);
+ err |= __put_user(0, &oact->sa_mask.sig[1]);
+ err |= __put_user(0, &oact->sa_mask.sig[2]);
+ err |= __put_user(0, &oact->sa_mask.sig[3]);
+ if (err)
+ return -EFAULT;
+ }
+
+ return ret;
+}
diff --git a/arch/mips/kernel/signal_n32.c b/arch/mips/kernel/signal_n32.c
new file mode 100644
index 000000000..cfc77b694
--- /dev/null
+++ b/arch/mips/kernel/signal_n32.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2003 Broadcom Corporation
+ */
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/compat.h>
+#include <linux/bitops.h>
+
+#include <asm/abi.h>
+#include <asm/asm.h>
+#include <asm/cacheflush.h>
+#include <asm/compat-signal.h>
+#include <asm/sim.h>
+#include <linux/uaccess.h>
+#include <asm/ucontext.h>
+#include <asm/fpu.h>
+#include <asm/cpu-features.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
+ */
+#define __NR_N32_restart_syscall 6214
+
+extern int setup_sigcontext(struct pt_regs *, struct sigcontext __user *);
+extern int restore_sigcontext(struct pt_regs *, struct sigcontext __user *);
+
+struct ucontextn32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe_n32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ struct compat_siginfo rs_info;
+ struct ucontextn32 rs_uc;
+};
+
+asmlinkage void sysn32_rt_sigreturn(void)
+{
+ struct rt_sigframe_n32 __user *frame;
+ struct pt_regs *regs;
+ sigset_t set;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe_n32 __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+
+static int setup_rt_frame_n32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe_n32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Create siginfo. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+struct mips_abi mips_abi_n32 = {
+ .setup_rt_frame = setup_rt_frame_n32,
+ .restart = __NR_N32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext, sc_used_math),
+
+ .vdso = &vdso_image_n32,
+};
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
new file mode 100644
index 000000000..299a7a28c
--- /dev/null
+++ b/arch/mips/kernel/signal_o32.c
@@ -0,0 +1,290 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/sched/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/abi.h>
+#include <asm/compat-signal.h>
+#include <asm/dsp.h>
+#include <asm/sim.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give us the 64-bit syscall numbers ...
+ */
+#define __NR_O32_restart_syscall 4253
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+ struct sigcontext32 sf_sc;
+ compat_sigset_t sf_mask;
+};
+
+struct ucontext32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext32 uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+};
+
+static int setup_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static int restore_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ s32 treg;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+static int setup_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext32(regs, &frame->sf_sc);
+ err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+asmlinkage void sys32_rt_sigreturn(void)
+{
+ struct rt_sigframe32 __user *frame;
+ struct pt_regs *regs;
+ sigset_t set;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct rt_sigframe32 __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
+
+static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe32.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+struct mips_abi mips_abi_32 = {
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
+ .restart = __NR_O32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
+
+ .vdso = &vdso_image_o32,
+};
+
+asmlinkage void sys32_sigreturn(void)
+{
+ struct sigframe32 __user *frame;
+ struct pt_regs *regs;
+ sigset_t blocked;
+ int sig;
+
+ regs = current_pt_regs();
+ frame = (struct sigframe32 __user *)regs->regs[29];
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext32(regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ : /* no outputs */
+ : "r" (regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV);
+}
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
new file mode 100644
index 000000000..f5d7bfa34
--- /dev/null
+++ b/arch/mips/kernel/smp-bmips.c
@@ -0,0 +1,672 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011 by Kevin Cernekee (cernekee@gmail.com)
+ *
+ * SMP support for BMIPS
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/hotplug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/interrupt.h>
+#include <linux/spinlock.h>
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/reboot.h>
+#include <linux/io.h>
+#include <linux/compiler.h>
+#include <linux/linkage.h>
+#include <linux/bug.h>
+#include <linux/kernel.h>
+#include <linux/kexec.h>
+#include <linux/irq.h>
+
+#include <asm/time.h>
+#include <asm/processor.h>
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+#include <asm/mipsregs.h>
+#include <asm/bmips.h>
+#include <asm/traps.h>
+#include <asm/barrier.h>
+#include <asm/cpu-features.h>
+
+static int __maybe_unused max_cpus = 1;
+
+/* these may be configured by the platform code */
+int bmips_smp_enabled = 1;
+int bmips_cpu_offset;
+cpumask_t bmips_booted_mask;
+unsigned long bmips_tp1_irqs = IE_IRQ1;
+
+#define RESET_FROM_KSEG0 0x80080800
+#define RESET_FROM_KSEG1 0xa0080800
+
+static void bmips_set_reset_vec(int cpu, u32 val);
+
+#ifdef CONFIG_SMP
+
+/* initial $sp, $gp - used by arch/mips/kernel/bmips_vec.S */
+unsigned long bmips_smp_boot_sp;
+unsigned long bmips_smp_boot_gp;
+
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action);
+static void bmips5000_send_ipi_single(int cpu, unsigned int action);
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id);
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id);
+
+/* SW interrupts 0,1 are used for interprocessor signaling */
+#define IPI0_IRQ (MIPS_CPU_IRQ_BASE + 0)
+#define IPI1_IRQ (MIPS_CPU_IRQ_BASE + 1)
+
+#define CPUNUM(cpu, shift) (((cpu) + bmips_cpu_offset) << (shift))
+#define ACTION_CLR_IPI(cpu, ipi) (0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_SET_IPI(cpu, ipi) (0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
+#define ACTION_BOOT_THREAD(cpu) (0x08 | CPUNUM(cpu, 0))
+
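The action word written to the CP0 BRCM action register packs a command in the high bits and the target thread number lower down, with bmips_cpu_offset folded in. A sketch that expands the same macros in user space (cpu_offset stands in for bmips_cpu_offset):

```c
/*
 * Illustrative expansion of the BMIPS action-word encodings used with
 * write_c0_brcm_action(); cpu_offset mirrors bmips_cpu_offset (0 here).
 */
#include <stdio.h>

static int cpu_offset;

#define CPUNUM(cpu, shift)		(((cpu) + cpu_offset) << (shift))
#define ACTION_CLR_IPI(cpu, ipi)	(0x2000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_SET_IPI(cpu, ipi)	(0x3000 | CPUNUM(cpu, 9) | ((ipi) << 8))
#define ACTION_BOOT_THREAD(cpu)		(0x08 | CPUNUM(cpu, 0))

int main(void)
{
	printf("set IPI1 on cpu2: %#x\n", ACTION_SET_IPI(2, 1));
	printf("clr IPI0 on cpu2: %#x\n", ACTION_CLR_IPI(2, 0));
	printf("boot thread cpu1: %#x\n", ACTION_BOOT_THREAD(1));
	return 0;
}
```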
+static void __init bmips_smp_setup(void)
+{
+ int i, cpu = 1, boot_cpu = 0;
+ int cpu_hw_intr;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* arbitration priority */
+ clear_c0_brcm_cmt_ctrl(0x30);
+
+ /* NBK and weak order flags */
+ set_c0_brcm_config_0(0x30000);
+
+ /* Find out if we are running on TP0 or TP1 */
+ boot_cpu = !!(read_c0_brcm_cmt_local() & (1 << 31));
+
+ /*
+ * MIPS interrupts 0,1 (SW INT 0,1) cross over to the other
+ * thread
+ * MIPS interrupt 2 (HW INT 0) is the CPU0 L1 controller output
+ * MIPS interrupt 3 (HW INT 1) is the CPU1 L1 controller output
+ */
+ if (boot_cpu == 0)
+ cpu_hw_intr = 0x02;
+ else
+ cpu_hw_intr = 0x1d;
+
+ change_c0_brcm_cmt_intr(0xf8018000,
+ (cpu_hw_intr << 27) | (0x03 << 15));
+
+ /* single core, 2 threads (2 pipelines) */
+ max_cpus = 2;
+
+ break;
+ case CPU_BMIPS5000:
+ /* enable raceless SW interrupts */
+ set_c0_brcm_config(0x03 << 22);
+
+ /* route HW interrupt 0 to CPU0, HW interrupt 1 to CPU1 */
+ change_c0_brcm_mode(0x1f << 27, 0x02 << 27);
+
+ /* N cores, 2 threads per core */
+ max_cpus = (((read_c0_brcm_config() >> 6) & 0x03) + 1) << 1;
+
+ /* clear any pending SW interrupts */
+ for (i = 0; i < max_cpus; i++) {
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 0));
+ write_c0_brcm_action(ACTION_CLR_IPI(i, 1));
+ }
+
+ break;
+ default:
+ max_cpus = 1;
+ }
+
+ if (!bmips_smp_enabled)
+ max_cpus = 1;
+
+ /* this can be overridden by the BSP */
+ if (!board_ebase_setup)
+ board_ebase_setup = &bmips_ebase_setup;
+
+ if (max_cpus > 1) {
+ __cpu_number_map[boot_cpu] = 0;
+ __cpu_logical_map[0] = boot_cpu;
+
+ for (i = 0; i < max_cpus; i++) {
+ if (i != boot_cpu) {
+ __cpu_number_map[i] = cpu;
+ __cpu_logical_map[cpu] = i;
+ cpu++;
+ }
+ set_cpu_possible(i, 1);
+ set_cpu_present(i, 1);
+ }
+ } else {
+ __cpu_number_map[0] = boot_cpu;
+ __cpu_logical_map[0] = 0;
+ set_cpu_possible(0, 1);
+ set_cpu_present(0, 1);
+ }
+}
+
+/*
+ * IPI IRQ setup - runs on CPU0
+ */
+static void bmips_prepare_cpus(unsigned int max_cpus)
+{
+ irqreturn_t (*bmips_ipi_interrupt)(int irq, void *dev_id);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ bmips_ipi_interrupt = bmips43xx_ipi_interrupt;
+ break;
+ case CPU_BMIPS5000:
+ bmips_ipi_interrupt = bmips5000_ipi_interrupt;
+ break;
+ default:
+ return;
+ }
+
+ if (request_irq(IPI0_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi0", NULL))
+ panic("Can't request IPI0 interrupt");
+ if (request_irq(IPI1_IRQ, bmips_ipi_interrupt,
+ IRQF_PERCPU | IRQF_NO_SUSPEND, "smp_ipi1", NULL))
+ panic("Can't request IPI1 interrupt");
+}
+
+/*
+ * Tell the hardware to boot CPUx - runs on CPU0
+ */
+static int bmips_boot_secondary(int cpu, struct task_struct *idle)
+{
+ bmips_smp_boot_sp = __KSTK_TOS(idle);
+ bmips_smp_boot_gp = (unsigned long)task_thread_info(idle);
+ mb();
+
+ /*
+ * Initial boot sequence for secondary CPU:
+ * bmips_reset_nmi_vec @ a000_0000 ->
+ * bmips_smp_entry ->
+ * plat_wired_tlb_setup (cached function call; optional) ->
+ * start_secondary (cached jump)
+ *
+ * Warm restart sequence:
+ * play_dead WAIT loop ->
+ * bmips_smp_int_vec @ BMIPS_WARM_RESTART_VEC ->
+ * eret to play_dead ->
+ * bmips_secondary_reentry ->
+ * start_secondary
+ */
+
+ pr_info("SMP: Booting CPU%d...\n", cpu);
+
+ if (cpumask_test_cpu(cpu, &bmips_booted_mask)) {
+ /* kseg1 might not exist if this CPU enabled XKS01 */
+ bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ bmips43xx_send_ipi_single(cpu, 0);
+ break;
+ case CPU_BMIPS5000:
+ bmips5000_send_ipi_single(cpu, 0);
+ break;
+ }
+ } else {
+ bmips_set_reset_vec(cpu, RESET_FROM_KSEG1);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ /* Reset slave TP1 if booting from TP0 */
+ if (cpu_logical_map(cpu) == 1)
+ set_c0_brcm_cmt_ctrl(0x01);
+ break;
+ case CPU_BMIPS5000:
+ write_c0_brcm_action(ACTION_BOOT_THREAD(cpu));
+ break;
+ }
+ cpumask_set_cpu(cpu, &bmips_booted_mask);
+ }
+
+ return 0;
+}
+
+/*
+ * Early setup - runs on secondary CPU after cache probe
+ */
+static void bmips_init_secondary(void)
+{
+ bmips_cpu_setup();
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ case CPU_BMIPS4380:
+ clear_c0_cause(smp_processor_id() ? C_SW1 : C_SW0);
+ break;
+ case CPU_BMIPS5000:
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+ cpu_set_core(&current_cpu_data, (read_c0_brcm_config() >> 25) & 3);
+ break;
+ }
+}
+
+/*
+ * Late setup - runs on secondary CPU before entering the idle loop
+ */
+static void bmips_smp_finish(void)
+{
+ pr_info("SMP: CPU%d is running\n", smp_processor_id());
+
+ /* make sure there won't be a timer interrupt for a little while */
+ write_c0_compare(read_c0_count() + mips_hpt_frequency / HZ);
+
+ irq_enable_hazard();
+ set_c0_status(IE_SW0 | IE_SW1 | bmips_tp1_irqs | IE_IRQ5 | ST0_IE);
+ irq_enable_hazard();
+}
+
+/*
+ * BMIPS5000 raceless IPIs
+ *
+ * Each CPU has two inbound SW IRQs which are independent of all other CPUs.
+ * IPI0 is used for SMP_RESCHEDULE_YOURSELF
+ * IPI1 is used for SMP_CALL_FUNCTION
+ */
+
+static void bmips5000_send_ipi_single(int cpu, unsigned int action)
+{
+ write_c0_brcm_action(ACTION_SET_IPI(cpu, action == SMP_CALL_FUNCTION));
+}
+
+static irqreturn_t bmips5000_ipi_interrupt(int irq, void *dev_id)
+{
+ int action = irq - IPI0_IRQ;
+
+ write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), action));
+
+ if (action == 0)
+ scheduler_ipi();
+ else
+ generic_smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void bmips5000_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips5000_send_ipi_single(i, action);
+}
+
+/*
+ * BMIPS43xx racey IPIs
+ *
+ * We use one inbound SW IRQ for each CPU.
+ *
+ * A spinlock must be held in order to keep CPUx from accidentally clearing
+ * an incoming IPI when it writes CP0 CAUSE to raise an IPI on CPUy. The
+ * same spinlock is used to protect the action masks.
+ */
+
+static DEFINE_SPINLOCK(ipi_lock);
+static DEFINE_PER_CPU(int, ipi_action_mask);
+
+static void bmips43xx_send_ipi_single(int cpu, unsigned int action)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ set_c0_cause(cpu ? C_SW1 : C_SW0);
+ per_cpu(ipi_action_mask, cpu) |= action;
+ irq_enable_hazard();
+ spin_unlock_irqrestore(&ipi_lock, flags);
+}
+
+static irqreturn_t bmips43xx_ipi_interrupt(int irq, void *dev_id)
+{
+ unsigned long flags;
+ int action, cpu = irq - IPI0_IRQ;
+
+ spin_lock_irqsave(&ipi_lock, flags);
+ action = __this_cpu_read(ipi_action_mask);
+ per_cpu(ipi_action_mask, cpu) = 0;
+ clear_c0_cause(cpu ? C_SW1 : C_SW0);
+ spin_unlock_irqrestore(&ipi_lock, flags);
+
+ if (action & SMP_RESCHEDULE_YOURSELF)
+ scheduler_ipi();
+ if (action & SMP_CALL_FUNCTION)
+ generic_smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void bmips43xx_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int i;
+
+ for_each_cpu(i, mask)
+ bmips43xx_send_ipi_single(i, action);
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int bmips_cpu_disable(void)
+{
+ unsigned int cpu = smp_processor_id();
+
+ pr_info("SMP: CPU%d is offline\n", cpu);
+
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ irq_migrate_all_off_this_cpu();
+ clear_c0_status(IE_IRQ5);
+
+ local_flush_tlb_all();
+ local_flush_icache_range(0, ~0);
+
+ return 0;
+}
+
+static void bmips_cpu_die(unsigned int cpu)
+{
+}
+
+void __ref play_dead(void)
+{
+ idle_task_exit();
+
+ /* flush data cache */
+ _dma_cache_wback_inv(0, ~0);
+
+ /*
+ * Wakeup is on SW0 or SW1; disable everything else
+ * Use BEV !IV (BMIPS_WARM_RESTART_VEC) to avoid the regular Linux
+ * IRQ handlers; this clears ST0_IE and returns immediately.
+ */
+ clear_c0_cause(CAUSEF_IV | C_SW0 | C_SW1);
+ change_c0_status(
+ IE_IRQ5 | bmips_tp1_irqs | IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV,
+ IE_SW0 | IE_SW1 | ST0_IE | ST0_BEV);
+ irq_disable_hazard();
+
+ /*
+ * wait for SW interrupt from bmips_boot_secondary(), then jump
+ * back to start_secondary()
+ */
+ __asm__ __volatile__(
+ " wait\n"
+ " j bmips_secondary_reentry\n"
+ : : : "memory");
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+const struct plat_smp_ops bmips43xx_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .send_ipi_single = bmips43xx_send_ipi_single,
+ .send_ipi_mask = bmips43xx_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
+
+const struct plat_smp_ops bmips5000_smp_ops = {
+ .smp_setup = bmips_smp_setup,
+ .prepare_cpus = bmips_prepare_cpus,
+ .boot_secondary = bmips_boot_secondary,
+ .smp_finish = bmips_smp_finish,
+ .init_secondary = bmips_init_secondary,
+ .send_ipi_single = bmips5000_send_ipi_single,
+ .send_ipi_mask = bmips5000_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = bmips_cpu_disable,
+ .cpu_die = bmips_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = kexec_nonboot_cpu_jump,
+#endif
+};
+
+#endif /* CONFIG_SMP */
+
+/***********************************************************************
+ * BMIPS vector relocation
+ * This is primarily used for SMP boot, but it is applicable to some
+ * UP BMIPS systems as well.
+ ***********************************************************************/
+
+static void bmips_wr_vec(unsigned long dst, char *start, char *end)
+{
+ memcpy((void *)dst, start, end - start);
+ dma_cache_wback(dst, end - start);
+ local_flush_icache_range(dst, dst + (end - start));
+ instruction_hazard();
+}
+
+static inline void bmips_nmi_handler_setup(void)
+{
+ bmips_wr_vec(BMIPS_NMI_RESET_VEC, bmips_reset_nmi_vec,
+ bmips_reset_nmi_vec_end);
+ bmips_wr_vec(BMIPS_WARM_RESTART_VEC, bmips_smp_int_vec,
+ bmips_smp_int_vec_end);
+}
+
+struct reset_vec_info {
+ int cpu;
+ u32 val;
+};
+
+static void bmips_set_reset_vec_remote(void *vinfo)
+{
+ struct reset_vec_info *info = vinfo;
+ int shift = info->cpu & 0x01 ? 16 : 0;
+ u32 mask = ~(0xffff << shift), val = info->val >> 16;
+
+ preempt_disable();
+ if (smp_processor_id() > 0) {
+ smp_call_function_single(0, &bmips_set_reset_vec_remote,
+ info, 1);
+ } else {
+ if (info->cpu & 0x02) {
+ /* BMIPS5200 "should" use mask/shift, but it's buggy */
+ bmips_write_zscm_reg(0xa0, (val << 16) | val);
+ bmips_read_zscm_reg(0xa0);
+ } else {
+ write_c0_brcm_bootvec((read_c0_brcm_bootvec() & mask) |
+ (val << shift));
+ }
+ }
+ preempt_enable();
+}
+
+static void bmips_set_reset_vec(int cpu, u32 val)
+{
+ struct reset_vec_info info;
+
+ if (current_cpu_type() == CPU_BMIPS5000) {
+ /* this needs to run from CPU0 (which is always online) */
+ info.cpu = cpu;
+ info.val = val;
+ bmips_set_reset_vec_remote(&info);
+ } else {
+ void __iomem *cbr = BMIPS_GET_CBR();
+
+ if (cpu == 0)
+ __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_0);
+ else {
+ if (current_cpu_type() != CPU_BMIPS4380)
+ return;
+ __raw_writel(val, cbr + BMIPS_RELO_VECTOR_CONTROL_1);
+ }
+ }
+ __sync();
+ back_to_back_c0_hazard();
+}
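+
+/*
+ * Usage sketch (illustrative; the real boot path appears earlier in
+ * this file): point the reset vector at the desired stub before
+ * kicking the secondary, e.g.
+ *
+ *	bmips_set_reset_vec(cpu, RESET_FROM_KSEG0);
+ *	mp_ops->send_ipi_single(cpu, 0);
+ */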
+
+void bmips_ebase_setup(void)
+{
+ unsigned long new_ebase = ebase;
+
+ BUG_ON(ebase != CKSEG0);
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS4350:
+ /*
+ * BMIPS4350 cannot relocate the normal vectors, but it
+ * can relocate the BEV=1 vectors. So CPU1 starts up at
+ * the relocated BEV=1, IV=0 general exception vector @
+ * 0xa000_0380.
+ *
+ * set_uncached_handler() is used here because:
+ * - CPU1 will run this from uncached space
+ * - None of the cacheflush functions are set up yet
+ */
+ set_uncached_handler(BMIPS_WARM_RESTART_VEC - CKSEG0,
+ &bmips_smp_int_vec, 0x80);
+ __sync();
+ return;
+ case CPU_BMIPS3300:
+ case CPU_BMIPS4380:
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_0400: normal vectors
+ */
+ new_ebase = 0x80000400;
+ bmips_set_reset_vec(0, RESET_FROM_KSEG0);
+ break;
+ case CPU_BMIPS5000:
+ /*
+ * 0x8000_0000: reset/NMI (initially in kseg1)
+ * 0x8000_1000: normal vectors
+ */
+ new_ebase = 0x80001000;
+ bmips_set_reset_vec(0, RESET_FROM_KSEG0);
+ write_c0_ebase(new_ebase);
+ break;
+ default:
+ return;
+ }
+
+ board_nmi_handler_setup = &bmips_nmi_handler_setup;
+ ebase = new_ebase;
+}
+
+asmlinkage void __weak plat_wired_tlb_setup(void)
+{
+ /*
+ * Called when starting/restarting a secondary CPU.
+ * Kernel stacks and other important data might only be accessible
+ * once the wired entries are present.
+ */
+}
+
+void bmips_cpu_setup(void)
+{
+ void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
+ u32 __maybe_unused cfg;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS3300:
+ /* Set BIU to async mode */
+ set_c0_brcm_bus_pll(BIT(22));
+ __sync();
+
+ /* put the BIU back in sync mode */
+ clear_c0_brcm_bus_pll(BIT(22));
+
+ /* clear BHTD to enable branch history table */
+ clear_c0_brcm_reset(BIT(16));
+
+ /* Flush and enable RAC */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, cbr + BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+ cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ __raw_writel(cfg | 0x0fff0000, cbr + BMIPS_RAC_ADDRESS_RANGE);
+ __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ break;
+
+ case CPU_BMIPS4380:
+ /* CBG workaround for early BMIPS4380 CPUs */
+ switch (read_c0_prid()) {
+ case 0x2a040:
+ case 0x2a042:
+ case 0x2a044:
+ case 0x2a060:
+ cfg = __raw_readl(cbr + BMIPS_L2_CONFIG);
+ __raw_writel(cfg & ~0x07000000, cbr + BMIPS_L2_CONFIG);
+ __raw_readl(cbr + BMIPS_L2_CONFIG);
+ }
+
+ /* clear BHTD to enable branch history table */
+ clear_c0_brcm_config_0(BIT(21));
+
+ /* XI/ROTR enable */
+ set_c0_brcm_config_0(BIT(23));
+ set_c0_brcm_cmt_ctrl(BIT(15));
+ break;
+
+ case CPU_BMIPS5000:
+ /* enable RDHWR, BRDHWR */
+ set_c0_brcm_config(BIT(17) | BIT(21));
+
+ /* Disable JTB */
+ __asm__ __volatile__(
+ " .set noreorder\n"
+ " li $8, 0x5a455048\n"
+ " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
+ " .word 0x4008b008\n" /* mfc0 t0, $22, 8 */
+ " li $9, 0x00008000\n"
+ " or $8, $8, $9\n"
+ " .word 0x4088b008\n" /* mtc0 t0, $22, 8 */
+ " sync\n"
+ " li $8, 0x0\n"
+ " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
+ " .set reorder\n"
+ : : : "$8", "$9");
+
+ /* XI enable */
+ set_c0_brcm_config(BIT(27));
+
+ /* enable MIPS32R2 ROR instruction for XI TLB handlers */
+ __asm__ __volatile__(
+ " li $8, 0x5a455048\n"
+ " .word 0x4088b00f\n" /* mtc0 $8, $22, 15 */
+ " nop; nop; nop\n"
+ " .word 0x4008b008\n" /* mfc0 $8, $22, 8 */
+ " lui $9, 0x0100\n"
+ " or $8, $9\n"
+ " .word 0x4088b008\n" /* mtc0 $8, $22, 8 */
+ : : : "$8", "$9");
+ break;
+ }
+}
diff --git a/arch/mips/kernel/smp-cmp.c b/arch/mips/kernel/smp-cmp.c
new file mode 100644
index 000000000..76f5824cd
--- /dev/null
+++ b/arch/mips/kernel/smp-cmp.c
@@ -0,0 +1,148 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2007 MIPS Technologies, Inc.
+ * Chris Dearman (chris@mips.com)
+ */
+
+#undef DEBUG
+
+#include <linux/kernel.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/smp.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/amon.h>
+
+static void cmp_init_secondary(void)
+{
+ struct cpuinfo_mips *c __maybe_unused = &current_cpu_data;
+
+ /* Assume GIC is present */
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
+ STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
+
+ /* Enable per-cpu interrupts: platform specific */
+
+#ifdef CONFIG_MIPS_MT_SMP
+ if (cpu_has_mipsmt)
+ cpu_set_vpe_id(c, (read_c0_tcbind() >> TCBIND_CURVPE_SHIFT) &
+ TCBIND_CURVPE);
+#endif
+}
+
+static void cmp_smp_finish(void)
+{
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+ /* CDFIXME: remove this? */
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it running.
+ * smp_bootstrap is the place to resume from;
+ * __KSTK_TOS(idle) is apparently the stack pointer;
+ * (unsigned long)idle->thread_info is the gp.
+ */
+static int cmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct thread_info *gp = task_thread_info(idle);
+ unsigned long sp = __KSTK_TOS(idle);
+ unsigned long pc = (unsigned long)&smp_bootstrap;
+ unsigned long a0 = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s cpu %d\n", smp_processor_id(),
+ __func__, cpu);
+
+#if 0
+ /* Needed? */
+ flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
+#endif
+
+ amon_cpu_start(cpu, pc, sp, (unsigned long)gp, a0);
+ return 0;
+}
+
+/*
+ * Common setup before any secondaries are started
+ */
+void __init cmp_smp_setup(void)
+{
+ int i;
+ int ncpu = 0;
+
+ pr_debug("SMPCMP: CPU%d: %s\n", smp_processor_id(), __func__);
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ for (i = 1; i < NR_CPUS; i++) {
+ if (amon_cpu_avail(i)) {
+ set_cpu_possible(i, true);
+ __cpu_number_map[i] = ++ncpu;
+ __cpu_logical_map[ncpu] = i;
+ }
+ }
+
+ if (cpu_has_mipsmt) {
+ unsigned int nvpe = 1;
+#ifdef CONFIG_MIPS_MT_SMP
+ unsigned int mvpconf0 = read_c0_mvpconf0();
+
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+#endif
+ smp_num_siblings = nvpe;
+ }
+ pr_info("Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+void __init cmp_prepare_cpus(unsigned int max_cpus)
+{
+ pr_debug("SMPCMP: CPU%d: %s max_cpus=%d\n",
+ smp_processor_id(), __func__, max_cpus);
+
+#ifdef CONFIG_MIPS_MT
+ /*
+ * FIXME: some of these options are per-system, some per-core and
+ * some per-cpu
+ */
+ mips_mt_set_cpuoptions();
+#endif
+
+}
+
+const struct plat_smp_ops cmp_smp_ops = {
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
+ .init_secondary = cmp_init_secondary,
+ .smp_finish = cmp_smp_finish,
+ .boot_secondary = cmp_boot_secondary,
+ .smp_setup = cmp_smp_setup,
+ .prepare_cpus = cmp_prepare_cpus,
+};
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
new file mode 100644
index 000000000..bcd6a944b
--- /dev/null
+++ b/arch/mips/kernel/smp-cps.c
@@ -0,0 +1,644 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Imagination Technologies
+ * Author: Paul Burton <paul.burton@mips.com>
+ */
+
+#include <linux/cpu.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/hotplug.h>
+#include <linux/slab.h>
+#include <linux/smp.h>
+#include <linux/types.h>
+#include <linux/irq.h>
+
+#include <asm/bcache.h>
+#include <asm/mips-cps.h>
+#include <asm/mips_mt.h>
+#include <asm/mipsregs.h>
+#include <asm/pm-cps.h>
+#include <asm/r4kcache.h>
+#include <asm/smp-cps.h>
+#include <asm/time.h>
+#include <asm/uasm.h>
+
+static bool threads_disabled;
+static DECLARE_BITMAP(core_power, NR_CPUS);
+
+struct core_boot_config *mips_cps_core_bootcfg;
+
+static int __init setup_nothreads(char *s)
+{
+ threads_disabled = true;
+ return 0;
+}
+early_param("nothreads", setup_nothreads);
+
+static unsigned core_vpe_count(unsigned int cluster, unsigned core)
+{
+ if (threads_disabled)
+ return 1;
+
+ return mips_cps_numvps(cluster, core);
+}
+
+static void __init cps_smp_setup(void)
+{
+ unsigned int nclusters, ncores, nvpes, core_vpes;
+ unsigned long core_entry;
+ int cl, c, v;
+
+ /* Detect & record VPE topology */
+ nvpes = 0;
+ nclusters = mips_cps_numclusters();
+ pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
+ for (cl = 0; cl < nclusters; cl++) {
+ if (cl > 0)
+ pr_cont(",");
+ pr_cont("{");
+
+ ncores = mips_cps_numcores(cl);
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(cl, c);
+
+ if (c > 0)
+ pr_cont(",");
+ pr_cont("%u", core_vpes);
+
+ /* Use the number of VPEs in cluster 0 core 0 for smp_num_siblings */
+ if (!cl && !c)
+ smp_num_siblings = core_vpes;
+
+ for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
+ cpu_set_cluster(&cpu_data[nvpes + v], cl);
+ cpu_set_core(&cpu_data[nvpes + v], c);
+ cpu_set_vpe_id(&cpu_data[nvpes + v], v);
+ }
+
+ nvpes += core_vpes;
+ }
+
+ pr_cont("}");
+ }
+ pr_cont(" total %u\n", nvpes);
+
+ /* Indicate present CPUs (CPU being synonymous with VPE) */
+ for (v = 0; v < min_t(unsigned, nvpes, NR_CPUS); v++) {
+ set_cpu_possible(v, cpu_cluster(&cpu_data[v]) == 0);
+ set_cpu_present(v, cpu_cluster(&cpu_data[v]) == 0);
+ __cpu_number_map[v] = v;
+ __cpu_logical_map[v] = v;
+ }
+
+ /* Set a coherent default CCA (CWB) */
+ change_c0_config(CONF_CM_CMASK, 0x5);
+
+ /* Core 0 is powered up (we're running on it) */
+ bitmap_set(core_power, 0, 1);
+
+ /* Initialise core 0 */
+ mips_cps_core_init();
+
+ /* Make core 0 coherent with everything */
+ write_gcr_cl_coherence(0xff);
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ write_gcr_bev_base(core_entry);
+ }
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+static void __init cps_prepare_cpus(unsigned int max_cpus)
+{
+ unsigned ncores, core_vpes, c, cca;
+ bool cca_unsuitable, cores_limited;
+ u32 *entry_code;
+
+ mips_mt_set_cpuoptions();
+
+ /* Detect whether the CCA is unsuited to multi-core SMP */
+ cca = read_c0_config() & CONF_CM_CMASK;
+ switch (cca) {
+ case 0x4: /* CWBE */
+ case 0x5: /* CWB */
+ /* The CCA is coherent, multi-core is fine */
+ cca_unsuitable = false;
+ break;
+
+ default:
+ /* CCA is not coherent, multi-core is not usable */
+ cca_unsuitable = true;
+ }
+
+ /* Warn the user if the CCA prevents multi-core */
+ cores_limited = false;
+ if (cca_unsuitable || cpu_has_dc_aliases) {
+ for_each_present_cpu(c) {
+ if (cpus_are_siblings(smp_processor_id(), c))
+ continue;
+
+ set_cpu_present(c, false);
+ cores_limited = true;
+ }
+ }
+ if (cores_limited)
+ pr_warn("Using only one core due to %s%s%s\n",
+ cca_unsuitable ? "unsuitable CCA" : "",
+ (cca_unsuitable && cpu_has_dc_aliases) ? " & " : "",
+ cpu_has_dc_aliases ? "dcache aliasing" : "");
+
+ /*
+ * Patch the start of mips_cps_core_entry to provide:
+ *
+ * s0 = kseg0 CCA
+ */
+ entry_code = (u32 *)&mips_cps_core_entry;
+ uasm_i_addiu(&entry_code, 16, 0, cca);
+ blast_dcache_range((unsigned long)&mips_cps_core_entry,
+ (unsigned long)entry_code);
+ bc_wback_inv((unsigned long)&mips_cps_core_entry,
+ (void *)entry_code - (void *)&mips_cps_core_entry);
+ __sync();
+
+ /* Allocate core boot configuration structs */
+ ncores = mips_cps_numcores(0);
+ mips_cps_core_bootcfg = kcalloc(ncores, sizeof(*mips_cps_core_bootcfg),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg) {
+ pr_err("Failed to allocate boot config for %u cores\n", ncores);
+ goto err_out;
+ }
+
+ /* Allocate VPE boot configuration structs */
+ for (c = 0; c < ncores; c++) {
+ core_vpes = core_vpe_count(0, c);
+ mips_cps_core_bootcfg[c].vpe_config = kcalloc(core_vpes,
+ sizeof(*mips_cps_core_bootcfg[c].vpe_config),
+ GFP_KERNEL);
+ if (!mips_cps_core_bootcfg[c].vpe_config) {
+ pr_err("Failed to allocate %u VPE boot configs\n",
+ core_vpes);
+ goto err_out;
+ }
+ }
+
+ /* Mark this CPU as booted */
+ atomic_set(&mips_cps_core_bootcfg[cpu_core(&current_cpu_data)].vpe_mask,
+ 1 << cpu_vpe_id(&current_cpu_data));
+
+ return;
+err_out:
+ /* Clean up allocations */
+ if (mips_cps_core_bootcfg) {
+ for (c = 0; c < ncores; c++)
+ kfree(mips_cps_core_bootcfg[c].vpe_config);
+ kfree(mips_cps_core_bootcfg);
+ mips_cps_core_bootcfg = NULL;
+ }
+
+ /* Effectively disable SMP by declaring CPUs not present */
+ for_each_possible_cpu(c) {
+ if (c == 0)
+ continue;
+ set_cpu_present(c, false);
+ }
+}
+
+static void boot_core(unsigned int core, unsigned int vpe_id)
+{
+ u32 stat, seq_state;
+ unsigned timeout;
+
+ /* Select the appropriate core */
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+
+ /* Set its reset vector */
+ write_gcr_co_reset_base(CKSEG1ADDR((unsigned long)mips_cps_core_entry));
+
+ /* Ensure its coherency is disabled */
+ write_gcr_co_coherence(0);
+
+ /* Start it with the legacy memory map and exception base */
+ write_gcr_co_reset_ext_base(CM_GCR_Cx_RESET_EXT_BASE_UEB);
+
+ /* Ensure the core can access the GCRs */
+ set_gcr_access(1 << core);
+
+ if (mips_cpc_present()) {
+ /* Reset the core */
+ mips_cpc_lock_other(core);
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ /* Run only the requested VP following the reset */
+ write_cpc_co_vp_stop(0xf);
+ write_cpc_co_vp_run(1 << vpe_id);
+
+ /*
+ * Ensure that the VP_RUN register is written before the
+ * core leaves reset.
+ */
+ wmb();
+ }
+
+ write_cpc_co_cmd(CPC_Cx_CMD_RESET);
+
+ timeout = 100;
+ while (true) {
+ stat = read_cpc_co_stat_conf();
+ seq_state = stat & CPC_Cx_STAT_CONF_SEQSTATE;
+ seq_state >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+
+ /* U6 == coherent execution, ie. the core is up */
+ if (seq_state == CPC_Cx_STAT_CONF_SEQSTATE_U6)
+ break;
+
+ /* Delay a little while before we start warning */
+ if (timeout) {
+ timeout--;
+ mdelay(10);
+ continue;
+ }
+
+ pr_warn("Waiting for core %u to start... STAT_CONF=0x%x\n",
+ core, stat);
+ mdelay(1000);
+ }
+
+ mips_cpc_unlock_other();
+ } else {
+ /* Take the core out of reset */
+ write_gcr_co_reset_release(0);
+ }
+
+ mips_cm_unlock_other();
+
+ /* The core is now powered up */
+ bitmap_set(core_power, core, 1);
+}
+
+static void remote_vpe_boot(void *dummy)
+{
+ unsigned core = cpu_core(&current_cpu_data);
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+
+ mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
+}
+
+static int cps_boot_secondary(int cpu, struct task_struct *idle)
+{
+ unsigned core = cpu_core(&cpu_data[cpu]);
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+ struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
+ unsigned long core_entry;
+ unsigned int remote;
+ int err;
+
+ /* We don't yet support booting CPUs in other clusters */
+ if (cpu_cluster(&cpu_data[cpu]) != cpu_cluster(&raw_current_cpu_data))
+ return -ENOSYS;
+
+ vpe_cfg->pc = (unsigned long)&smp_bootstrap;
+ vpe_cfg->sp = __KSTK_TOS(idle);
+ vpe_cfg->gp = (unsigned long)task_thread_info(idle);
+
+ atomic_or(1 << cpu_vpe_id(&cpu_data[cpu]), &core_cfg->vpe_mask);
+
+ preempt_disable();
+
+ if (!test_bit(core, core_power)) {
+ /* Boot a VPE on a powered down core */
+ boot_core(core, vpe_id);
+ goto out;
+ }
+
+ if (cpu_has_vp) {
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ write_gcr_co_reset_base(core_entry);
+ mips_cm_unlock_other();
+ }
+
+ if (!cpus_are_siblings(cpu, smp_processor_id())) {
+ /* Boot a VPE on another powered up core */
+ for (remote = 0; remote < NR_CPUS; remote++) {
+ if (!cpus_are_siblings(cpu, remote))
+ continue;
+ if (cpu_online(remote))
+ break;
+ }
+ if (remote >= NR_CPUS) {
+ pr_crit("No online CPU in core %u to start CPU%d\n",
+ core, cpu);
+ goto out;
+ }
+
+ err = smp_call_function_single(remote, remote_vpe_boot,
+ NULL, 1);
+ if (err)
+ panic("Failed to call remote CPU\n");
+ goto out;
+ }
+
+ BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);
+
+ /* Boot a VPE on this core */
+ mips_cps_boot_vpes(core_cfg, vpe_id);
+out:
+ preempt_enable();
+ return 0;
+}
+
+static void cps_init_secondary(void)
+{
+ /* Disable MT - we only want to run 1 TC per VPE */
+ if (cpu_has_mipsmt)
+ dmt();
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ unsigned int ident = read_gic_vl_ident();
+
+ /*
+ * Ensure that our calculation of the VP ID matches up with
+ * what the GIC reports, otherwise we'll have configured
+ * interrupts incorrectly.
+ */
+ BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
+ }
+
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
+ STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void cps_smp_finish(void)
+{
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+#if defined(CONFIG_HOTPLUG_CPU) || defined(CONFIG_KEXEC)
+
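+/*
+ * How a CPU leaves the system: HALT stops only this VPE/TC and leaves
+ * the rest of the core running (a sibling remains online); POWER gates
+ * power to the whole core via the CPS power management code.
+ */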
+enum cpu_death {
+ CPU_DEATH_HALT,
+ CPU_DEATH_POWER,
+};
+
+static void cps_shutdown_this_cpu(enum cpu_death death)
+{
+ unsigned int cpu, core, vpe_id;
+
+ cpu = smp_processor_id();
+ core = cpu_core(&cpu_data[cpu]);
+
+ if (death == CPU_DEATH_HALT) {
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
+ } else {
+ pr_debug("Gating power to core %d\n", core);
+ /* Power down the core */
+ cps_pm_enter_state(CPS_PM_POWER_GATED);
+ }
+}
+
+#ifdef CONFIG_KEXEC
+
+static void cps_kexec_nonboot_cpu(void)
+{
+ if (cpu_has_mipsmt || cpu_has_vp)
+ cps_shutdown_this_cpu(CPU_DEATH_HALT);
+ else
+ cps_shutdown_this_cpu(CPU_DEATH_POWER);
+}
+
+#endif /* CONFIG_KEXEC */
+
+#endif /* CONFIG_HOTPLUG_CPU || CONFIG_KEXEC */
+
+#ifdef CONFIG_HOTPLUG_CPU
+
+static int cps_cpu_disable(void)
+{
+ unsigned cpu = smp_processor_id();
+ struct core_boot_config *core_cfg;
+
+ if (!cps_pm_support_state(CPS_PM_POWER_GATED))
+ return -EINVAL;
+
+ core_cfg = &mips_cps_core_bootcfg[cpu_core(&current_cpu_data)];
+ atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
+ smp_mb__after_atomic();
+ set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
+ irq_migrate_all_off_this_cpu();
+
+ return 0;
+}
+
+static unsigned cpu_death_sibling;
+static enum cpu_death cpu_death;
+
+void play_dead(void)
+{
+ unsigned int cpu;
+
+ local_irq_disable();
+ idle_task_exit();
+ cpu = smp_processor_id();
+ cpu_death = CPU_DEATH_POWER;
+
+ pr_debug("CPU%d going offline\n", cpu);
+
+ if (cpu_has_mipsmt || cpu_has_vp) {
+ /* Look for another online VPE within the core */
+ for_each_online_cpu(cpu_death_sibling) {
+ if (!cpus_are_siblings(cpu, cpu_death_sibling))
+ continue;
+
+ /*
+ * There is an online VPE within the core. Just halt
+ * this TC and leave the core alone.
+ */
+ cpu_death = CPU_DEATH_HALT;
+ break;
+ }
+ }
+
+ /* This CPU has chosen its way out */
+ (void)cpu_report_death();
+
+ cps_shutdown_this_cpu(cpu_death);
+
+ /* This should never be reached */
+ panic("Failed to offline CPU %u", cpu);
+}
+
+static void wait_for_sibling_halt(void *ptr_cpu)
+{
+ unsigned cpu = (unsigned long)ptr_cpu;
+ unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ unsigned halted;
+ unsigned long flags;
+
+ do {
+ local_irq_save(flags);
+ settc(vpe_id);
+ halted = read_tc_c0_tchalt();
+ local_irq_restore(flags);
+ } while (!(halted & TCHALT_H));
+}
+
+static void cps_cpu_die(unsigned int cpu)
+{
+ unsigned core = cpu_core(&cpu_data[cpu]);
+ unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+ ktime_t fail_time;
+ unsigned stat;
+ int err;
+
+ /* Wait for the cpu to choose its way out */
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_err("CPU%u: didn't offline\n", cpu);
+ return;
+ }
+
+ /*
+ * Now wait for the CPU to actually offline. Without this, the
+ * offlining may race with one or more of:
+ *
+ * - Onlining the CPU again.
+ * - Powering down the core if another VPE within it is offlined.
+ * - A sibling VPE entering a non-coherent state.
+ *
+ * In the non-MT halt case (ie. infinite loop) the CPU is doing nothing
+ * with which we could race, so do nothing.
+ */
+ if (cpu_death == CPU_DEATH_POWER) {
+ /*
+ * Wait for the core to enter a powered down or clock gated
+ * state, the latter happening when a JTAG probe is connected
+ * in which case the CPC will refuse to power down the core.
+ */
+ fail_time = ktime_add_ms(ktime_get(), 2000);
+ do {
+ mips_cm_lock_other(0, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cpc_lock_other(core);
+ stat = read_cpc_co_stat_conf();
+ stat &= CPC_Cx_STAT_CONF_SEQSTATE;
+ stat >>= __ffs(CPC_Cx_STAT_CONF_SEQSTATE);
+ mips_cpc_unlock_other();
+ mips_cm_unlock_other();
+
+ if (stat == CPC_Cx_STAT_CONF_SEQSTATE_D0 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_D2 ||
+ stat == CPC_Cx_STAT_CONF_SEQSTATE_U2)
+ break;
+
+ /*
+ * The core ought to have powered down, but didn't &
+ * now we don't really know what state it's in. It's
+ * likely that its _pwr_up pin has been wired to logic
+ * 1 & it powered back up as soon as we powered it
+ * down...
+ *
+ * The best we can do is warn the user & continue in
+ * the hope that the core is doing nothing harmful &
+ * might behave properly if we online it later.
+ */
+ if (WARN(ktime_after(ktime_get(), fail_time),
+ "CPU%u hasn't powered down, seq. state %u\n",
+ cpu, stat))
+ break;
+ } while (1);
+
+ /* Indicate the core is powered off */
+ bitmap_clear(core_power, core, 1);
+ } else if (cpu_has_mipsmt) {
+ /*
+ * Have a CPU with access to the offlined CPUs registers wait
+ * for its TC to halt.
+ */
+ err = smp_call_function_single(cpu_death_sibling,
+ wait_for_sibling_halt,
+ (void *)(unsigned long)cpu, 1);
+ if (err)
+ panic("Failed to call remote sibling CPU\n");
+ } else if (cpu_has_vp) {
+ do {
+ mips_cm_lock_other(0, core, vpe_id, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ stat = read_cpc_co_vp_running();
+ mips_cm_unlock_other();
+ } while (stat & (1 << vpe_id));
+ }
+}
+
+#endif /* CONFIG_HOTPLUG_CPU */
+
+static const struct plat_smp_ops cps_smp_ops = {
+ .smp_setup = cps_smp_setup,
+ .prepare_cpus = cps_prepare_cpus,
+ .boot_secondary = cps_boot_secondary,
+ .init_secondary = cps_init_secondary,
+ .smp_finish = cps_smp_finish,
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = cps_cpu_disable,
+ .cpu_die = cps_cpu_die,
+#endif
+#ifdef CONFIG_KEXEC
+ .kexec_nonboot_cpu = cps_kexec_nonboot_cpu,
+#endif
+};
+
+bool mips_cps_smp_in_use(void)
+{
+ extern const struct plat_smp_ops *mp_ops;
+ return mp_ops == &cps_smp_ops;
+}
+
+int register_cps_smp_ops(void)
+{
+ if (!mips_cm_present()) {
+ pr_warn("MIPS CPS SMP unable to proceed without a CM\n");
+ return -ENODEV;
+ }
+
+ /* check we have a GIC - we need one for IPIs */
+ if (!(read_gcr_gic_status() & CM_GCR_GIC_STATUS_EX)) {
+ pr_warn("MIPS CPS SMP unable to proceed without a GIC\n");
+ return -ENODEV;
+ }
+
+ register_smp_ops(&cps_smp_ops);
+ return 0;
+}
diff --git a/arch/mips/kernel/smp-mt.c b/arch/mips/kernel/smp-mt.c
new file mode 100644
index 000000000..5f04a0141
--- /dev/null
+++ b/arch/mips/kernel/smp-mt.c
@@ -0,0 +1,240 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ *
+ * Copyright (C) 2004, 05, 06 MIPS Technologies, Inc.
+ * Elizabeth Clarke (beth@mips.com)
+ * Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (C) 2006 Ralf Baechle (ralf@linux-mips.org)
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/cpumask.h>
+#include <linux/interrupt.h>
+#include <linux/compiler.h>
+#include <linux/sched/task_stack.h>
+#include <linux/smp.h>
+
+#include <linux/atomic.h>
+#include <asm/cacheflush.h>
+#include <asm/cpu.h>
+#include <asm/processor.h>
+#include <asm/hardirq.h>
+#include <asm/mmu_context.h>
+#include <asm/time.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/mips-cps.h>
+
+static void __init smvp_copy_vpe_config(void)
+{
+ write_vpe_c0_status(
+ (read_c0_status() & ~(ST0_IM | ST0_IE | ST0_KSU)) | ST0_CU0);
+
+ /* set config to be the same as vpe0, particularly kseg0 coherency alg */
+ write_vpe_c0_config(read_c0_config());
+
+ /* make sure there are no software interrupts pending */
+ write_vpe_c0_cause(0);
+
+ /* Propagate Config7 */
+ write_vpe_c0_config7(read_c0_config7());
+
+ write_vpe_c0_count(read_c0_count());
+}
+
+static unsigned int __init smvp_vpe_init(unsigned int tc, unsigned int mvpconf0,
+ unsigned int ncpu)
+{
+ if (tc > ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT))
+ return ncpu;
+
+ /* Deactivate all but VPE 0 */
+ if (tc != 0) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+
+ /* Record this as available CPU */
+ set_cpu_possible(tc, true);
+ set_cpu_present(tc, true);
+ __cpu_number_map[tc] = ++ncpu;
+ __cpu_logical_map[ncpu] = tc;
+ }
+
+ /* Disable multi-threading with TC's */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() & ~VPECONTROL_TE);
+
+ if (tc != 0)
+ smvp_copy_vpe_config();
+
+ cpu_set_vpe_id(&cpu_data[ncpu], tc);
+
+ return ncpu;
+}
+
+static void __init smvp_tc_init(unsigned int tc, unsigned int mvpconf0)
+{
+ unsigned long tmp;
+
+ if (!tc)
+ return;
+
+ /* bind a TC to each VPE; may as well put all excess TCs
+ on the last VPE */
+ if (tc >= (((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT)+1))
+ write_tc_c0_tcbind(read_tc_c0_tcbind() | ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT));
+ else {
+ write_tc_c0_tcbind(read_tc_c0_tcbind() | tc);
+
+ /* and set XTC */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | (tc << VPECONF0_XTC_SHIFT));
+ }
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
+ write_tc_c0_tchalt(TCHALT_H);
+}
+
+static void vsmp_init_secondary(void)
+{
+ /* This is Malta specific: IPI, performance and timer interrupts */
+ if (mips_gic_present())
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
+ STATUSF_IP6 | STATUSF_IP7);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP0 | STATUSF_IP1 |
+ STATUSF_IP6 | STATUSF_IP7);
+}
+
+static void vsmp_smp_finish(void)
+{
+ /* CDFIXME: remove this? */
+ write_c0_compare(read_c0_count() + (8 * mips_hpt_frequency / HZ));
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+
+ local_irq_enable();
+}
+
+/*
+ * Setup the PC, SP, and GP of a secondary processor and start it
+ * running!
+ * smp_bootstrap is the place to resume from;
+ * __KSTK_TOS(idle) is apparently the stack pointer;
+ * (unsigned long)idle->thread_info is the gp.
+ * Assumes a 1:1 mapping of TC => VPE.
+ */
+static int vsmp_boot_secondary(int cpu, struct task_struct *idle)
+{
+ struct thread_info *gp = task_thread_info(idle);
+ dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(cpu);
+
+ /* restart */
+ write_tc_c0_tcrestart((unsigned long)&smp_bootstrap);
+
+ /* enable the tc this vpe/cpu will be running */
+ write_tc_c0_tcstatus((read_tc_c0_tcstatus() & ~TCSTATUS_IXMT) | TCSTATUS_A);
+
+ write_tc_c0_tchalt(0);
+
+ /* enable the VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /* stack pointer */
+ write_tc_gpr_sp(__KSTK_TOS(idle));
+
+ /* global pointer */
+ write_tc_gpr_gp((unsigned long)gp);
+
+ flush_icache_range((unsigned long)gp,
+ (unsigned long)(gp + sizeof(struct thread_info)));
+
+ /* finally out of configuration and into chaos */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ evpe(EVPE_ENABLE);
+
+ return 0;
+}
+
+/*
+ * Common setup before any secondaries are started.
+ * Make sure all CPUs are in a sensible state before we boot any of the
+ * secondaries.
+ */
+static void __init vsmp_smp_setup(void)
+{
+ unsigned int mvpconf0, ntc, tc, ncpu = 0;
+ unsigned int nvpe;
+
+#ifdef CONFIG_MIPS_MT_FPAFF
+ /* If we have an FPU, enroll ourselves in the FPU-full mask */
+ if (cpu_has_fpu)
+ cpumask_set_cpu(0, &mt_fpu_cpumask);
+#endif /* CONFIG_MIPS_MT_FPAFF */
+ if (!cpu_has_mipsmt)
+ return;
+
+ /* disable MT so we can configure */
+ dvpe();
+ dmt();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ mvpconf0 = read_c0_mvpconf0();
+ ntc = (mvpconf0 & MVPCONF0_PTC) >> MVPCONF0_PTC_SHIFT;
+
+ nvpe = ((mvpconf0 & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+ smp_num_siblings = nvpe;
+
+ /* we'll always have more TCs than VPEs, so loop setting everything
+ to a sensible state */
+ for (tc = 0; tc <= ntc; tc++) {
+ settc(tc);
+
+ smvp_tc_init(tc, mvpconf0);
+ ncpu = smvp_vpe_init(tc, mvpconf0, ncpu);
+ }
+
+ /* Release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* We'll wait until starting the secondaries before starting MVPE */
+
+ printk(KERN_INFO "Detected %i available secondary CPU(s)\n", ncpu);
+}
+
+static void __init vsmp_prepare_cpus(unsigned int max_cpus)
+{
+ mips_mt_set_cpuoptions();
+}
+
+const struct plat_smp_ops vsmp_smp_ops = {
+ .send_ipi_single = mips_smp_send_ipi_single,
+ .send_ipi_mask = mips_smp_send_ipi_mask,
+ .init_secondary = vsmp_init_secondary,
+ .smp_finish = vsmp_smp_finish,
+ .boot_secondary = vsmp_boot_secondary,
+ .smp_setup = vsmp_smp_setup,
+ .prepare_cpus = vsmp_prepare_cpus,
+};
diff --git a/arch/mips/kernel/smp-up.c b/arch/mips/kernel/smp-up.c
new file mode 100644
index 000000000..525d3196f
--- /dev/null
+++ b/arch/mips/kernel/smp-up.c
@@ -0,0 +1,79 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2006, 07 by Ralf Baechle (ralf@linux-mips.org)
+ *
+ * Symmetric Uniprocessor (TM) Support
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+
+/*
+ * Send inter-processor interrupt
+ */
+static void up_send_ipi_single(int cpu, unsigned int action)
+{
+ panic("%s called", __func__);
+}
+
+static inline void up_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ panic("%s called", __func__);
+}
+
+/*
+ * After we've done initial boot, this function is called to allow the
+ * board code to clean up state, if needed
+ */
+static void up_init_secondary(void)
+{
+}
+
+static void up_smp_finish(void)
+{
+}
+
+/*
+ * Firmware CPU startup hook
+ */
+static int up_boot_secondary(int cpu, struct task_struct *idle)
+{
+ return 0;
+}
+
+static void __init up_smp_setup(void)
+{
+}
+
+static void __init up_prepare_cpus(unsigned int max_cpus)
+{
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int up_cpu_disable(void)
+{
+ return -ENOSYS;
+}
+
+static void up_cpu_die(unsigned int cpu)
+{
+ BUG();
+}
+#endif
+
+const struct plat_smp_ops up_smp_ops = {
+ .send_ipi_single = up_send_ipi_single,
+ .send_ipi_mask = up_send_ipi_mask,
+ .init_secondary = up_init_secondary,
+ .smp_finish = up_smp_finish,
+ .boot_secondary = up_boot_secondary,
+ .smp_setup = up_smp_setup,
+ .prepare_cpus = up_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = up_cpu_disable,
+ .cpu_die = up_cpu_die,
+#endif
+};
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
new file mode 100644
index 000000000..002c91fcb
--- /dev/null
+++ b/arch/mips/kernel/smp.c
@@ -0,0 +1,714 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2000, 2001 Kanoj Sarcar
+ * Copyright (C) 2000, 2001 Ralf Baechle
+ * Copyright (C) 2000, 2001 Silicon Graphics, Inc.
+ * Copyright (C) 2000, 2001, 2003 Broadcom Corporation
+ */
+#include <linux/cache.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/threads.h>
+#include <linux/export.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/sched/mm.h>
+#include <linux/cpumask.h>
+#include <linux/cpu.h>
+#include <linux/err.h>
+#include <linux/ftrace.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+
+#include <linux/atomic.h>
+#include <asm/cpu.h>
+#include <asm/ginvt.h>
+#include <asm/processor.h>
+#include <asm/idle.h>
+#include <asm/r4k-timer.h>
+#include <asm/mips-cps.h>
+#include <asm/mmu_context.h>
+#include <asm/time.h>
+#include <asm/setup.h>
+#include <asm/maar.h>
+
+int __cpu_number_map[CONFIG_MIPS_NR_CPU_NR_MAP]; /* Map physical to logical */
+EXPORT_SYMBOL(__cpu_number_map);
+
+int __cpu_logical_map[NR_CPUS]; /* Map logical to physical */
+EXPORT_SYMBOL(__cpu_logical_map);
+
+/* Number of TCs (or siblings in Intel speak) per CPU core */
+int smp_num_siblings = 1;
+EXPORT_SYMBOL(smp_num_siblings);
+
+/* representing the TCs (or siblings in Intel speak) of each logical CPU */
+cpumask_t cpu_sibling_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_sibling_map);
+
+/* representing the core map of multi-core chips of each logical CPU */
+cpumask_t cpu_core_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_core_map);
+
+static DECLARE_COMPLETION(cpu_starting);
+static DECLARE_COMPLETION(cpu_running);
+
+/*
+ * A logical cpu mask containing only one VPE per core to
+ * reduce the number of IPIs on large MT systems.
+ */
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
+EXPORT_SYMBOL(cpu_foreign_map);
+
+/* representing cpus for which sibling maps can be computed */
+static cpumask_t cpu_sibling_setup_map;
+
+/* representing cpus for which core maps can be computed */
+static cpumask_t cpu_core_setup_map;
+
+cpumask_t cpu_coherent_mask;
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+static struct irq_desc *call_desc;
+static struct irq_desc *sched_desc;
+#endif
+
+static inline void set_cpu_sibling_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
+
+ if (smp_num_siblings > 1) {
+ for_each_cpu(i, &cpu_sibling_setup_map) {
+ if (cpus_are_siblings(cpu, i)) {
+ cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
+ }
+ }
+ } else
+ cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
+}
+
+static inline void set_cpu_core_map(int cpu)
+{
+ int i;
+
+ cpumask_set_cpu(cpu, &cpu_core_setup_map);
+
+ for_each_cpu(i, &cpu_core_setup_map) {
+ if (cpu_data[cpu].package == cpu_data[i].package) {
+ cpumask_set_cpu(i, &cpu_core_map[cpu]);
+ cpumask_set_cpu(cpu, &cpu_core_map[i]);
+ }
+ }
+}
+
+/*
+ * Calculate a new cpu_foreign_map mask whenever a
+ * new cpu appears or disappears.
+ */
+void calculate_cpu_foreign_map(void)
+{
+ int i, k, core_present;
+ cpumask_t temp_foreign_map;
+
+ /* Re-calculate the mask */
+ cpumask_clear(&temp_foreign_map);
+ for_each_online_cpu(i) {
+ core_present = 0;
+ for_each_cpu(k, &temp_foreign_map)
+ if (cpus_are_siblings(i, k))
+ core_present = 1;
+ if (!core_present)
+ cpumask_set_cpu(i, &temp_foreign_map);
+ }
+
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
+}
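+
+/*
+ * Consumption sketch (illustrative; fn/info are placeholders): a
+ * cache-maintenance operation that must run once per foreign core,
+ * but not on every sibling VPE, can target the per-CPU map directly:
+ *
+ *	smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
+ *			       fn, info, 1);
+ */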
+
+const struct plat_smp_ops *mp_ops;
+EXPORT_SYMBOL(mp_ops);
+
+void register_smp_ops(const struct plat_smp_ops *ops)
+{
+ if (mp_ops)
+ printk(KERN_WARNING "Overriding previously set SMP ops\n");
+
+ mp_ops = ops;
+}
+
+#ifdef CONFIG_GENERIC_IRQ_IPI
+void mips_smp_send_ipi_single(int cpu, unsigned int action)
+{
+ mips_smp_send_ipi_mask(cpumask_of(cpu), action);
+}
+
+void mips_smp_send_ipi_mask(const struct cpumask *mask, unsigned int action)
+{
+ unsigned long flags;
+ unsigned int core;
+ int cpu;
+
+ local_irq_save(flags);
+
+ switch (action) {
+ case SMP_CALL_FUNCTION:
+ __ipi_send_mask(call_desc, mask);
+ break;
+
+ case SMP_RESCHEDULE_YOURSELF:
+ __ipi_send_mask(sched_desc, mask);
+ break;
+
+ default:
+ BUG();
+ }
+
+ if (mips_cpc_present()) {
+ for_each_cpu(cpu, mask) {
+ if (cpus_are_siblings(cpu, smp_processor_id()))
+ continue;
+
+ core = cpu_core(&cpu_data[cpu]);
+
+ while (!cpumask_test_cpu(cpu, &cpu_coherent_mask)) {
+ mips_cm_lock_other_cpu(cpu, CM_GCR_Cx_OTHER_BLOCK_LOCAL);
+ mips_cpc_lock_other(core);
+ write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);
+ mips_cpc_unlock_other();
+ mips_cm_unlock_other();
+ }
+ }
+ }
+
+ local_irq_restore(flags);
+}
+
+static irqreturn_t ipi_resched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t ipi_call_interrupt(int irq, void *dev_id)
+{
+ generic_smp_call_function_interrupt();
+
+ return IRQ_HANDLED;
+}
+
+static void smp_ipi_init_one(unsigned int virq, const char *name,
+ irq_handler_t handler)
+{
+ int ret;
+
+ irq_set_handler(virq, handle_percpu_irq);
+ ret = request_irq(virq, handler, IRQF_PERCPU, name, NULL);
+ BUG_ON(ret);
+}
+
+static unsigned int call_virq, sched_virq;
+
+int mips_smp_ipi_allocate(const struct cpumask *mask)
+{
+ int virq;
+ struct irq_domain *ipidomain;
+ struct device_node *node;
+
+ node = of_irq_find_parent(of_root);
+ ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+ /*
+ * Some platforms have only a partial DT setup, so if we found an irq
+ * node but no ipidomain, try to search for one that is not in the
+ * DT.
+ */
+ if (node && !ipidomain)
+ ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+ /*
+ * There are systems which use IPI IRQ domains, but only have one
+ * registered when some runtime condition is met. For example a Malta
+ * kernel may include support for GIC & CPU interrupt controller IPI
+ * IRQ domains, but if run on a system with no GIC & no MT ASE then
+ * neither will be supported or registered.
+ *
+ * We only have a problem if we're actually using multiple CPUs so fail
+ * loudly if that is the case. Otherwise simply return, skipping IPI
+ * setup, if we're running with only a single CPU.
+ */
+ if (!ipidomain) {
+ BUG_ON(num_present_cpus() > 1);
+ return 0;
+ }
+
+ virq = irq_reserve_ipi(ipidomain, mask);
+ BUG_ON(!virq);
+ if (!call_virq)
+ call_virq = virq;
+
+ virq = irq_reserve_ipi(ipidomain, mask);
+ BUG_ON(!virq);
+ if (!sched_virq)
+ sched_virq = virq;
+
+ if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ smp_ipi_init_one(call_virq + cpu, "IPI call",
+ ipi_call_interrupt);
+ smp_ipi_init_one(sched_virq + cpu, "IPI resched",
+ ipi_resched_interrupt);
+ }
+ } else {
+ smp_ipi_init_one(call_virq, "IPI call", ipi_call_interrupt);
+ smp_ipi_init_one(sched_virq, "IPI resched",
+ ipi_resched_interrupt);
+ }
+
+ return 0;
+}
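+
+/*
+ * Sketch (illustrative): a platform that brings CPUs in and out at
+ * runtime can pair the allocate/free calls with a single-CPU mask:
+ *
+ *	mips_smp_ipi_allocate(cpumask_of(cpu));
+ *	...
+ *	mips_smp_ipi_free(cpumask_of(cpu));
+ */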
+
+int mips_smp_ipi_free(const struct cpumask *mask)
+{
+ struct irq_domain *ipidomain;
+ struct device_node *node;
+
+ node = of_irq_find_parent(of_root);
+ ipidomain = irq_find_matching_host(node, DOMAIN_BUS_IPI);
+
+ /*
+ * Some platforms have only a partial DT setup, so if we found an irq
+ * node but no ipidomain, try to search for one that is not in the
+ * DT.
+ */
+ if (node && !ipidomain)
+ ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
+
+ BUG_ON(!ipidomain);
+
+ if (irq_domain_is_ipi_per_cpu(ipidomain)) {
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ free_irq(call_virq + cpu, NULL);
+ free_irq(sched_virq + cpu, NULL);
+ }
+ }
+ irq_destroy_ipi(call_virq, mask);
+ irq_destroy_ipi(sched_virq, mask);
+ return 0;
+}
+
+static int __init mips_smp_ipi_init(void)
+{
+ if (num_possible_cpus() == 1)
+ return 0;
+
+ mips_smp_ipi_allocate(cpu_possible_mask);
+
+ call_desc = irq_to_desc(call_virq);
+ sched_desc = irq_to_desc(sched_virq);
+
+ return 0;
+}
+early_initcall(mips_smp_ipi_init);
+#endif
+
+/*
+ * First C code run on the secondary CPUs after being started up by
+ * the master.
+ */
+asmlinkage void start_secondary(void)
+{
+ unsigned int cpu = raw_smp_processor_id();
+
+ cpu_probe();
+ per_cpu_trap_init(false);
+ rcu_cpu_starting(cpu);
+ mips_clockevent_init();
+ mp_ops->init_secondary();
+ cpu_report();
+ maar_init();
+
+ /*
+ * XXX parity protection should be folded in here when it's converted
+ * to an option instead of something based on .cputype
+ */
+
+ calibrate_delay();
+ cpu_data[cpu].udelay_val = loops_per_jiffy;
+
+ set_cpu_sibling_map(cpu);
+ set_cpu_core_map(cpu);
+
+ cpumask_set_cpu(cpu, &cpu_coherent_mask);
+ notify_cpu_starting(cpu);
+
+ /* Notify boot CPU that we're starting & ready to sync counters */
+ complete(&cpu_starting);
+
+ synchronise_count_slave(cpu);
+
+ /* The CPU is running and counters synchronised, now mark it online */
+ set_cpu_online(cpu, true);
+
+ calculate_cpu_foreign_map();
+
+ /*
+ * Notify boot CPU that we're up & online and it can safely return
+ * from __cpu_up
+ */
+ complete(&cpu_running);
+
+ /*
+ * irq will be enabled in ->smp_finish(), enabling it too early
+ * is dangerous.
+ */
+ WARN_ON_ONCE(!irqs_disabled());
+ mp_ops->smp_finish();
+
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
+
+static void stop_this_cpu(void *dummy)
+{
+ /*
+ * Remove this CPU:
+ */
+
+ set_cpu_online(smp_processor_id(), false);
+ calculate_cpu_foreign_map();
+ local_irq_disable();
+ while (1);
+}
+
+void smp_send_stop(void)
+{
+ smp_call_function(stop_this_cpu, NULL, 0);
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/* called from main before smp_init() */
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ init_new_context(current, &init_mm);
+ current_thread_info()->cpu = 0;
+ mp_ops->prepare_cpus(max_cpus);
+ set_cpu_sibling_map(0);
+ set_cpu_core_map(0);
+ calculate_cpu_foreign_map();
+#ifndef CONFIG_HOTPLUG_CPU
+ init_cpu_present(cpu_possible_mask);
+#endif
+ cpumask_copy(&cpu_coherent_mask, cpu_possible_mask);
+}
+
+/* preload SMP state for boot cpu */
+void smp_prepare_boot_cpu(void)
+{
+ if (mp_ops->prepare_boot_cpu)
+ mp_ops->prepare_boot_cpu();
+ set_cpu_possible(0, true);
+ set_cpu_online(0, true);
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ int err;
+
+ err = mp_ops->boot_secondary(cpu, tidle);
+ if (err)
+ return err;
+
+ /* Wait for CPU to start and be ready to sync counters */
+ if (!wait_for_completion_timeout(&cpu_starting,
+ msecs_to_jiffies(1000))) {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ return -EIO;
+ }
+
+ synchronise_count_master(cpu);
+
+ /* Wait for CPU to finish startup & mark itself online before return */
+ wait_for_completion(&cpu_running);
+ return 0;
+}
+
+/* Not really SMP stuff ... */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return 0;
+}
+
+static void flush_tlb_all_ipi(void *info)
+{
+ local_flush_tlb_all();
+}
+
+void flush_tlb_all(void)
+{
+ if (cpu_has_mmid) {
+ htw_stop();
+ ginvt_full();
+ sync_ginv();
+ instruction_hazard();
+ htw_start();
+ return;
+ }
+
+ on_each_cpu(flush_tlb_all_ipi, NULL, 1);
+}
+
+static void flush_tlb_mm_ipi(void *mm)
+{
+ drop_mmu_context((struct mm_struct *)mm);
+}
+
+/*
+ * Special Variant of smp_call_function for use by TLB functions:
+ *
+ * o No return value
+ * o collapses to normal function call on UP kernels
+ * o collapses to normal function call on systems with a single shared
+ * primary cache.
+ */
+static inline void smp_on_other_tlbs(void (*func) (void *info), void *info)
+{
+ smp_call_function(func, info, 1);
+}
+
+static inline void smp_on_each_tlb(void (*func) (void *info), void *info)
+{
+ preempt_disable();
+
+ smp_on_other_tlbs(func, info);
+ func(info);
+
+ preempt_enable();
+}
+
+/*
+ * The following TLB flush calls are invoked when old translations are
+ * being torn down or PTE attributes are changing. For single-threaded
+ * address spaces, a new context is obtained on the current CPU, and the
+ * TLB context on other CPUs is invalidated to force a new context
+ * allocation at switch_mm time, should the mm ever be used on other
+ * CPUs. For multithreaded address spaces, inter-CPU interrupts have to
+ * be sent. Another case where inter-CPU interrupts are required is when
+ * the target mm might be active on another CPU (e.g. debuggers doing
+ * the flushes on behalf of debuggees, kswapd stealing pages from
+ * another process, etc.).
+ * Kanoj 07/00.
+ */
+
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ if (!mm)
+ return;
+
+ if (atomic_read(&mm->mm_users) == 0)
+ return; /* happens as a result of exit_mmap() */
+
+ preempt_disable();
+
+ if (cpu_has_mmid) {
+ /*
+ * No need to worry about other CPUs - the ginvt in
+ * drop_mmu_context() will be globalized.
+ */
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ smp_on_other_tlbs(flush_tlb_mm_ipi, mm);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ set_cpu_context(cpu, mm, 0);
+ }
+ }
+ drop_mmu_context(mm);
+
+ preempt_enable();
+}
+
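+/*
+ * Arguments are packaged into one struct so that a single info pointer
+ * can be passed through on_each_cpu()/smp_call_function() to the IPI
+ * handlers below.
+ */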
+struct flush_tlb_data {
+ struct vm_area_struct *vma;
+ unsigned long addr1;
+ unsigned long addr2;
+};
+
+static void flush_tlb_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_range(fd->vma, fd->addr1, fd->addr2);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
+{
+ struct mm_struct *mm = vma->vm_mm;
+ unsigned long addr;
+ u32 old_mmid;
+
+ preempt_disable();
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, mm));
+ mtc0_tlbw_hazard();
+ addr = round_down(start, PAGE_SIZE * 2);
+ end = round_up(end, PAGE_SIZE * 2);
+ do {
+ ginvt_va_mmid(addr);
+ sync_ginv();
+ addr += PAGE_SIZE * 2;
+ } while (addr < end);
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&mm->mm_users) != 1) || (current->mm != mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
+ local_flush_tlb_range(vma, start, end);
+ } else {
+ unsigned int cpu;
+ int exec = vma->vm_flags & VM_EXEC;
+
+ for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_range() will only fully flush the icache if
+ * the VMA is executable; otherwise we must invalidate the
+ * ASID so that has_valid_asid() sees the mm as completely
+ * unused by that CPU.
+ */
+ if (cpu != smp_processor_id() && cpu_context(cpu, mm))
+ set_cpu_context(cpu, mm, !exec);
+ }
+ local_flush_tlb_range(vma, start, end);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_kernel_range_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_kernel_range(fd->addr1, fd->addr2);
+}
+
+void flush_tlb_kernel_range(unsigned long start, unsigned long end)
+{
+ struct flush_tlb_data fd = {
+ .addr1 = start,
+ .addr2 = end,
+ };
+
+ on_each_cpu(flush_tlb_kernel_range_ipi, &fd, 1);
+}
+
+static void flush_tlb_page_ipi(void *info)
+{
+ struct flush_tlb_data *fd = info;
+
+ local_flush_tlb_page(fd->vma, fd->addr1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
+{
+ u32 old_mmid;
+
+ preempt_disable();
+ if (cpu_has_mmid) {
+ htw_stop();
+ old_mmid = read_c0_memorymapid();
+ write_c0_memorymapid(cpu_asid(0, vma->vm_mm));
+ mtc0_tlbw_hazard();
+ ginvt_va_mmid(page);
+ sync_ginv();
+ write_c0_memorymapid(old_mmid);
+ instruction_hazard();
+ htw_start();
+ } else if ((atomic_read(&vma->vm_mm->mm_users) != 1) ||
+ (current->mm != vma->vm_mm)) {
+ struct flush_tlb_data fd = {
+ .vma = vma,
+ .addr1 = page,
+ };
+
+ smp_on_other_tlbs(flush_tlb_page_ipi, &fd);
+ local_flush_tlb_page(vma, page);
+ } else {
+ unsigned int cpu;
+
+ for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_page() only does partial flushes, so
+ * invalidate the ASID so that has_valid_asid() sees the
+ * mm as completely unused by that CPU.
+ */
+ if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
+ set_cpu_context(cpu, vma->vm_mm, 1);
+ }
+ local_flush_tlb_page(vma, page);
+ }
+ preempt_enable();
+}
+
+static void flush_tlb_one_ipi(void *info)
+{
+ unsigned long vaddr = (unsigned long) info;
+
+ local_flush_tlb_one(vaddr);
+}
+
+void flush_tlb_one(unsigned long vaddr)
+{
+ smp_on_each_tlb(flush_tlb_one_ipi, (void *) vaddr);
+}
+
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(flush_tlb_one);
+
+#ifdef CONFIG_GENERIC_CLOCKEVENTS_BROADCAST
+
+static void tick_broadcast_callee(void *info)
+{
+ tick_receive_broadcast();
+}
+
+static DEFINE_PER_CPU(call_single_data_t, tick_broadcast_csd) =
+ CSD_INIT(tick_broadcast_callee, NULL);
+
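+/*
+ * Per-CPU CSDs let the broadcast be fired with
+ * smp_call_function_single_async() from the tick path without
+ * sleeping or allocating; each target CPU then runs
+ * tick_receive_broadcast() from its IPI.
+ */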
+void tick_broadcast(const struct cpumask *mask)
+{
+ call_single_data_t *csd;
+ int cpu;
+
+ for_each_cpu(cpu, mask) {
+ csd = &per_cpu(tick_broadcast_csd, cpu);
+ smp_call_function_single_async(cpu, csd);
+ }
+}
+
+#endif /* CONFIG_GENERIC_CLOCKEVENTS_BROADCAST */
diff --git a/arch/mips/kernel/spinlock_test.c b/arch/mips/kernel/spinlock_test.c
new file mode 100644
index 000000000..90f53e041
--- /dev/null
+++ b/arch/mips/kernel/spinlock_test.c
@@ -0,0 +1,127 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/init.h>
+#include <linux/kthread.h>
+#include <linux/hrtimer.h>
+#include <linux/fs.h>
+#include <linux/debugfs.h>
+#include <linux/export.h>
+#include <linux/spinlock.h>
+#include <asm/debug.h>
+
+static int ss_get(void *data, u64 *val)
+{
+ ktime_t start, finish;
+ int loops;
+ int cont;
+ DEFINE_RAW_SPINLOCK(ss_spin);
+
+ loops = 1000000;
+ cont = 1;
+
+ start = ktime_get();
+
+ while (cont) {
+ raw_spin_lock(&ss_spin);
+ loops--;
+ if (loops == 0)
+ cont = 0;
+ raw_spin_unlock(&ss_spin);
+ }
+
+ finish = ktime_get();
+
+ *val = ktime_us_delta(finish, start);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_ss, ss_get, NULL, "%llu\n");
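+
+/*
+ * Reading the file registered below for this attribute (typically
+ * /sys/kernel/debug/mips/spin_single) reports the elapsed time, in
+ * microseconds, for 1,000,000 uncontended lock/unlock pairs.
+ */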
+
+
+struct spin_multi_state {
+ raw_spinlock_t lock;
+ atomic_t start_wait;
+ atomic_t enter_wait;
+ atomic_t exit_wait;
+ int loops;
+};
+
+struct spin_multi_per_thread {
+ struct spin_multi_state *state;
+ ktime_t start;
+};
+
+static int multi_other(void *data)
+{
+ int loops;
+ int cont;
+ struct spin_multi_per_thread *pt = data;
+ struct spin_multi_state *s = pt->state;
+
+ loops = s->loops;
+ cont = 1;
+
+ atomic_dec(&s->enter_wait);
+
+ while (atomic_read(&s->enter_wait))
+ ; /* spin */
+
+ pt->start = ktime_get();
+
+ atomic_dec(&s->start_wait);
+
+ while (atomic_read(&s->start_wait))
+ ; /* spin */
+
+ while (cont) {
+ raw_spin_lock(&s->lock);
+ loops--;
+ if (loops == 0)
+ cont = 0;
+ raw_spin_unlock(&s->lock);
+ }
+
+ atomic_dec(&s->exit_wait);
+ while (atomic_read(&s->exit_wait))
+ ; /* spin */
+ return 0;
+}
+
+static int multi_get(void *data, u64 *val)
+{
+ ktime_t finish;
+ struct spin_multi_state ms;
+ struct spin_multi_per_thread t1, t2;
+
+ ms.lock = __RAW_SPIN_LOCK_UNLOCKED("multi_get");
+ ms.loops = 1000000;
+
+ atomic_set(&ms.start_wait, 2);
+ atomic_set(&ms.enter_wait, 2);
+ atomic_set(&ms.exit_wait, 2);
+ t1.state = &ms;
+ t2.state = &ms;
+
+ kthread_run(multi_other, &t2, "multi_get");
+
+ multi_other(&t1);
+
+ finish = ktime_get();
+
+ *val = ktime_us_delta(finish, t1.start);
+
+ return 0;
+}
+
+DEFINE_DEBUGFS_ATTRIBUTE(fops_multi, multi_get, NULL, "%llu\n");
+
+static int __init spinlock_test(void)
+{
+ debugfs_create_file_unsafe("spin_single", S_IRUGO, mips_debugfs_dir, NULL,
+ &fops_ss);
+ debugfs_create_file_unsafe("spin_multi", S_IRUGO, mips_debugfs_dir, NULL,
+ &fops_multi);
+ return 0;
+}
+device_initcall(spinlock_test);
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
new file mode 100644
index 000000000..d5d96214c
--- /dev/null
+++ b/arch/mips/kernel/spram.c
@@ -0,0 +1,220 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * MIPS SPRAM support
+ *
+ * Copyright (C) 2007, 2008 MIPS Technologies, Inc.
+ */
+#include <linux/kernel.h>
+#include <linux/ptrace.h>
+#include <linux/stddef.h>
+
+#include <asm/fpu.h>
+#include <asm/mipsregs.h>
+#include <asm/r4kcache.h>
+#include <asm/hazards.h>
+
+/*
+ * These definitions are correct for the 24K/34K/74K SPRAM sample
+ * implementation. The 4KS interpreted the tags differently...
+ */
+#define SPRAM_TAG0_ENABLE 0x00000080
+#define SPRAM_TAG0_PA_MASK 0xfffff000
+#define SPRAM_TAG1_SIZE_MASK 0xfffff000
+
+#define SPRAM_TAG_STRIDE 8
+
+#define ERRCTL_SPRAM (1 << 28)
+
+/* errctl access */
+#define read_c0_errctl(x) read_c0_ecc(x)
+#define write_c0_errctl(x) write_c0_ecc(x)
+
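+/*
+ * Tag layout for one SPRAM bank, as encoded by the masks above
+ * (24K/34K/74K sample implementation):
+ *
+ *	tag0: bits 31:12 physical base address, bit 7 enable
+ *	tag1: bits 31:12 bank size in bytes
+ */
+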
+/*
+ * Different semantics to the set_c0_* function built by __BUILD_SET_C0
+ */
+static unsigned int bis_c0_errctl(unsigned int set)
+{
+ unsigned int res;
+ res = read_c0_errctl();
+ write_c0_errctl(res | set);
+ return res;
+}
+
+static void ispram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+
+ write_c0_taglo(data);
+ ehb();
+
+ cache_op(Index_Store_Tag_I, CKSEG0|offset);
+ ehb();
+
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+static unsigned int ispram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_I, CKSEG0 | offset);
+ ehb();
+ data = read_c0_taglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static void dspram_store_tag(unsigned int offset, unsigned int data)
+{
+ unsigned int errctl;
+
+ /* enable SPRAM tag access */
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ write_c0_dtaglo(data);
+ ehb();
+ cache_op(Index_Store_Tag_D, CKSEG0 | offset);
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+}
+
+static unsigned int dspram_load_tag(unsigned int offset)
+{
+ unsigned int data;
+ unsigned int errctl;
+
+ errctl = bis_c0_errctl(ERRCTL_SPRAM);
+ ehb();
+ cache_op(Index_Load_Tag_D, CKSEG0 | offset);
+ ehb();
+ data = read_c0_dtaglo();
+ ehb();
+ write_c0_errctl(errctl);
+ ehb();
+
+ return data;
+}
+
+static void probe_spram(char *type,
+ unsigned int base,
+ unsigned int (*read)(unsigned int),
+ void (*write)(unsigned int, unsigned int))
+{
+ unsigned int firstsize = 0, lastsize = 0;
+ unsigned int firstpa = 0, lastpa = 0, pa = 0;
+ unsigned int offset = 0;
+ unsigned int size, tag0, tag1;
+ unsigned int enabled;
+ int i;
+
+ /*
+ * The limit is arbitrary but avoids the loop running away if
+ * the SPRAM tags are implemented differently
+ */
+
+ for (i = 0; i < 8; i++) {
+ tag0 = read(offset);
+ tag1 = read(offset+SPRAM_TAG_STRIDE);
+ pr_debug("DBG %s%d: tag0=%08x tag1=%08x\n",
+ type, i, tag0, tag1);
+
+ size = tag1 & SPRAM_TAG1_SIZE_MASK;
+
+ if (size == 0)
+ break;
+
+ if (i != 0) {
+ /* tags may repeat... */
+ if ((pa == firstpa && size == firstsize) ||
+ (pa == lastpa && size == lastsize))
+ break;
+ }
+
+ /* Align base with size */
+ base = (base + size - 1) & ~(size-1);
+
+		/* reprogram the base address and enable */
+ tag0 = (base & SPRAM_TAG0_PA_MASK) | SPRAM_TAG0_ENABLE;
+ write(offset, tag0);
+
+ base += size;
+
+ /* reread the tag */
+ tag0 = read(offset);
+ pa = tag0 & SPRAM_TAG0_PA_MASK;
+ enabled = tag0 & SPRAM_TAG0_ENABLE;
+
+ if (i == 0) {
+ firstpa = pa;
+ firstsize = size;
+ }
+
+ lastpa = pa;
+ lastsize = size;
+
+ if (strcmp(type, "DSPRAM") == 0) {
+ unsigned int *vp = (unsigned int *)(CKSEG1 | pa);
+ unsigned int v;
+#define TDAT 0x5a5aa5a5
+ vp[0] = TDAT;
+ vp[1] = ~TDAT;
+
+ mb();
+
+ v = vp[0];
+ if (v != TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp, TDAT, v);
+ v = vp[1];
+ if (v != ~TDAT)
+ printk(KERN_ERR "vp=%p wrote=%08x got=%08x\n",
+ vp+1, ~TDAT, v);
+ }
+
+ pr_info("%s%d: PA=%08x,Size=%08x%s\n",
+ type, i, pa, size, enabled ? ",enabled" : "");
+ offset += 2 * SPRAM_TAG_STRIDE;
+ }
+}
+void spram_config(void)
+{
+ unsigned int config0;
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_QEMU_GENERIC:
+ case CPU_I6400:
+ case CPU_P6600:
+ config0 = read_c0_config();
+ /* FIXME: addresses are Malta specific */
+ if (config0 & MIPS_CONF_ISP) {
+ probe_spram("ISPRAM", 0x1c000000,
+ &ispram_load_tag, &ispram_store_tag);
+ }
+ if (config0 & MIPS_CONF_DSP)
+ probe_spram("DSPRAM", 0x1c100000,
+ &dspram_load_tag, &dspram_store_tag);
+ }
+}
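
The probe loop above depends on power-of-two SPRAM sizes: "base = (base + size - 1) & ~(size - 1)" rounds the candidate physical address up to the next size boundary before the tag is reprogrammed. A standalone sketch of that arithmetic, using hypothetical values rather than the Malta-specific bases:

#include <stdio.h>

/* Round base up to the next multiple of size; size must be a power of two.
 * Mirrors the alignment step in probe_spram() above. */
static unsigned int align_up(unsigned int base, unsigned int size)
{
	return (base + size - 1) & ~(size - 1);
}

int main(void)
{
	/* 0x1c000100 rounded up to a 4 KiB boundary -> 0x1c001000 */
	printf("%#x\n", align_up(0x1c000100, 0x1000));
	return 0;
}

An already-aligned base passes through unchanged, so consecutive SPRAM blocks end up packed back to back.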
diff --git a/arch/mips/kernel/stacktrace.c b/arch/mips/kernel/stacktrace.c
new file mode 100644
index 000000000..f2e720940
--- /dev/null
+++ b/arch/mips/kernel/stacktrace.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Stack trace management functions
+ *
+ * Copyright (C) 2006 Atsushi Nemoto <anemo@mba.ocn.ne.jp>
+ */
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/export.h>
+#include <asm/stacktrace.h>
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer:
+ */
+static void save_raw_context_stack(struct stack_trace *trace,
+ unsigned long reg29, int savesched)
+{
+ unsigned long *sp = (unsigned long *)reg29;
+ unsigned long addr;
+
+ while (!kstack_end(sp)) {
+ addr = *sp++;
+ if (__kernel_text_address(addr) &&
+ (savesched || !in_sched_functions(addr))) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = addr;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ }
+}
+
+static void save_context_stack(struct stack_trace *trace,
+ struct task_struct *tsk, struct pt_regs *regs, int savesched)
+{
+ unsigned long sp = regs->regs[29];
+#ifdef CONFIG_KALLSYMS
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (raw_show_trace || !__kernel_text_address(pc)) {
+ unsigned long stack_page =
+ (unsigned long)task_stack_page(tsk);
+ if (stack_page && sp >= stack_page &&
+ sp <= stack_page + THREAD_SIZE - 32)
+ save_raw_context_stack(trace, sp, savesched);
+ return;
+ }
+ do {
+ if (savesched || !in_sched_functions(pc)) {
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = pc;
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+ }
+ pc = unwind_stack(tsk, &sp, pc, &ra);
+ } while (pc);
+#else
+ save_raw_context_stack(trace, sp, savesched);
+#endif
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(current, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ struct pt_regs dummyregs;
+ struct pt_regs *regs = &dummyregs;
+
+ WARN_ON(trace->nr_entries || !trace->max_entries);
+
+ if (tsk != current) {
+ regs->regs[29] = tsk->thread.reg29;
+ regs->regs[31] = 0;
+ regs->cp0_epc = tsk->thread.reg31;
+ } else
+ prepare_frametrace(regs);
+ save_context_stack(trace, tsk, regs, tsk == current);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
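
For reference, callers of this older stacktrace interface supply the entry buffer themselves through struct stack_trace. A minimal in-kernel sketch; the buffer size and the pr_info() consumer are arbitrary choices, not part of the API:

#include <linux/kernel.h>
#include <linux/stacktrace.h>

static unsigned long entries[16];

static void dump_current_stack(void)
{
	struct stack_trace trace = {
		.max_entries = ARRAY_SIZE(entries),
		.entries = entries,
		.skip = 0,	/* keep all leading frames */
	};
	unsigned int i;

	save_stack_trace(&trace);
	for (i = 0; i < trace.nr_entries; i++)
		pr_info("%pS\n", (void *)trace.entries[i]);
}

trace.nr_entries comes back holding the number of slots actually filled, and the skip field drops that many innermost frames first, exactly as the loops above implement it.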
diff --git a/arch/mips/kernel/sync-r4k.c b/arch/mips/kernel/sync-r4k.c
new file mode 100644
index 000000000..abdd7aaa3
--- /dev/null
+++ b/arch/mips/kernel/sync-r4k.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Count register synchronisation.
+ *
+ * All CPUs will have their count registers synchronised to CPU0's next count
+ * value. This can cause a small timewarp for CPU0. All other CPUs should
+ * not have done anything significant (but they may have had interrupts
+ * enabled briefly - prom_smp_finish() should not be responsible for enabling
+ * interrupts...)
+ */
+
+#include <linux/kernel.h>
+#include <linux/irqflags.h>
+#include <linux/cpumask.h>
+
+#include <asm/r4k-timer.h>
+#include <linux/atomic.h>
+#include <asm/barrier.h>
+#include <asm/mipsregs.h>
+
+static unsigned int initcount = 0;
+static atomic_t count_count_start = ATOMIC_INIT(0);
+static atomic_t count_count_stop = ATOMIC_INIT(0);
+
+#define COUNTON 100
+#define NR_LOOPS 3
+
+void synchronise_count_master(int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ pr_info("Synchronize counters for CPU %u: ", cpu);
+
+ local_irq_save(flags);
+
+ /*
+ * We loop a few times to get a primed instruction cache,
+ * then the last pass is more or less synchronised and
+ * the master and slaves each set their cycle counters to a known
+ * value all at once. This reduces the chance of having random offsets
+ * between the processors, and guarantees that the maximum
+ * delay between the cycle counters is never bigger than
+ * the latency of information-passing (cachelines) between
+ * two CPUs.
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ /* slaves loop on '!= 2' */
+ while (atomic_read(&count_count_start) != 1)
+ mb();
+ atomic_set(&count_count_stop, 0);
+ smp_wmb();
+
+		/* Let the slave write its count register */
+ atomic_inc(&count_count_start);
+
+		/* Count will be initialised to the current timer value */
+ if (i == 1)
+ initcount = read_c0_count();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ /*
+ * Wait for slave to leave the synchronization point:
+ */
+ while (atomic_read(&count_count_stop) != 1)
+ mb();
+ atomic_set(&count_count_start, 0);
+ smp_wmb();
+ atomic_inc(&count_count_stop);
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
+
+ /*
+ * i386 code reported the skew here, but the
+ * count registers were almost certainly out of sync
+ * so no point in alarming people
+ */
+ pr_cont("done.\n");
+}
+
+void synchronise_count_slave(int cpu)
+{
+ int i;
+ unsigned long flags;
+
+ local_irq_save(flags);
+
+ /*
+	 * Not every CPU is online at the time this gets called,
+	 * so we first wait for the master to say everyone is ready.
+ */
+
+ for (i = 0; i < NR_LOOPS; i++) {
+ atomic_inc(&count_count_start);
+ while (atomic_read(&count_count_start) != 2)
+ mb();
+
+ /*
+ * Everyone initialises count in the last loop:
+ */
+ if (i == NR_LOOPS-1)
+ write_c0_count(initcount);
+
+ atomic_inc(&count_count_stop);
+ while (atomic_read(&count_count_stop) != 2)
+ mb();
+ }
+ /* Arrange for an interrupt in a short while */
+ write_c0_compare(read_c0_count() + COUNTON);
+
+ local_irq_restore(flags);
+}
+#undef NR_LOOPS
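
The master/slave handshake above is two counting gates used NR_LOOPS times over: each side increments a counter and then spins until the other side has advanced it. A userspace sketch of the same gate structure with C11 atomics and pthreads — fake_count stands in for the CP0 Count register and 12345 for the sampled value; none of this is the kernel interface itself:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define NR_LOOPS 3

static atomic_int start_gate, stop_gate;
static unsigned int initcount;
static _Atomic unsigned int fake_count;	/* stand-in for read/write_c0_count() */

static void *master(void *unused)
{
	for (int i = 0; i < NR_LOOPS; i++) {
		while (atomic_load(&start_gate) != 1)	/* wait for the slave */
			;
		atomic_store(&stop_gate, 0);
		atomic_fetch_add(&start_gate, 1);	/* release the slave */

		if (i == 1)				/* sample on a warm pass */
			initcount = 12345;
		if (i == NR_LOOPS - 1)
			atomic_store(&fake_count, initcount);

		while (atomic_load(&stop_gate) != 1)	/* wait again */
			;
		atomic_store(&start_gate, 0);
		atomic_fetch_add(&stop_gate, 1);	/* release the slave */
	}
	return NULL;
}

static void *slave(void *unused)
{
	for (int i = 0; i < NR_LOOPS; i++) {
		atomic_fetch_add(&start_gate, 1);
		while (atomic_load(&start_gate) != 2)	/* slaves loop on '!= 2' */
			;
		if (i == NR_LOOPS - 1)
			atomic_store(&fake_count, initcount);
		atomic_fetch_add(&stop_gate, 1);
		while (atomic_load(&stop_gate) != 2)
			;
	}
	return NULL;
}

int main(void)
{
	pthread_t m, s;

	pthread_create(&m, NULL, master, NULL);
	pthread_create(&s, NULL, slave, NULL);
	pthread_join(m, NULL);
	pthread_join(s, NULL);
	printf("count = %u\n", atomic_load(&fake_count));
	return 0;
}

Build with "cc -pthread". The seq_cst atomics make the reset of one gate visible before the release of the other, which is the ordering the smp_wmb() calls provide in the kernel version.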
diff --git a/arch/mips/kernel/syscall.c b/arch/mips/kernel/syscall.c
new file mode 100644
index 000000000..ae93a607d
--- /dev/null
+++ b/arch/mips/kernel/syscall.c
@@ -0,0 +1,242 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1995, 1996, 1997, 2000, 2001, 05 by Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2001 MIPS Technologies, Inc.
+ */
+#include <linux/capability.h>
+#include <linux/errno.h>
+#include <linux/linkage.h>
+#include <linux/fs.h>
+#include <linux/smp.h>
+#include <linux/ptrace.h>
+#include <linux/string.h>
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/utsname.h>
+#include <linux/unistd.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/compiler.h>
+#include <linux/ipc.h>
+#include <linux/uaccess.h>
+#include <linux/slab.h>
+#include <linux/elf.h>
+#include <linux/sched/task_stack.h>
+
+#include <asm/asm.h>
+#include <asm/asm-eva.h>
+#include <asm/branch.h>
+#include <asm/cachectl.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-offsets.h>
+#include <asm/signal.h>
+#include <asm/sim.h>
+#include <asm/shmparam.h>
+#include <asm/sync.h>
+#include <asm/sysmips.h>
+#include <asm/switch_to.h>
+
+/*
+ * For historic reasons the pipe(2) syscall on MIPS has an unusual calling
+ * convention. It returns results in registers $v0 / $v1 which means there
+ * is no need for it to verify the validity of a userspace pointer
+ * argument. Historically that used to be expensive in Linux. These days
+ * the performance advantage is negligible.
+ */
+asmlinkage int sysm_pipe(void)
+{
+ int fd[2];
+ int error = do_pipe_flags(fd, 0);
+ if (error)
+ return error;
+ current_pt_regs()->regs[3] = fd[1];
+ return fd[0];
+}
+
+SYSCALL_DEFINE6(mips_mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long,
+ fd, off_t, offset)
+{
+ if (offset & ~PAGE_MASK)
+ return -EINVAL;
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> PAGE_SHIFT);
+}
+
+SYSCALL_DEFINE6(mips_mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
+{
+ if (pgoff & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
+}
+
+save_static_function(sys_fork);
+save_static_function(sys_clone);
+save_static_function(sys_clone3);
+
+SYSCALL_DEFINE1(set_thread_area, unsigned long, addr)
+{
+ struct thread_info *ti = task_thread_info(current);
+
+ ti->tp_value = addr;
+ if (cpu_has_userlocal)
+ write_c0_userlocal(addr);
+
+ return 0;
+}
+
+static inline int mips_atomic_set(unsigned long addr, unsigned long new)
+{
+ unsigned long old, tmp;
+ struct pt_regs *regs;
+ unsigned int err;
+
+ if (unlikely(addr & 3))
+ return -EINVAL;
+
+ if (unlikely(!access_ok((const void __user *)addr, 4)))
+ return -EINVAL;
+
+ if (cpu_has_llsc && IS_ENABLED(CONFIG_WAR_R10000_LLSC)) {
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set arch=r4000 \n"
+ " li %[err], 0 \n"
+ "1: ll %[old], (%[addr]) \n"
+ " move %[tmp], %[new] \n"
+ "2: sc %[tmp], (%[addr]) \n"
+ " beqzl %[tmp], 1b \n"
+ "3: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "4: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR_WD)" 1b, 4b \n"
+ " "STR(PTR_WD)" 2b, 4b \n"
+ " .previous \n"
+ " .set pop \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else if (cpu_has_llsc) {
+ __asm__ __volatile__ (
+ " .set push \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " li %[err], 0 \n"
+ "1: \n"
+ " " __SYNC(full, loongson3_war) " \n"
+ user_ll("%[old]", "(%[addr])")
+ " move %[tmp], %[new] \n"
+ "2: \n"
+ user_sc("%[tmp]", "(%[addr])")
+ " beqz %[tmp], 1b \n"
+ "3: \n"
+ " .insn \n"
+ " .section .fixup,\"ax\" \n"
+ "5: li %[err], %[efault] \n"
+ " j 3b \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " "STR(PTR_WD)" 1b, 5b \n"
+ " "STR(PTR_WD)" 2b, 5b \n"
+ " .previous \n"
+ " .set pop \n"
+ : [old] "=&r" (old),
+ [err] "=&r" (err),
+ [tmp] "=&r" (tmp)
+ : [addr] "r" (addr),
+ [new] "r" (new),
+ [efault] "i" (-EFAULT)
+ : "memory");
+ } else {
+ do {
+ preempt_disable();
+ ll_bit = 1;
+ ll_task = current;
+ preempt_enable();
+
+ err = __get_user(old, (unsigned int *) addr);
+ err |= __put_user(new, (unsigned int *) addr);
+ if (err)
+ break;
+ rmb();
+ } while (!ll_bit);
+ }
+
+ if (unlikely(err))
+ return err;
+
+ regs = current_pt_regs();
+ regs->regs[2] = old;
+ regs->regs[7] = 0; /* No error */
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ " move $29, %0 \n"
+ " j syscall_exit \n"
+ : /* no outputs */
+ : "r" (regs));
+
+ /* unreached. Honestly. */
+ unreachable();
+}
+
+/*
+ * mips_atomic_set() normally returns directly via syscall_exit potentially
+ * clobbering static registers, so be sure to preserve them.
+ */
+save_static_function(sys_sysmips);
+
+SYSCALL_DEFINE3(sysmips, long, cmd, long, arg1, long, arg2)
+{
+ switch (cmd) {
+ case MIPS_ATOMIC_SET:
+ return mips_atomic_set(arg1, arg2);
+
+ case MIPS_FIXADE:
+ if (arg1 & ~3)
+ return -EINVAL;
+
+ if (arg1 & 1)
+ set_thread_flag(TIF_FIXADE);
+ else
+ clear_thread_flag(TIF_FIXADE);
+ if (arg1 & 2)
+ set_thread_flag(TIF_LOGADE);
+ else
+ clear_thread_flag(TIF_LOGADE);
+
+ return 0;
+
+ case FLUSH_CACHE:
+ __flush_cache_all();
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+/*
+ * Not implemented yet ...
+ */
+SYSCALL_DEFINE3(cachectl, char *, addr, int, nbytes, int, op)
+{
+ return -ENOSYS;
+}
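
From userspace these operations are reached through the sysmips(2) multiplexer. A hedged sketch of a MIPS_ATOMIC_SET call — it only compiles on MIPS, and it assumes the C library's sys/syscall.h exposes SYS_sysmips as usual:

#include <stdio.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <asm/sysmips.h>	/* MIPS_ATOMIC_SET, MIPS_FIXADE, FLUSH_CACHE */

int main(void)
{
	unsigned int word = 0xdeadbeef;
	long old;

	/* Atomically store 0x12345678 in word; the old contents come back
	 * in the syscall return value ($v0), per mips_atomic_set() above. */
	old = syscall(SYS_sysmips, MIPS_ATOMIC_SET, (long)&word, 0x12345678L);
	if (old == -1) {
		perror("sysmips");
		return 1;
	}
	printf("old=%#lx new=%#x\n", old, word);
	return 0;
}

One caveat: through the syscall() wrapper an old value that collides with -1 is indistinguishable from an error return, so careful callers have to consult errno as well.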
diff --git a/arch/mips/kernel/syscalls/Makefile b/arch/mips/kernel/syscalls/Makefile
new file mode 100644
index 000000000..e6b21de65
--- /dev/null
+++ b/arch/mips/kernel/syscalls/Makefile
@@ -0,0 +1,51 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
+$(shell mkdir -p $(uapi) $(kapi))
+
+syshdr := $(srctree)/scripts/syscallhdr.sh
+sysnr := $(srctree)/$(src)/syscallnr.sh
+systbl := $(srctree)/scripts/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) $(syshdr) --offset __NR_Linux $< $@
+
+quiet_cmd_sysnr = SYSNR $@
+ cmd_sysnr = $(CONFIG_SHELL) '$(sysnr)' '$<' '$@' \
+ '$(sysnr_abis_$(basetarget))' \
+ '$(sysnr_pfx_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) $(systbl) $< $@
+
+$(uapi)/unistd_%.h: $(src)/syscall_%.tbl $(syshdr) FORCE
+ $(call if_changed,syshdr)
+
+sysnr_pfx_unistd_nr_n32 := N32
+sysnr_pfx_unistd_nr_n64 := 64
+sysnr_pfx_unistd_nr_o32 := O32
+
+$(kapi)/unistd_nr_%.h: $(src)/syscall_%.tbl $(sysnr) FORCE
+ $(call if_changed,sysnr)
+
+$(kapi)/syscall_table_%.h: $(src)/syscall_%.tbl $(systbl) FORCE
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_n32.h \
+ unistd_n64.h \
+ unistd_o32.h
+kapisyshdr-y += syscall_table_n32.h \
+ syscall_table_n64.h \
+ syscall_table_o32.h \
+ unistd_nr_n32.h \
+ unistd_nr_n64.h \
+ unistd_nr_o32.h
+
+uapisyshdr-y := $(addprefix $(uapi)/, $(uapisyshdr-y))
+kapisyshdr-y := $(addprefix $(kapi)/, $(kapisyshdr-y))
+targets += $(addprefix ../../../../, $(uapisyshdr-y) $(kapisyshdr-y))
+
+PHONY += all
+all: $(uapisyshdr-y) $(kapisyshdr-y)
+ @:
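
Since the syshdr rule passes --offset __NR_Linux, the generated uapi headers express each number relative to the ABI's base (historically 4000 for o32, 5000 for n64 and 6000 for n32). An illustrative excerpt of what $(uapi)/unistd_n32.h should look like, not verbatim tool output:

/* generated by scripts/syscallhdr.sh from syscall_n32.tbl */
#define __NR_read	(__NR_Linux + 0)
#define __NR_write	(__NR_Linux + 1)
#define __NR_open	(__NR_Linux + 2)
/* ... one line per table entry ... */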
diff --git a/arch/mips/kernel/syscalls/syscall_n32.tbl b/arch/mips/kernel/syscalls/syscall_n32.tbl
new file mode 100644
index 000000000..253ff994e
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_n32.tbl
@@ -0,0 +1,391 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> is always "n32" for this file.
+#
+0 n32 read sys_read
+1 n32 write sys_write
+2 n32 open sys_open
+3 n32 close sys_close
+4 n32 stat sys_newstat
+5 n32 fstat sys_newfstat
+6 n32 lstat sys_newlstat
+7 n32 poll sys_poll
+8 n32 lseek sys_lseek
+9 n32 mmap sys_mips_mmap
+10 n32 mprotect sys_mprotect
+11 n32 munmap sys_munmap
+12 n32 brk sys_brk
+13 n32 rt_sigaction compat_sys_rt_sigaction
+14 n32 rt_sigprocmask compat_sys_rt_sigprocmask
+15 n32 ioctl compat_sys_ioctl
+16 n32 pread64 sys_pread64
+17 n32 pwrite64 sys_pwrite64
+18 n32 readv sys_readv
+19 n32 writev sys_writev
+20 n32 access sys_access
+21 n32 pipe sysm_pipe
+22 n32 _newselect compat_sys_select
+23 n32 sched_yield sys_sched_yield
+24 n32 mremap sys_mremap
+25 n32 msync sys_msync
+26 n32 mincore sys_mincore
+27 n32 madvise sys_madvise
+28 n32 shmget sys_shmget
+29 n32 shmat sys_shmat
+30 n32 shmctl compat_sys_old_shmctl
+31 n32 dup sys_dup
+32 n32 dup2 sys_dup2
+33 n32 pause sys_pause
+34 n32 nanosleep sys_nanosleep_time32
+35 n32 getitimer compat_sys_getitimer
+36 n32 setitimer compat_sys_setitimer
+37 n32 alarm sys_alarm
+38 n32 getpid sys_getpid
+39 n32 sendfile compat_sys_sendfile
+40 n32 socket sys_socket
+41 n32 connect sys_connect
+42 n32 accept sys_accept
+43 n32 sendto sys_sendto
+44 n32 recvfrom compat_sys_recvfrom
+45 n32 sendmsg compat_sys_sendmsg
+46 n32 recvmsg compat_sys_recvmsg
+47 n32 shutdown sys_shutdown
+48 n32 bind sys_bind
+49 n32 listen sys_listen
+50 n32 getsockname sys_getsockname
+51 n32 getpeername sys_getpeername
+52 n32 socketpair sys_socketpair
+53 n32 setsockopt sys_setsockopt
+54 n32 getsockopt sys_getsockopt
+55 n32 clone __sys_clone
+56 n32 fork __sys_fork
+57 n32 execve compat_sys_execve
+58 n32 exit sys_exit
+59 n32 wait4 compat_sys_wait4
+60 n32 kill sys_kill
+61 n32 uname sys_newuname
+62 n32 semget sys_semget
+63 n32 semop sys_semop
+64 n32 semctl compat_sys_old_semctl
+65 n32 shmdt sys_shmdt
+66 n32 msgget sys_msgget
+67 n32 msgsnd compat_sys_msgsnd
+68 n32 msgrcv compat_sys_msgrcv
+69 n32 msgctl compat_sys_old_msgctl
+70 n32 fcntl compat_sys_fcntl
+71 n32 flock sys_flock
+72 n32 fsync sys_fsync
+73 n32 fdatasync sys_fdatasync
+74 n32 truncate sys_truncate
+75 n32 ftruncate sys_ftruncate
+76 n32 getdents compat_sys_getdents
+77 n32 getcwd sys_getcwd
+78 n32 chdir sys_chdir
+79 n32 fchdir sys_fchdir
+80 n32 rename sys_rename
+81 n32 mkdir sys_mkdir
+82 n32 rmdir sys_rmdir
+83 n32 creat sys_creat
+84 n32 link sys_link
+85 n32 unlink sys_unlink
+86 n32 symlink sys_symlink
+87 n32 readlink sys_readlink
+88 n32 chmod sys_chmod
+89 n32 fchmod sys_fchmod
+90 n32 chown sys_chown
+91 n32 fchown sys_fchown
+92 n32 lchown sys_lchown
+93 n32 umask sys_umask
+94 n32 gettimeofday compat_sys_gettimeofday
+95 n32 getrlimit compat_sys_getrlimit
+96 n32 getrusage compat_sys_getrusage
+97 n32 sysinfo compat_sys_sysinfo
+98 n32 times compat_sys_times
+99 n32 ptrace compat_sys_ptrace
+100 n32 getuid sys_getuid
+101 n32 syslog sys_syslog
+102 n32 getgid sys_getgid
+103 n32 setuid sys_setuid
+104 n32 setgid sys_setgid
+105 n32 geteuid sys_geteuid
+106 n32 getegid sys_getegid
+107 n32 setpgid sys_setpgid
+108 n32 getppid sys_getppid
+109 n32 getpgrp sys_getpgrp
+110 n32 setsid sys_setsid
+111 n32 setreuid sys_setreuid
+112 n32 setregid sys_setregid
+113 n32 getgroups sys_getgroups
+114 n32 setgroups sys_setgroups
+115 n32 setresuid sys_setresuid
+116 n32 getresuid sys_getresuid
+117 n32 setresgid sys_setresgid
+118 n32 getresgid sys_getresgid
+119 n32 getpgid sys_getpgid
+120 n32 setfsuid sys_setfsuid
+121 n32 setfsgid sys_setfsgid
+122 n32 getsid sys_getsid
+123 n32 capget sys_capget
+124 n32 capset sys_capset
+125 n32 rt_sigpending compat_sys_rt_sigpending
+126 n32 rt_sigtimedwait compat_sys_rt_sigtimedwait_time32
+127 n32 rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+128 n32 rt_sigsuspend compat_sys_rt_sigsuspend
+129 n32 sigaltstack compat_sys_sigaltstack
+130 n32 utime sys_utime32
+131 n32 mknod sys_mknod
+132 n32 personality sys_32_personality
+133 n32 ustat compat_sys_ustat
+134 n32 statfs compat_sys_statfs
+135 n32 fstatfs compat_sys_fstatfs
+136 n32 sysfs sys_sysfs
+137 n32 getpriority sys_getpriority
+138 n32 setpriority sys_setpriority
+139 n32 sched_setparam sys_sched_setparam
+140 n32 sched_getparam sys_sched_getparam
+141 n32 sched_setscheduler sys_sched_setscheduler
+142 n32 sched_getscheduler sys_sched_getscheduler
+143 n32 sched_get_priority_max sys_sched_get_priority_max
+144 n32 sched_get_priority_min sys_sched_get_priority_min
+145 n32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+146 n32 mlock sys_mlock
+147 n32 munlock sys_munlock
+148 n32 mlockall sys_mlockall
+149 n32 munlockall sys_munlockall
+150 n32 vhangup sys_vhangup
+151 n32 pivot_root sys_pivot_root
+152 n32 _sysctl sys_ni_syscall
+153 n32 prctl sys_prctl
+154 n32 adjtimex sys_adjtimex_time32
+155 n32 setrlimit compat_sys_setrlimit
+156 n32 chroot sys_chroot
+157 n32 sync sys_sync
+158 n32 acct sys_acct
+159 n32 settimeofday compat_sys_settimeofday
+160 n32 mount sys_mount
+161 n32 umount2 sys_umount
+162 n32 swapon sys_swapon
+163 n32 swapoff sys_swapoff
+164 n32 reboot sys_reboot
+165 n32 sethostname sys_sethostname
+166 n32 setdomainname sys_setdomainname
+167 n32 create_module sys_ni_syscall
+168 n32 init_module sys_init_module
+169 n32 delete_module sys_delete_module
+170 n32 get_kernel_syms sys_ni_syscall
+171 n32 query_module sys_ni_syscall
+172 n32 quotactl sys_quotactl
+173 n32 nfsservctl sys_ni_syscall
+174 n32 getpmsg sys_ni_syscall
+175 n32 putpmsg sys_ni_syscall
+176 n32 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n32 reserved177 sys_ni_syscall
+178 n32 gettid sys_gettid
+179 n32 readahead sys_readahead
+180 n32 setxattr sys_setxattr
+181 n32 lsetxattr sys_lsetxattr
+182 n32 fsetxattr sys_fsetxattr
+183 n32 getxattr sys_getxattr
+184 n32 lgetxattr sys_lgetxattr
+185 n32 fgetxattr sys_fgetxattr
+186 n32 listxattr sys_listxattr
+187 n32 llistxattr sys_llistxattr
+188 n32 flistxattr sys_flistxattr
+189 n32 removexattr sys_removexattr
+190 n32 lremovexattr sys_lremovexattr
+191 n32 fremovexattr sys_fremovexattr
+192 n32 tkill sys_tkill
+193 n32 reserved193 sys_ni_syscall
+194 n32 futex sys_futex_time32
+195 n32 sched_setaffinity compat_sys_sched_setaffinity
+196 n32 sched_getaffinity compat_sys_sched_getaffinity
+197 n32 cacheflush sys_cacheflush
+198 n32 cachectl sys_cachectl
+199 n32 sysmips __sys_sysmips
+200 n32 io_setup compat_sys_io_setup
+201 n32 io_destroy sys_io_destroy
+202 n32 io_getevents sys_io_getevents_time32
+203 n32 io_submit compat_sys_io_submit
+204 n32 io_cancel sys_io_cancel
+205 n32 exit_group sys_exit_group
+206 n32 lookup_dcookie sys_lookup_dcookie
+207 n32 epoll_create sys_epoll_create
+208 n32 epoll_ctl sys_epoll_ctl
+209 n32 epoll_wait sys_epoll_wait
+210 n32 remap_file_pages sys_remap_file_pages
+211 n32 rt_sigreturn sysn32_rt_sigreturn
+212 n32 fcntl64 compat_sys_fcntl64
+213 n32 set_tid_address sys_set_tid_address
+214 n32 restart_syscall sys_restart_syscall
+215 n32 semtimedop sys_semtimedop_time32
+216 n32 fadvise64 sys_fadvise64_64
+217 n32 statfs64 compat_sys_statfs64
+218 n32 fstatfs64 compat_sys_fstatfs64
+219 n32 sendfile64 sys_sendfile64
+220 n32 timer_create compat_sys_timer_create
+221 n32 timer_settime sys_timer_settime32
+222 n32 timer_gettime sys_timer_gettime32
+223 n32 timer_getoverrun sys_timer_getoverrun
+224 n32 timer_delete sys_timer_delete
+225 n32 clock_settime sys_clock_settime32
+226 n32 clock_gettime sys_clock_gettime32
+227 n32 clock_getres sys_clock_getres_time32
+228 n32 clock_nanosleep sys_clock_nanosleep_time32
+229 n32 tgkill sys_tgkill
+230 n32 utimes sys_utimes_time32
+231 n32 mbind sys_mbind
+232 n32 get_mempolicy sys_get_mempolicy
+233 n32 set_mempolicy sys_set_mempolicy
+234 n32 mq_open compat_sys_mq_open
+235 n32 mq_unlink sys_mq_unlink
+236 n32 mq_timedsend sys_mq_timedsend_time32
+237 n32 mq_timedreceive sys_mq_timedreceive_time32
+238 n32 mq_notify compat_sys_mq_notify
+239 n32 mq_getsetattr compat_sys_mq_getsetattr
+240 n32 vserver sys_ni_syscall
+241 n32 waitid compat_sys_waitid
+# 242 was sys_setaltroot
+243 n32 add_key sys_add_key
+244 n32 request_key sys_request_key
+245 n32 keyctl compat_sys_keyctl
+246 n32 set_thread_area sys_set_thread_area
+247 n32 inotify_init sys_inotify_init
+248 n32 inotify_add_watch sys_inotify_add_watch
+249 n32 inotify_rm_watch sys_inotify_rm_watch
+250 n32 migrate_pages sys_migrate_pages
+251 n32 openat sys_openat
+252 n32 mkdirat sys_mkdirat
+253 n32 mknodat sys_mknodat
+254 n32 fchownat sys_fchownat
+255 n32 futimesat sys_futimesat_time32
+256 n32 newfstatat sys_newfstatat
+257 n32 unlinkat sys_unlinkat
+258 n32 renameat sys_renameat
+259 n32 linkat sys_linkat
+260 n32 symlinkat sys_symlinkat
+261 n32 readlinkat sys_readlinkat
+262 n32 fchmodat sys_fchmodat
+263 n32 faccessat sys_faccessat
+264 n32 pselect6 compat_sys_pselect6_time32
+265 n32 ppoll compat_sys_ppoll_time32
+266 n32 unshare sys_unshare
+267 n32 splice sys_splice
+268 n32 sync_file_range sys_sync_file_range
+269 n32 tee sys_tee
+270 n32 vmsplice sys_vmsplice
+271 n32 move_pages sys_move_pages
+272 n32 set_robust_list compat_sys_set_robust_list
+273 n32 get_robust_list compat_sys_get_robust_list
+274 n32 kexec_load compat_sys_kexec_load
+275 n32 getcpu sys_getcpu
+276 n32 epoll_pwait compat_sys_epoll_pwait
+277 n32 ioprio_set sys_ioprio_set
+278 n32 ioprio_get sys_ioprio_get
+279 n32 utimensat sys_utimensat_time32
+280 n32 signalfd compat_sys_signalfd
+281 n32 timerfd sys_ni_syscall
+282 n32 eventfd sys_eventfd
+283 n32 fallocate sys_fallocate
+284 n32 timerfd_create sys_timerfd_create
+285 n32 timerfd_gettime sys_timerfd_gettime32
+286 n32 timerfd_settime sys_timerfd_settime32
+287 n32 signalfd4 compat_sys_signalfd4
+288 n32 eventfd2 sys_eventfd2
+289 n32 epoll_create1 sys_epoll_create1
+290 n32 dup3 sys_dup3
+291 n32 pipe2 sys_pipe2
+292 n32 inotify_init1 sys_inotify_init1
+293 n32 preadv compat_sys_preadv
+294 n32 pwritev compat_sys_pwritev
+295 n32 rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+296 n32 perf_event_open sys_perf_event_open
+297 n32 accept4 sys_accept4
+298 n32 recvmmsg compat_sys_recvmmsg_time32
+299 n32 getdents64 sys_getdents64
+300 n32 fanotify_init sys_fanotify_init
+301 n32 fanotify_mark sys_fanotify_mark
+302 n32 prlimit64 sys_prlimit64
+303 n32 name_to_handle_at sys_name_to_handle_at
+304 n32 open_by_handle_at sys_open_by_handle_at
+305 n32 clock_adjtime sys_clock_adjtime32
+306 n32 syncfs sys_syncfs
+307 n32 sendmmsg compat_sys_sendmmsg
+308 n32 setns sys_setns
+309 n32 process_vm_readv sys_process_vm_readv
+310 n32 process_vm_writev sys_process_vm_writev
+311 n32 kcmp sys_kcmp
+312 n32 finit_module sys_finit_module
+313 n32 sched_setattr sys_sched_setattr
+314 n32 sched_getattr sys_sched_getattr
+315 n32 renameat2 sys_renameat2
+316 n32 seccomp sys_seccomp
+317 n32 getrandom sys_getrandom
+318 n32 memfd_create sys_memfd_create
+319 n32 bpf sys_bpf
+320 n32 execveat compat_sys_execveat
+321 n32 userfaultfd sys_userfaultfd
+322 n32 membarrier sys_membarrier
+323 n32 mlock2 sys_mlock2
+324 n32 copy_file_range sys_copy_file_range
+325 n32 preadv2 compat_sys_preadv2
+326 n32 pwritev2 compat_sys_pwritev2
+327 n32 pkey_mprotect sys_pkey_mprotect
+328 n32 pkey_alloc sys_pkey_alloc
+329 n32 pkey_free sys_pkey_free
+330 n32 statx sys_statx
+331 n32 rseq sys_rseq
+332 n32 io_pgetevents compat_sys_io_pgetevents
+# 333 through 402 are unassigned to sync up with generic numbers
+403 n32 clock_gettime64 sys_clock_gettime
+404 n32 clock_settime64 sys_clock_settime
+405 n32 clock_adjtime64 sys_clock_adjtime
+406 n32 clock_getres_time64 sys_clock_getres
+407 n32 clock_nanosleep_time64 sys_clock_nanosleep
+408 n32 timer_gettime64 sys_timer_gettime
+409 n32 timer_settime64 sys_timer_settime
+410 n32 timerfd_gettime64 sys_timerfd_gettime
+411 n32 timerfd_settime64 sys_timerfd_settime
+412 n32 utimensat_time64 sys_utimensat
+413 n32 pselect6_time64 compat_sys_pselect6_time64
+414 n32 ppoll_time64 compat_sys_ppoll_time64
+416 n32 io_pgetevents_time64 sys_io_pgetevents
+417 n32 recvmmsg_time64 compat_sys_recvmmsg_time64
+418 n32 mq_timedsend_time64 sys_mq_timedsend
+419 n32 mq_timedreceive_time64 sys_mq_timedreceive
+420 n32 semtimedop_time64 sys_semtimedop
+421 n32 rt_sigtimedwait_time64 compat_sys_rt_sigtimedwait_time64
+422 n32 futex_time64 sys_futex
+423 n32 sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 n32 pidfd_send_signal sys_pidfd_send_signal
+425 n32 io_uring_setup sys_io_uring_setup
+426 n32 io_uring_enter sys_io_uring_enter
+427 n32 io_uring_register sys_io_uring_register
+428 n32 open_tree sys_open_tree
+429 n32 move_mount sys_move_mount
+430 n32 fsopen sys_fsopen
+431 n32 fsconfig sys_fsconfig
+432 n32 fsmount sys_fsmount
+433 n32 fspick sys_fspick
+434 n32 pidfd_open sys_pidfd_open
+435 n32 clone3 __sys_clone3
+436 n32 close_range sys_close_range
+437 n32 openat2 sys_openat2
+438 n32 pidfd_getfd sys_pidfd_getfd
+439 n32 faccessat2 sys_faccessat2
+440 n32 process_madvise sys_process_madvise
+441 n32 epoll_pwait2 compat_sys_epoll_pwait2
+442 n32 mount_setattr sys_mount_setattr
+443 n32 quotactl_fd sys_quotactl_fd
+444 n32 landlock_create_ruleset sys_landlock_create_ruleset
+445 n32 landlock_add_rule sys_landlock_add_rule
+446 n32 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 n32 process_mrelease sys_process_mrelease
+449 n32 futex_waitv sys_futex_waitv
+450 n32 set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/mips/kernel/syscalls/syscall_n64.tbl b/arch/mips/kernel/syscalls/syscall_n64.tbl
new file mode 100644
index 000000000..3f1886ad9
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_n64.tbl
@@ -0,0 +1,367 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "n64" for this file.
+#
+0 n64 read sys_read
+1 n64 write sys_write
+2 n64 open sys_open
+3 n64 close sys_close
+4 n64 stat sys_newstat
+5 n64 fstat sys_newfstat
+6 n64 lstat sys_newlstat
+7 n64 poll sys_poll
+8 n64 lseek sys_lseek
+9 n64 mmap sys_mips_mmap
+10 n64 mprotect sys_mprotect
+11 n64 munmap sys_munmap
+12 n64 brk sys_brk
+13 n64 rt_sigaction sys_rt_sigaction
+14 n64 rt_sigprocmask sys_rt_sigprocmask
+15 n64 ioctl sys_ioctl
+16 n64 pread64 sys_pread64
+17 n64 pwrite64 sys_pwrite64
+18 n64 readv sys_readv
+19 n64 writev sys_writev
+20 n64 access sys_access
+21 n64 pipe sysm_pipe
+22 n64 _newselect sys_select
+23 n64 sched_yield sys_sched_yield
+24 n64 mremap sys_mremap
+25 n64 msync sys_msync
+26 n64 mincore sys_mincore
+27 n64 madvise sys_madvise
+28 n64 shmget sys_shmget
+29 n64 shmat sys_shmat
+30 n64 shmctl sys_old_shmctl
+31 n64 dup sys_dup
+32 n64 dup2 sys_dup2
+33 n64 pause sys_pause
+34 n64 nanosleep sys_nanosleep
+35 n64 getitimer sys_getitimer
+36 n64 setitimer sys_setitimer
+37 n64 alarm sys_alarm
+38 n64 getpid sys_getpid
+39 n64 sendfile sys_sendfile64
+40 n64 socket sys_socket
+41 n64 connect sys_connect
+42 n64 accept sys_accept
+43 n64 sendto sys_sendto
+44 n64 recvfrom sys_recvfrom
+45 n64 sendmsg sys_sendmsg
+46 n64 recvmsg sys_recvmsg
+47 n64 shutdown sys_shutdown
+48 n64 bind sys_bind
+49 n64 listen sys_listen
+50 n64 getsockname sys_getsockname
+51 n64 getpeername sys_getpeername
+52 n64 socketpair sys_socketpair
+53 n64 setsockopt sys_setsockopt
+54 n64 getsockopt sys_getsockopt
+55 n64 clone __sys_clone
+56 n64 fork __sys_fork
+57 n64 execve sys_execve
+58 n64 exit sys_exit
+59 n64 wait4 sys_wait4
+60 n64 kill sys_kill
+61 n64 uname sys_newuname
+62 n64 semget sys_semget
+63 n64 semop sys_semop
+64 n64 semctl sys_old_semctl
+65 n64 shmdt sys_shmdt
+66 n64 msgget sys_msgget
+67 n64 msgsnd sys_msgsnd
+68 n64 msgrcv sys_msgrcv
+69 n64 msgctl sys_old_msgctl
+70 n64 fcntl sys_fcntl
+71 n64 flock sys_flock
+72 n64 fsync sys_fsync
+73 n64 fdatasync sys_fdatasync
+74 n64 truncate sys_truncate
+75 n64 ftruncate sys_ftruncate
+76 n64 getdents sys_getdents
+77 n64 getcwd sys_getcwd
+78 n64 chdir sys_chdir
+79 n64 fchdir sys_fchdir
+80 n64 rename sys_rename
+81 n64 mkdir sys_mkdir
+82 n64 rmdir sys_rmdir
+83 n64 creat sys_creat
+84 n64 link sys_link
+85 n64 unlink sys_unlink
+86 n64 symlink sys_symlink
+87 n64 readlink sys_readlink
+88 n64 chmod sys_chmod
+89 n64 fchmod sys_fchmod
+90 n64 chown sys_chown
+91 n64 fchown sys_fchown
+92 n64 lchown sys_lchown
+93 n64 umask sys_umask
+94 n64 gettimeofday sys_gettimeofday
+95 n64 getrlimit sys_getrlimit
+96 n64 getrusage sys_getrusage
+97 n64 sysinfo sys_sysinfo
+98 n64 times sys_times
+99 n64 ptrace sys_ptrace
+100 n64 getuid sys_getuid
+101 n64 syslog sys_syslog
+102 n64 getgid sys_getgid
+103 n64 setuid sys_setuid
+104 n64 setgid sys_setgid
+105 n64 geteuid sys_geteuid
+106 n64 getegid sys_getegid
+107 n64 setpgid sys_setpgid
+108 n64 getppid sys_getppid
+109 n64 getpgrp sys_getpgrp
+110 n64 setsid sys_setsid
+111 n64 setreuid sys_setreuid
+112 n64 setregid sys_setregid
+113 n64 getgroups sys_getgroups
+114 n64 setgroups sys_setgroups
+115 n64 setresuid sys_setresuid
+116 n64 getresuid sys_getresuid
+117 n64 setresgid sys_setresgid
+118 n64 getresgid sys_getresgid
+119 n64 getpgid sys_getpgid
+120 n64 setfsuid sys_setfsuid
+121 n64 setfsgid sys_setfsgid
+122 n64 getsid sys_getsid
+123 n64 capget sys_capget
+124 n64 capset sys_capset
+125 n64 rt_sigpending sys_rt_sigpending
+126 n64 rt_sigtimedwait sys_rt_sigtimedwait
+127 n64 rt_sigqueueinfo sys_rt_sigqueueinfo
+128 n64 rt_sigsuspend sys_rt_sigsuspend
+129 n64 sigaltstack sys_sigaltstack
+130 n64 utime sys_utime
+131 n64 mknod sys_mknod
+132 n64 personality sys_personality
+133 n64 ustat sys_ustat
+134 n64 statfs sys_statfs
+135 n64 fstatfs sys_fstatfs
+136 n64 sysfs sys_sysfs
+137 n64 getpriority sys_getpriority
+138 n64 setpriority sys_setpriority
+139 n64 sched_setparam sys_sched_setparam
+140 n64 sched_getparam sys_sched_getparam
+141 n64 sched_setscheduler sys_sched_setscheduler
+142 n64 sched_getscheduler sys_sched_getscheduler
+143 n64 sched_get_priority_max sys_sched_get_priority_max
+144 n64 sched_get_priority_min sys_sched_get_priority_min
+145 n64 sched_rr_get_interval sys_sched_rr_get_interval
+146 n64 mlock sys_mlock
+147 n64 munlock sys_munlock
+148 n64 mlockall sys_mlockall
+149 n64 munlockall sys_munlockall
+150 n64 vhangup sys_vhangup
+151 n64 pivot_root sys_pivot_root
+152 n64 _sysctl sys_ni_syscall
+153 n64 prctl sys_prctl
+154 n64 adjtimex sys_adjtimex
+155 n64 setrlimit sys_setrlimit
+156 n64 chroot sys_chroot
+157 n64 sync sys_sync
+158 n64 acct sys_acct
+159 n64 settimeofday sys_settimeofday
+160 n64 mount sys_mount
+161 n64 umount2 sys_umount
+162 n64 swapon sys_swapon
+163 n64 swapoff sys_swapoff
+164 n64 reboot sys_reboot
+165 n64 sethostname sys_sethostname
+166 n64 setdomainname sys_setdomainname
+167 n64 create_module sys_ni_syscall
+168 n64 init_module sys_init_module
+169 n64 delete_module sys_delete_module
+170 n64 get_kernel_syms sys_ni_syscall
+171 n64 query_module sys_ni_syscall
+172 n64 quotactl sys_quotactl
+173 n64 nfsservctl sys_ni_syscall
+174 n64 getpmsg sys_ni_syscall
+175 n64 putpmsg sys_ni_syscall
+176 n64 afs_syscall sys_ni_syscall
+# 177 reserved for security
+177 n64 reserved177 sys_ni_syscall
+178 n64 gettid sys_gettid
+179 n64 readahead sys_readahead
+180 n64 setxattr sys_setxattr
+181 n64 lsetxattr sys_lsetxattr
+182 n64 fsetxattr sys_fsetxattr
+183 n64 getxattr sys_getxattr
+184 n64 lgetxattr sys_lgetxattr
+185 n64 fgetxattr sys_fgetxattr
+186 n64 listxattr sys_listxattr
+187 n64 llistxattr sys_llistxattr
+188 n64 flistxattr sys_flistxattr
+189 n64 removexattr sys_removexattr
+190 n64 lremovexattr sys_lremovexattr
+191 n64 fremovexattr sys_fremovexattr
+192 n64 tkill sys_tkill
+193 n64 reserved193 sys_ni_syscall
+194 n64 futex sys_futex
+195 n64 sched_setaffinity sys_sched_setaffinity
+196 n64 sched_getaffinity sys_sched_getaffinity
+197 n64 cacheflush sys_cacheflush
+198 n64 cachectl sys_cachectl
+199 n64 sysmips __sys_sysmips
+200 n64 io_setup sys_io_setup
+201 n64 io_destroy sys_io_destroy
+202 n64 io_getevents sys_io_getevents
+203 n64 io_submit sys_io_submit
+204 n64 io_cancel sys_io_cancel
+205 n64 exit_group sys_exit_group
+206 n64 lookup_dcookie sys_lookup_dcookie
+207 n64 epoll_create sys_epoll_create
+208 n64 epoll_ctl sys_epoll_ctl
+209 n64 epoll_wait sys_epoll_wait
+210 n64 remap_file_pages sys_remap_file_pages
+211 n64 rt_sigreturn sys_rt_sigreturn
+212 n64 set_tid_address sys_set_tid_address
+213 n64 restart_syscall sys_restart_syscall
+214 n64 semtimedop sys_semtimedop
+215 n64 fadvise64 sys_fadvise64_64
+216 n64 timer_create sys_timer_create
+217 n64 timer_settime sys_timer_settime
+218 n64 timer_gettime sys_timer_gettime
+219 n64 timer_getoverrun sys_timer_getoverrun
+220 n64 timer_delete sys_timer_delete
+221 n64 clock_settime sys_clock_settime
+222 n64 clock_gettime sys_clock_gettime
+223 n64 clock_getres sys_clock_getres
+224 n64 clock_nanosleep sys_clock_nanosleep
+225 n64 tgkill sys_tgkill
+226 n64 utimes sys_utimes
+227 n64 mbind sys_mbind
+228 n64 get_mempolicy sys_get_mempolicy
+229 n64 set_mempolicy sys_set_mempolicy
+230 n64 mq_open sys_mq_open
+231 n64 mq_unlink sys_mq_unlink
+232 n64 mq_timedsend sys_mq_timedsend
+233 n64 mq_timedreceive sys_mq_timedreceive
+234 n64 mq_notify sys_mq_notify
+235 n64 mq_getsetattr sys_mq_getsetattr
+236 n64 vserver sys_ni_syscall
+237 n64 waitid sys_waitid
+# 238 was sys_setaltroot
+239 n64 add_key sys_add_key
+240 n64 request_key sys_request_key
+241 n64 keyctl sys_keyctl
+242 n64 set_thread_area sys_set_thread_area
+243 n64 inotify_init sys_inotify_init
+244 n64 inotify_add_watch sys_inotify_add_watch
+245 n64 inotify_rm_watch sys_inotify_rm_watch
+246 n64 migrate_pages sys_migrate_pages
+247 n64 openat sys_openat
+248 n64 mkdirat sys_mkdirat
+249 n64 mknodat sys_mknodat
+250 n64 fchownat sys_fchownat
+251 n64 futimesat sys_futimesat
+252 n64 newfstatat sys_newfstatat
+253 n64 unlinkat sys_unlinkat
+254 n64 renameat sys_renameat
+255 n64 linkat sys_linkat
+256 n64 symlinkat sys_symlinkat
+257 n64 readlinkat sys_readlinkat
+258 n64 fchmodat sys_fchmodat
+259 n64 faccessat sys_faccessat
+260 n64 pselect6 sys_pselect6
+261 n64 ppoll sys_ppoll
+262 n64 unshare sys_unshare
+263 n64 splice sys_splice
+264 n64 sync_file_range sys_sync_file_range
+265 n64 tee sys_tee
+266 n64 vmsplice sys_vmsplice
+267 n64 move_pages sys_move_pages
+268 n64 set_robust_list sys_set_robust_list
+269 n64 get_robust_list sys_get_robust_list
+270 n64 kexec_load sys_kexec_load
+271 n64 getcpu sys_getcpu
+272 n64 epoll_pwait sys_epoll_pwait
+273 n64 ioprio_set sys_ioprio_set
+274 n64 ioprio_get sys_ioprio_get
+275 n64 utimensat sys_utimensat
+276 n64 signalfd sys_signalfd
+277 n64 timerfd sys_ni_syscall
+278 n64 eventfd sys_eventfd
+279 n64 fallocate sys_fallocate
+280 n64 timerfd_create sys_timerfd_create
+281 n64 timerfd_gettime sys_timerfd_gettime
+282 n64 timerfd_settime sys_timerfd_settime
+283 n64 signalfd4 sys_signalfd4
+284 n64 eventfd2 sys_eventfd2
+285 n64 epoll_create1 sys_epoll_create1
+286 n64 dup3 sys_dup3
+287 n64 pipe2 sys_pipe2
+288 n64 inotify_init1 sys_inotify_init1
+289 n64 preadv sys_preadv
+290 n64 pwritev sys_pwritev
+291 n64 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+292 n64 perf_event_open sys_perf_event_open
+293 n64 accept4 sys_accept4
+294 n64 recvmmsg sys_recvmmsg
+295 n64 fanotify_init sys_fanotify_init
+296 n64 fanotify_mark sys_fanotify_mark
+297 n64 prlimit64 sys_prlimit64
+298 n64 name_to_handle_at sys_name_to_handle_at
+299 n64 open_by_handle_at sys_open_by_handle_at
+300 n64 clock_adjtime sys_clock_adjtime
+301 n64 syncfs sys_syncfs
+302 n64 sendmmsg sys_sendmmsg
+303 n64 setns sys_setns
+304 n64 process_vm_readv sys_process_vm_readv
+305 n64 process_vm_writev sys_process_vm_writev
+306 n64 kcmp sys_kcmp
+307 n64 finit_module sys_finit_module
+308 n64 getdents64 sys_getdents64
+309 n64 sched_setattr sys_sched_setattr
+310 n64 sched_getattr sys_sched_getattr
+311 n64 renameat2 sys_renameat2
+312 n64 seccomp sys_seccomp
+313 n64 getrandom sys_getrandom
+314 n64 memfd_create sys_memfd_create
+315 n64 bpf sys_bpf
+316 n64 execveat sys_execveat
+317 n64 userfaultfd sys_userfaultfd
+318 n64 membarrier sys_membarrier
+319 n64 mlock2 sys_mlock2
+320 n64 copy_file_range sys_copy_file_range
+321 n64 preadv2 sys_preadv2
+322 n64 pwritev2 sys_pwritev2
+323 n64 pkey_mprotect sys_pkey_mprotect
+324 n64 pkey_alloc sys_pkey_alloc
+325 n64 pkey_free sys_pkey_free
+326 n64 statx sys_statx
+327 n64 rseq sys_rseq
+328 n64 io_pgetevents sys_io_pgetevents
+# 329 through 423 are reserved to sync up with other architectures
+424 n64 pidfd_send_signal sys_pidfd_send_signal
+425 n64 io_uring_setup sys_io_uring_setup
+426 n64 io_uring_enter sys_io_uring_enter
+427 n64 io_uring_register sys_io_uring_register
+428 n64 open_tree sys_open_tree
+429 n64 move_mount sys_move_mount
+430 n64 fsopen sys_fsopen
+431 n64 fsconfig sys_fsconfig
+432 n64 fsmount sys_fsmount
+433 n64 fspick sys_fspick
+434 n64 pidfd_open sys_pidfd_open
+435 n64 clone3 __sys_clone3
+436 n64 close_range sys_close_range
+437 n64 openat2 sys_openat2
+438 n64 pidfd_getfd sys_pidfd_getfd
+439 n64 faccessat2 sys_faccessat2
+440 n64 process_madvise sys_process_madvise
+441 n64 epoll_pwait2 sys_epoll_pwait2
+442 n64 mount_setattr sys_mount_setattr
+443 n64 quotactl_fd sys_quotactl_fd
+444 n64 landlock_create_ruleset sys_landlock_create_ruleset
+445 n64 landlock_add_rule sys_landlock_add_rule
+446 n64 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 n64 process_mrelease sys_process_mrelease
+449 n64 futex_waitv sys_futex_waitv
+450 common set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/mips/kernel/syscalls/syscall_o32.tbl b/arch/mips/kernel/syscalls/syscall_o32.tbl
new file mode 100644
index 000000000..8f243e35a
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscall_o32.tbl
@@ -0,0 +1,440 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for mips
+#
+# The format is:
+# <number> <abi> <name> <entry point> <compat entry point>
+#
+# The <abi> is always "o32" for this file.
+#
+0 o32 syscall sys_syscall sys32_syscall
+1 o32 exit sys_exit
+2 o32 fork __sys_fork
+3 o32 read sys_read
+4 o32 write sys_write
+5 o32 open sys_open compat_sys_open
+6 o32 close sys_close
+7 o32 waitpid sys_waitpid
+8 o32 creat sys_creat
+9 o32 link sys_link
+10 o32 unlink sys_unlink
+11 o32 execve sys_execve compat_sys_execve
+12 o32 chdir sys_chdir
+13 o32 time sys_time32
+14 o32 mknod sys_mknod
+15 o32 chmod sys_chmod
+16 o32 lchown sys_lchown
+17 o32 break sys_ni_syscall
+# 18 was sys_stat
+18 o32 unused18 sys_ni_syscall
+19 o32 lseek sys_lseek
+20 o32 getpid sys_getpid
+21 o32 mount sys_mount
+22 o32 umount sys_oldumount
+23 o32 setuid sys_setuid
+24 o32 getuid sys_getuid
+25 o32 stime sys_stime32
+26 o32 ptrace sys_ptrace compat_sys_ptrace
+27 o32 alarm sys_alarm
+# 28 was sys_fstat
+28 o32 unused28 sys_ni_syscall
+29 o32 pause sys_pause
+30 o32 utime sys_utime32
+31 o32 stty sys_ni_syscall
+32 o32 gtty sys_ni_syscall
+33 o32 access sys_access
+34 o32 nice sys_nice
+35 o32 ftime sys_ni_syscall
+36 o32 sync sys_sync
+37 o32 kill sys_kill
+38 o32 rename sys_rename
+39 o32 mkdir sys_mkdir
+40 o32 rmdir sys_rmdir
+41 o32 dup sys_dup
+42 o32 pipe sysm_pipe
+43 o32 times sys_times compat_sys_times
+44 o32 prof sys_ni_syscall
+45 o32 brk sys_brk
+46 o32 setgid sys_setgid
+47 o32 getgid sys_getgid
+48 o32 signal sys_ni_syscall
+49 o32 geteuid sys_geteuid
+50 o32 getegid sys_getegid
+51 o32 acct sys_acct
+52 o32 umount2 sys_umount
+53 o32 lock sys_ni_syscall
+54 o32 ioctl sys_ioctl compat_sys_ioctl
+55 o32 fcntl sys_fcntl compat_sys_fcntl
+56 o32 mpx sys_ni_syscall
+57 o32 setpgid sys_setpgid
+58 o32 ulimit sys_ni_syscall
+59 o32 unused59 sys_olduname
+60 o32 umask sys_umask
+61 o32 chroot sys_chroot
+62 o32 ustat sys_ustat compat_sys_ustat
+63 o32 dup2 sys_dup2
+64 o32 getppid sys_getppid
+65 o32 getpgrp sys_getpgrp
+66 o32 setsid sys_setsid
+67 o32 sigaction sys_sigaction sys_32_sigaction
+68 o32 sgetmask sys_sgetmask
+69 o32 ssetmask sys_ssetmask
+70 o32 setreuid sys_setreuid
+71 o32 setregid sys_setregid
+72 o32 sigsuspend sys_sigsuspend sys32_sigsuspend
+73 o32 sigpending sys_sigpending compat_sys_sigpending
+74 o32 sethostname sys_sethostname
+75 o32 setrlimit sys_setrlimit compat_sys_setrlimit
+76 o32 getrlimit sys_getrlimit compat_sys_getrlimit
+77 o32 getrusage sys_getrusage compat_sys_getrusage
+78 o32 gettimeofday sys_gettimeofday compat_sys_gettimeofday
+79 o32 settimeofday sys_settimeofday compat_sys_settimeofday
+80 o32 getgroups sys_getgroups
+81 o32 setgroups sys_setgroups
+# 82 was old_select
+82 o32 reserved82 sys_ni_syscall
+83 o32 symlink sys_symlink
+# 84 was sys_lstat
+84 o32 unused84 sys_ni_syscall
+85 o32 readlink sys_readlink
+86 o32 uselib sys_uselib
+87 o32 swapon sys_swapon
+88 o32 reboot sys_reboot
+89 o32 readdir sys_old_readdir compat_sys_old_readdir
+90 o32 mmap sys_mips_mmap
+91 o32 munmap sys_munmap
+92 o32 truncate sys_truncate compat_sys_truncate
+93 o32 ftruncate sys_ftruncate compat_sys_ftruncate
+94 o32 fchmod sys_fchmod
+95 o32 fchown sys_fchown
+96 o32 getpriority sys_getpriority
+97 o32 setpriority sys_setpriority
+98 o32 profil sys_ni_syscall
+99 o32 statfs sys_statfs compat_sys_statfs
+100 o32 fstatfs sys_fstatfs compat_sys_fstatfs
+101 o32 ioperm sys_ni_syscall
+102 o32 socketcall sys_socketcall compat_sys_socketcall
+103 o32 syslog sys_syslog
+104 o32 setitimer sys_setitimer compat_sys_setitimer
+105 o32 getitimer sys_getitimer compat_sys_getitimer
+106 o32 stat sys_newstat compat_sys_newstat
+107 o32 lstat sys_newlstat compat_sys_newlstat
+108 o32 fstat sys_newfstat compat_sys_newfstat
+109 o32 unused109 sys_uname
+110 o32 iopl sys_ni_syscall
+111 o32 vhangup sys_vhangup
+112 o32 idle sys_ni_syscall
+113 o32 vm86 sys_ni_syscall
+114 o32 wait4 sys_wait4 compat_sys_wait4
+115 o32 swapoff sys_swapoff
+116 o32 sysinfo sys_sysinfo compat_sys_sysinfo
+117 o32 ipc sys_ipc compat_sys_ipc
+118 o32 fsync sys_fsync
+119 o32 sigreturn sys_sigreturn sys32_sigreturn
+120 o32 clone __sys_clone
+121 o32 setdomainname sys_setdomainname
+122 o32 uname sys_newuname
+123 o32 modify_ldt sys_ni_syscall
+124 o32 adjtimex sys_adjtimex_time32
+125 o32 mprotect sys_mprotect
+126 o32 sigprocmask sys_sigprocmask compat_sys_sigprocmask
+127 o32 create_module sys_ni_syscall
+128 o32 init_module sys_init_module
+129 o32 delete_module sys_delete_module
+130 o32 get_kernel_syms sys_ni_syscall
+131 o32 quotactl sys_quotactl
+132 o32 getpgid sys_getpgid
+133 o32 fchdir sys_fchdir
+134 o32 bdflush sys_ni_syscall
+135 o32 sysfs sys_sysfs
+136 o32 personality sys_personality sys_32_personality
+137 o32 afs_syscall sys_ni_syscall
+138 o32 setfsuid sys_setfsuid
+139 o32 setfsgid sys_setfsgid
+140 o32 _llseek sys_llseek sys_32_llseek
+141 o32 getdents sys_getdents compat_sys_getdents
+142 o32 _newselect sys_select compat_sys_select
+143 o32 flock sys_flock
+144 o32 msync sys_msync
+145 o32 readv sys_readv
+146 o32 writev sys_writev
+147 o32 cacheflush sys_cacheflush
+148 o32 cachectl sys_cachectl
+149 o32 sysmips __sys_sysmips
+150 o32 unused150 sys_ni_syscall
+151 o32 getsid sys_getsid
+152 o32 fdatasync sys_fdatasync
+153 o32 _sysctl sys_ni_syscall
+154 o32 mlock sys_mlock
+155 o32 munlock sys_munlock
+156 o32 mlockall sys_mlockall
+157 o32 munlockall sys_munlockall
+158 o32 sched_setparam sys_sched_setparam
+159 o32 sched_getparam sys_sched_getparam
+160 o32 sched_setscheduler sys_sched_setscheduler
+161 o32 sched_getscheduler sys_sched_getscheduler
+162 o32 sched_yield sys_sched_yield
+163 o32 sched_get_priority_max sys_sched_get_priority_max
+164 o32 sched_get_priority_min sys_sched_get_priority_min
+165 o32 sched_rr_get_interval sys_sched_rr_get_interval_time32
+166 o32 nanosleep sys_nanosleep_time32
+167 o32 mremap sys_mremap
+168 o32 accept sys_accept
+169 o32 bind sys_bind
+170 o32 connect sys_connect
+171 o32 getpeername sys_getpeername
+172 o32 getsockname sys_getsockname
+173 o32 getsockopt sys_getsockopt sys_getsockopt
+174 o32 listen sys_listen
+175 o32 recv sys_recv compat_sys_recv
+176 o32 recvfrom sys_recvfrom compat_sys_recvfrom
+177 o32 recvmsg sys_recvmsg compat_sys_recvmsg
+178 o32 send sys_send
+179 o32 sendmsg sys_sendmsg compat_sys_sendmsg
+180 o32 sendto sys_sendto
+181 o32 setsockopt sys_setsockopt sys_setsockopt
+182 o32 shutdown sys_shutdown
+183 o32 socket sys_socket
+184 o32 socketpair sys_socketpair
+185 o32 setresuid sys_setresuid
+186 o32 getresuid sys_getresuid
+187 o32 query_module sys_ni_syscall
+188 o32 poll sys_poll
+189 o32 nfsservctl sys_ni_syscall
+190 o32 setresgid sys_setresgid
+191 o32 getresgid sys_getresgid
+192 o32 prctl sys_prctl
+193 o32 rt_sigreturn sys_rt_sigreturn sys32_rt_sigreturn
+194 o32 rt_sigaction sys_rt_sigaction compat_sys_rt_sigaction
+195 o32 rt_sigprocmask sys_rt_sigprocmask compat_sys_rt_sigprocmask
+196 o32 rt_sigpending sys_rt_sigpending compat_sys_rt_sigpending
+197 o32 rt_sigtimedwait sys_rt_sigtimedwait_time32 compat_sys_rt_sigtimedwait_time32
+198 o32 rt_sigqueueinfo sys_rt_sigqueueinfo compat_sys_rt_sigqueueinfo
+199 o32 rt_sigsuspend sys_rt_sigsuspend compat_sys_rt_sigsuspend
+200 o32 pread64 sys_pread64 sys_32_pread
+201 o32 pwrite64 sys_pwrite64 sys_32_pwrite
+202 o32 chown sys_chown
+203 o32 getcwd sys_getcwd
+204 o32 capget sys_capget
+205 o32 capset sys_capset
+206 o32 sigaltstack sys_sigaltstack compat_sys_sigaltstack
+207 o32 sendfile sys_sendfile compat_sys_sendfile
+208 o32 getpmsg sys_ni_syscall
+209 o32 putpmsg sys_ni_syscall
+210 o32 mmap2 sys_mips_mmap2
+211 o32 truncate64 sys_truncate64 sys_32_truncate64
+212 o32 ftruncate64 sys_ftruncate64 sys_32_ftruncate64
+213 o32 stat64 sys_stat64 sys_newstat
+214 o32 lstat64 sys_lstat64 sys_newlstat
+215 o32 fstat64 sys_fstat64 sys_newfstat
+216 o32 pivot_root sys_pivot_root
+217 o32 mincore sys_mincore
+218 o32 madvise sys_madvise
+219 o32 getdents64 sys_getdents64
+220 o32 fcntl64 sys_fcntl64 compat_sys_fcntl64
+221 o32 reserved221 sys_ni_syscall
+222 o32 gettid sys_gettid
+223 o32 readahead sys_readahead sys32_readahead
+224 o32 setxattr sys_setxattr
+225 o32 lsetxattr sys_lsetxattr
+226 o32 fsetxattr sys_fsetxattr
+227 o32 getxattr sys_getxattr
+228 o32 lgetxattr sys_lgetxattr
+229 o32 fgetxattr sys_fgetxattr
+230 o32 listxattr sys_listxattr
+231 o32 llistxattr sys_llistxattr
+232 o32 flistxattr sys_flistxattr
+233 o32 removexattr sys_removexattr
+234 o32 lremovexattr sys_lremovexattr
+235 o32 fremovexattr sys_fremovexattr
+236 o32 tkill sys_tkill
+237 o32 sendfile64 sys_sendfile64
+238 o32 futex sys_futex_time32
+239 o32 sched_setaffinity sys_sched_setaffinity compat_sys_sched_setaffinity
+240 o32 sched_getaffinity sys_sched_getaffinity compat_sys_sched_getaffinity
+241 o32 io_setup sys_io_setup compat_sys_io_setup
+242 o32 io_destroy sys_io_destroy
+243 o32 io_getevents sys_io_getevents_time32
+244 o32 io_submit sys_io_submit compat_sys_io_submit
+245 o32 io_cancel sys_io_cancel
+246 o32 exit_group sys_exit_group
+247 o32 lookup_dcookie sys_lookup_dcookie compat_sys_lookup_dcookie
+248 o32 epoll_create sys_epoll_create
+249 o32 epoll_ctl sys_epoll_ctl
+250 o32 epoll_wait sys_epoll_wait
+251 o32 remap_file_pages sys_remap_file_pages
+252 o32 set_tid_address sys_set_tid_address
+253 o32 restart_syscall sys_restart_syscall
+254 o32 fadvise64 sys_fadvise64_64 sys32_fadvise64_64
+255 o32 statfs64 sys_statfs64 compat_sys_statfs64
+256 o32 fstatfs64 sys_fstatfs64 compat_sys_fstatfs64
+257 o32 timer_create sys_timer_create compat_sys_timer_create
+258 o32 timer_settime sys_timer_settime32
+259 o32 timer_gettime sys_timer_gettime32
+260 o32 timer_getoverrun sys_timer_getoverrun
+261 o32 timer_delete sys_timer_delete
+262 o32 clock_settime sys_clock_settime32
+263 o32 clock_gettime sys_clock_gettime32
+264 o32 clock_getres sys_clock_getres_time32
+265 o32 clock_nanosleep sys_clock_nanosleep_time32
+266 o32 tgkill sys_tgkill
+267 o32 utimes sys_utimes_time32
+268 o32 mbind sys_mbind
+269 o32 get_mempolicy sys_get_mempolicy
+270 o32 set_mempolicy sys_set_mempolicy
+271 o32 mq_open sys_mq_open compat_sys_mq_open
+272 o32 mq_unlink sys_mq_unlink
+273 o32 mq_timedsend sys_mq_timedsend_time32
+274 o32 mq_timedreceive sys_mq_timedreceive_time32
+275 o32 mq_notify sys_mq_notify compat_sys_mq_notify
+276 o32 mq_getsetattr sys_mq_getsetattr compat_sys_mq_getsetattr
+277 o32 vserver sys_ni_syscall
+278 o32 waitid sys_waitid compat_sys_waitid
+# 279 was sys_setaltroot
+280 o32 add_key sys_add_key
+281 o32 request_key sys_request_key
+282 o32 keyctl sys_keyctl compat_sys_keyctl
+283 o32 set_thread_area sys_set_thread_area
+284 o32 inotify_init sys_inotify_init
+285 o32 inotify_add_watch sys_inotify_add_watch
+286 o32 inotify_rm_watch sys_inotify_rm_watch
+287 o32 migrate_pages sys_migrate_pages
+288 o32 openat sys_openat compat_sys_openat
+289 o32 mkdirat sys_mkdirat
+290 o32 mknodat sys_mknodat
+291 o32 fchownat sys_fchownat
+292 o32 futimesat sys_futimesat_time32
+293 o32 fstatat64 sys_fstatat64 sys_newfstatat
+294 o32 unlinkat sys_unlinkat
+295 o32 renameat sys_renameat
+296 o32 linkat sys_linkat
+297 o32 symlinkat sys_symlinkat
+298 o32 readlinkat sys_readlinkat
+299 o32 fchmodat sys_fchmodat
+300 o32 faccessat sys_faccessat
+301 o32 pselect6 sys_pselect6_time32 compat_sys_pselect6_time32
+302 o32 ppoll sys_ppoll_time32 compat_sys_ppoll_time32
+303 o32 unshare sys_unshare
+304 o32 splice sys_splice
+305 o32 sync_file_range sys_sync_file_range sys32_sync_file_range
+306 o32 tee sys_tee
+307 o32 vmsplice sys_vmsplice
+308 o32 move_pages sys_move_pages
+309 o32 set_robust_list sys_set_robust_list compat_sys_set_robust_list
+310 o32 get_robust_list sys_get_robust_list compat_sys_get_robust_list
+311 o32 kexec_load sys_kexec_load compat_sys_kexec_load
+312 o32 getcpu sys_getcpu
+313 o32 epoll_pwait sys_epoll_pwait compat_sys_epoll_pwait
+314 o32 ioprio_set sys_ioprio_set
+315 o32 ioprio_get sys_ioprio_get
+316 o32 utimensat sys_utimensat_time32
+317 o32 signalfd sys_signalfd compat_sys_signalfd
+318 o32 timerfd sys_ni_syscall
+319 o32 eventfd sys_eventfd
+320 o32 fallocate sys_fallocate sys32_fallocate
+321 o32 timerfd_create sys_timerfd_create
+322 o32 timerfd_gettime sys_timerfd_gettime32
+323 o32 timerfd_settime sys_timerfd_settime32
+324 o32 signalfd4 sys_signalfd4 compat_sys_signalfd4
+325 o32 eventfd2 sys_eventfd2
+326 o32 epoll_create1 sys_epoll_create1
+327 o32 dup3 sys_dup3
+328 o32 pipe2 sys_pipe2
+329 o32 inotify_init1 sys_inotify_init1
+330 o32 preadv sys_preadv compat_sys_preadv
+331 o32 pwritev sys_pwritev compat_sys_pwritev
+332 o32 rt_tgsigqueueinfo sys_rt_tgsigqueueinfo compat_sys_rt_tgsigqueueinfo
+333 o32 perf_event_open sys_perf_event_open
+334 o32 accept4 sys_accept4
+335 o32 recvmmsg sys_recvmmsg_time32 compat_sys_recvmmsg_time32
+336 o32 fanotify_init sys_fanotify_init
+337 o32 fanotify_mark sys_fanotify_mark compat_sys_fanotify_mark
+338 o32 prlimit64 sys_prlimit64
+339 o32 name_to_handle_at sys_name_to_handle_at
+340 o32 open_by_handle_at sys_open_by_handle_at compat_sys_open_by_handle_at
+341 o32 clock_adjtime sys_clock_adjtime32
+342 o32 syncfs sys_syncfs
+343 o32 sendmmsg sys_sendmmsg compat_sys_sendmmsg
+344 o32 setns sys_setns
+345 o32 process_vm_readv sys_process_vm_readv
+346 o32 process_vm_writev sys_process_vm_writev
+347 o32 kcmp sys_kcmp
+348 o32 finit_module sys_finit_module
+349 o32 sched_setattr sys_sched_setattr
+350 o32 sched_getattr sys_sched_getattr
+351 o32 renameat2 sys_renameat2
+352 o32 seccomp sys_seccomp
+353 o32 getrandom sys_getrandom
+354 o32 memfd_create sys_memfd_create
+355 o32 bpf sys_bpf
+356 o32 execveat sys_execveat compat_sys_execveat
+357 o32 userfaultfd sys_userfaultfd
+358 o32 membarrier sys_membarrier
+359 o32 mlock2 sys_mlock2
+360 o32 copy_file_range sys_copy_file_range
+361 o32 preadv2 sys_preadv2 compat_sys_preadv2
+362 o32 pwritev2 sys_pwritev2 compat_sys_pwritev2
+363 o32 pkey_mprotect sys_pkey_mprotect
+364 o32 pkey_alloc sys_pkey_alloc
+365 o32 pkey_free sys_pkey_free
+366 o32 statx sys_statx
+367 o32 rseq sys_rseq
+368 o32 io_pgetevents sys_io_pgetevents_time32 compat_sys_io_pgetevents
+# room for arch specific calls
+393 o32 semget sys_semget
+394 o32 semctl sys_semctl compat_sys_semctl
+395 o32 shmget sys_shmget
+396 o32 shmctl sys_shmctl compat_sys_shmctl
+397 o32 shmat sys_shmat compat_sys_shmat
+398 o32 shmdt sys_shmdt
+399 o32 msgget sys_msgget
+400 o32 msgsnd sys_msgsnd compat_sys_msgsnd
+401 o32 msgrcv sys_msgrcv compat_sys_msgrcv
+402 o32 msgctl sys_msgctl compat_sys_msgctl
+403 o32 clock_gettime64 sys_clock_gettime sys_clock_gettime
+404 o32 clock_settime64 sys_clock_settime sys_clock_settime
+405 o32 clock_adjtime64 sys_clock_adjtime sys_clock_adjtime
+406 o32 clock_getres_time64 sys_clock_getres sys_clock_getres
+407 o32 clock_nanosleep_time64 sys_clock_nanosleep sys_clock_nanosleep
+408 o32 timer_gettime64 sys_timer_gettime sys_timer_gettime
+409 o32 timer_settime64 sys_timer_settime sys_timer_settime
+410 o32 timerfd_gettime64 sys_timerfd_gettime sys_timerfd_gettime
+411 o32 timerfd_settime64 sys_timerfd_settime sys_timerfd_settime
+412 o32 utimensat_time64 sys_utimensat sys_utimensat
+413 o32 pselect6_time64 sys_pselect6 compat_sys_pselect6_time64
+414 o32 ppoll_time64 sys_ppoll compat_sys_ppoll_time64
+416 o32 io_pgetevents_time64 sys_io_pgetevents sys_io_pgetevents
+417 o32 recvmmsg_time64 sys_recvmmsg compat_sys_recvmmsg_time64
+418 o32 mq_timedsend_time64 sys_mq_timedsend sys_mq_timedsend
+419 o32 mq_timedreceive_time64 sys_mq_timedreceive sys_mq_timedreceive
+420 o32 semtimedop_time64 sys_semtimedop sys_semtimedop
+421 o32 rt_sigtimedwait_time64 sys_rt_sigtimedwait compat_sys_rt_sigtimedwait_time64
+422 o32 futex_time64 sys_futex sys_futex
+423 o32 sched_rr_get_interval_time64 sys_sched_rr_get_interval sys_sched_rr_get_interval
+424 o32 pidfd_send_signal sys_pidfd_send_signal
+425 o32 io_uring_setup sys_io_uring_setup
+426 o32 io_uring_enter sys_io_uring_enter
+427 o32 io_uring_register sys_io_uring_register
+428 o32 open_tree sys_open_tree
+429 o32 move_mount sys_move_mount
+430 o32 fsopen sys_fsopen
+431 o32 fsconfig sys_fsconfig
+432 o32 fsmount sys_fsmount
+433 o32 fspick sys_fspick
+434 o32 pidfd_open sys_pidfd_open
+435 o32 clone3 __sys_clone3
+436 o32 close_range sys_close_range
+437 o32 openat2 sys_openat2
+438 o32 pidfd_getfd sys_pidfd_getfd
+439 o32 faccessat2 sys_faccessat2
+440 o32 process_madvise sys_process_madvise
+441 o32 epoll_pwait2 sys_epoll_pwait2 compat_sys_epoll_pwait2
+442 o32 mount_setattr sys_mount_setattr
+443 o32 quotactl_fd sys_quotactl_fd
+444 o32 landlock_create_ruleset sys_landlock_create_ruleset
+445 o32 landlock_add_rule sys_landlock_add_rule
+446 o32 landlock_restrict_self sys_landlock_restrict_self
+# 447 reserved for memfd_secret
+448 o32 process_mrelease sys_process_mrelease
+449 o32 futex_waitv sys_futex_waitv
+450 o32 set_mempolicy_home_node sys_set_mempolicy_home_node
diff --git a/arch/mips/kernel/syscalls/syscallnr.sh b/arch/mips/kernel/syscalls/syscallnr.sh
new file mode 100644
index 000000000..c190bbefb
--- /dev/null
+++ b/arch/mips/kernel/syscalls/syscallnr.sh
@@ -0,0 +1,26 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+
+fileguard=_UAPI_ASM_MIPS_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
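+	# The table was sorted numerically above, so after this loop nxt
+	# ends up holding one past the highest syscall number seen, i.e.
+	# the value emitted as __NR_${prefix}_Linux_syscalls below.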
+ nxt=0
+ while read nr abi name entry compat ; do
+ nxt=$((nr+1))
+ done
+
+ printf "#define __NR_%s_Linux_syscalls\t%s\n" "${prefix}" "${nxt}"
+ printf "\n"
+ printf "#endif /* %s */" "${fileguard}"
+ printf "\n"
+) > "$out"
diff --git a/arch/mips/kernel/sysrq.c b/arch/mips/kernel/sysrq.c
new file mode 100644
index 000000000..9c1a20191
--- /dev/null
+++ b/arch/mips/kernel/sysrq.c
@@ -0,0 +1,66 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * MIPS specific sysrq operations.
+ *
+ * Copyright (C) 2015 Imagination Technologies Ltd.
+ */
+#include <linux/init.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/sysrq.h>
+#include <linux/workqueue.h>
+
+#include <asm/cpu-features.h>
+#include <asm/mipsregs.h>
+#include <asm/tlbdebug.h>
+
+/*
+ * Dump TLB entries on all CPUs.
+ */
+
+static DEFINE_SPINLOCK(show_lock);
+
+static void sysrq_tlbdump_single(void *dummy)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&show_lock, flags);
+
+ pr_info("CPU%d:\n", smp_processor_id());
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ pr_info("\n");
+
+ spin_unlock_irqrestore(&show_lock, flags);
+}
+
+#ifdef CONFIG_SMP
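+/*
+ * sysrq handlers may be invoked from hard-IRQ context, where calling
+ * smp_call_function() directly is not allowed; defer the cross-CPU
+ * dump to a workqueue instead.
+ */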
+static void sysrq_tlbdump_othercpus(struct work_struct *dummy)
+{
+ smp_call_function(sysrq_tlbdump_single, NULL, 0);
+}
+
+static DECLARE_WORK(sysrq_tlbdump, sysrq_tlbdump_othercpus);
+#endif
+
+static void sysrq_handle_tlbdump(int key)
+{
+ sysrq_tlbdump_single(NULL);
+#ifdef CONFIG_SMP
+ schedule_work(&sysrq_tlbdump);
+#endif
+}
+
+static const struct sysrq_key_op sysrq_tlbdump_op = {
+ .handler = sysrq_handle_tlbdump,
+ .help_msg = "show-tlbs(x)",
+ .action_msg = "Show TLB entries",
+ .enable_mask = SYSRQ_ENABLE_DUMP,
+};
+
+static int __init mips_sysrq_init(void)
+{
+ return register_sysrq_key('x', &sysrq_tlbdump_op);
+}
+arch_initcall(mips_sysrq_init);
diff --git a/arch/mips/kernel/time.c b/arch/mips/kernel/time.c
new file mode 100644
index 000000000..ed339d797
--- /dev/null
+++ b/arch/mips/kernel/time.c
@@ -0,0 +1,167 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright 2001 MontaVista Software Inc.
+ * Author: Jun Sun, jsun@mvista.com or jsun@junsun.net
+ * Copyright (c) 2003, 2004 Maciej W. Rozycki
+ *
+ * Common time service routines for MIPS machines.
+ */
+#include <linux/bug.h>
+#include <linux/clockchips.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/param.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/export.h>
+#include <linux/cpufreq.h>
+#include <linux/delay.h>
+
+#include <asm/cpu-features.h>
+#include <asm/cpu-type.h>
+#include <asm/div64.h>
+#include <asm/time.h>
+
+#ifdef CONFIG_CPU_FREQ
+
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref);
+static DEFINE_PER_CPU(unsigned long, pcp_lpj_ref_freq);
+static unsigned long glb_lpj_ref;
+static unsigned long glb_lpj_ref_freq;
+
+static int cpufreq_callback(struct notifier_block *nb,
+ unsigned long val, void *data)
+{
+ struct cpufreq_freqs *freq = data;
+ struct cpumask *cpus = freq->policy->cpus;
+ unsigned long lpj;
+ int cpu;
+
+ /*
+	 * Skip the lpj adjustment if the CPU-freq transition is safe for
+	 * the delay loop. (Is this possible?)
+ */
+ if (freq->flags & CPUFREQ_CONST_LOOPS)
+ return NOTIFY_OK;
+
+	/* Save the initial lpj values for future scaling. */
+ if (!glb_lpj_ref) {
+ glb_lpj_ref = boot_cpu_data.udelay_val;
+ glb_lpj_ref_freq = freq->old;
+
+ for_each_online_cpu(cpu) {
+ per_cpu(pcp_lpj_ref, cpu) =
+ cpu_data[cpu].udelay_val;
+ per_cpu(pcp_lpj_ref_freq, cpu) = freq->old;
+ }
+ }
+
+ /*
+ * Adjust global lpj variable and per-CPU udelay_val number in
+ * accordance with the new CPU frequency.
+ */
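+	/*
+	 * Scale before the transition when the frequency goes up and after
+	 * it when the frequency goes down, so udelay() never runs with an
+	 * lpj value that is too small for the current clock rate.
+	 */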
+ if ((val == CPUFREQ_PRECHANGE && freq->old < freq->new) ||
+ (val == CPUFREQ_POSTCHANGE && freq->old > freq->new)) {
+ loops_per_jiffy = cpufreq_scale(glb_lpj_ref,
+ glb_lpj_ref_freq,
+ freq->new);
+
+ for_each_cpu(cpu, cpus) {
+ lpj = cpufreq_scale(per_cpu(pcp_lpj_ref, cpu),
+ per_cpu(pcp_lpj_ref_freq, cpu),
+ freq->new);
+ cpu_data[cpu].udelay_val = (unsigned int)lpj;
+ }
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block cpufreq_notifier = {
+ .notifier_call = cpufreq_callback,
+};
+
+static int __init register_cpufreq_notifier(void)
+{
+ return cpufreq_register_notifier(&cpufreq_notifier,
+ CPUFREQ_TRANSITION_NOTIFIER);
+}
+core_initcall(register_cpufreq_notifier);
+
+#endif /* CONFIG_CPU_FREQ */
+
+/*
+ * forward reference
+ */
+DEFINE_SPINLOCK(rtc_lock);
+EXPORT_SYMBOL(rtc_lock);
+
+static int null_perf_irq(void)
+{
+ return 0;
+}
+
+int (*perf_irq)(void) = null_perf_irq;
+
+EXPORT_SYMBOL(perf_irq);
+
+/*
+ * time_init() does the following things:
+ *
+ * 1) plat_time_init() -
+ *	a) (optional) sets up RTC routines,
+ *	b) (optional) calibrates and sets the mips_hpt_frequency
+ *	   (only needed if you intend to use the CPU counter as the
+ *	   timer interrupt source)
+ * 2) calculates a couple of cached variables for later use
+ */
+
+unsigned int mips_hpt_frequency;
+EXPORT_SYMBOL_GPL(mips_hpt_frequency);
+
+static __init int cpu_has_mfc0_count_bug(void)
+{
+ switch (current_cpu_type()) {
+ case CPU_R4000PC:
+ case CPU_R4000SC:
+ case CPU_R4000MC:
+ /*
+ * V3.0 is documented as suffering from the mfc0 from count bug.
+ * Afaik this is the last version of the R4000. Later versions
+ * were marketed as R4400.
+ */
+ return 1;
+
+ case CPU_R4400PC:
+ case CPU_R4400SC:
+ case CPU_R4400MC:
+ /*
+ * The published errata for the R4400 up to 3.0 say the CPU
+ * has the mfc0 from count bug. This seems the last version
+ * produced.
+ */
+ return 1;
+ }
+
+ return 0;
+}
+
+void __init time_init(void)
+{
+ plat_time_init();
+
+ /*
+ * The use of the R4k timer as a clock event takes precedence;
+ * if reading the Count register might interfere with the timer
+ * interrupt, then we don't use the timer as a clock source.
+ * We may still use the timer as a clock source though if the
+ * timer interrupt isn't reliable; the interference doesn't
+ * matter then, because we don't use the interrupt.
+ */
+ if (mips_clockevent_init() != 0 || !cpu_has_mfc0_count_bug())
+ init_mips_clocksource();
+}
diff --git a/arch/mips/kernel/topology.c b/arch/mips/kernel/topology.c
new file mode 100644
index 000000000..9429d85a4
--- /dev/null
+++ b/arch/mips/kernel/topology.c
@@ -0,0 +1,28 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/cpu.h>
+#include <linux/cpumask.h>
+#include <linux/init.h>
+#include <linux/node.h>
+#include <linux/nodemask.h>
+#include <linux/percpu.h>
+
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static int __init topology_init(void)
+{
+ int i, ret;
+
+ for_each_present_cpu(i) {
+ struct cpu *c = &per_cpu(cpu_devices, i);
+
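+		/* CPU 0 is the boot CPU; it cannot be hot-unplugged. */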
+ c->hotpluggable = !!i;
+ ret = register_cpu(c, i);
+ if (ret)
+ printk(KERN_WARNING "topology_init: register_cpu %d "
+ "failed (%d)\n", i, ret);
+ }
+
+ return 0;
+}
+
+subsys_initcall(topology_init);
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
new file mode 100644
index 000000000..246c6a6b0
--- /dev/null
+++ b/arch/mips/kernel/traps.c
@@ -0,0 +1,2562 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1994 - 1999, 2000, 01, 06 Ralf Baechle
+ * Copyright (C) 1995, 1996 Paul M. Antoine
+ * Copyright (C) 1998 Ulf Carlsson
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 2002, 2003, 2004, 2005, 2007 Maciej W. Rozycki
+ * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2014, Imagination Technologies Ltd.
+ */
+#include <linux/bitops.h>
+#include <linux/bug.h>
+#include <linux/compiler.h>
+#include <linux/context_tracking.h>
+#include <linux/cpu_pm.h>
+#include <linux/kexec.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/extable.h>
+#include <linux/mm.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/smp.h>
+#include <linux/spinlock.h>
+#include <linux/kallsyms.h>
+#include <linux/memblock.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/kprobes.h>
+#include <linux/notifier.h>
+#include <linux/kdb.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+
+#include <asm/addrspace.h>
+#include <asm/bootinfo.h>
+#include <asm/branch.h>
+#include <asm/break.h>
+#include <asm/cop2.h>
+#include <asm/cpu.h>
+#include <asm/cpu-type.h>
+#include <asm/dsp.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/idle.h>
+#include <asm/isa-rev.h>
+#include <asm/mips-cps.h>
+#include <asm/mips-r2-to-r6-emul.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/module.h>
+#include <asm/msa.h>
+#include <asm/ptrace.h>
+#include <asm/sections.h>
+#include <asm/siginfo.h>
+#include <asm/tlbdebug.h>
+#include <asm/traps.h>
+#include <linux/uaccess.h>
+#include <asm/watch.h>
+#include <asm/mmu_context.h>
+#include <asm/types.h>
+#include <asm/stacktrace.h>
+#include <asm/tlbex.h>
+#include <asm/uasm.h>
+
+#include <asm/mach-loongson64/cpucfg-emul.h>
+
+#include "access-helper.h"
+
+extern void check_wait(void);
+extern asmlinkage void rollback_handle_int(void);
+extern asmlinkage void handle_int(void);
+extern asmlinkage void handle_adel(void);
+extern asmlinkage void handle_ades(void);
+extern asmlinkage void handle_ibe(void);
+extern asmlinkage void handle_dbe(void);
+extern asmlinkage void handle_sys(void);
+extern asmlinkage void handle_bp(void);
+extern asmlinkage void handle_ri(void);
+extern asmlinkage void handle_ri_rdhwr_tlbp(void);
+extern asmlinkage void handle_ri_rdhwr(void);
+extern asmlinkage void handle_cpu(void);
+extern asmlinkage void handle_ov(void);
+extern asmlinkage void handle_tr(void);
+extern asmlinkage void handle_msa_fpe(void);
+extern asmlinkage void handle_fpe(void);
+extern asmlinkage void handle_ftlb(void);
+extern asmlinkage void handle_gsexc(void);
+extern asmlinkage void handle_msa(void);
+extern asmlinkage void handle_mdmx(void);
+extern asmlinkage void handle_watch(void);
+extern asmlinkage void handle_mt(void);
+extern asmlinkage void handle_dsp(void);
+extern asmlinkage void handle_mcheck(void);
+extern asmlinkage void handle_reserved(void);
+extern void tlb_do_page_fault_0(void);
+
+void (*board_be_init)(void);
+static int (*board_be_handler)(struct pt_regs *regs, int is_fixup);
+void (*board_nmi_handler_setup)(void);
+void (*board_ejtag_handler_setup)(void);
+void (*board_bind_eic_interrupt)(int irq, int regset);
+void (*board_ebase_setup)(void);
+void (*board_cache_error_setup)(void);
+
+void mips_set_be_handler(int (*handler)(struct pt_regs *regs, int is_fixup))
+{
+ board_be_handler = handler;
+}
+EXPORT_SYMBOL_GPL(mips_set_be_handler);
+
+static void show_raw_backtrace(unsigned long reg29, const char *loglvl,
+ bool user)
+{
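+	/* Align the starting stack pointer down to a word boundary. */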
+ unsigned long *sp = (unsigned long *)(reg29 & ~3);
+ unsigned long addr;
+
+ printk("%sCall Trace:", loglvl);
+#ifdef CONFIG_KALLSYMS
+ printk("%s\n", loglvl);
+#endif
+ while (!kstack_end(sp)) {
+ if (__get_addr(&addr, sp++, user)) {
+ printk("%s (Bad stack address)", loglvl);
+ break;
+ }
+ if (__kernel_text_address(addr))
+ print_ip_sym(loglvl, addr);
+ }
+ printk("%s\n", loglvl);
+}
+
+#ifdef CONFIG_KALLSYMS
+int raw_show_trace;
+static int __init set_raw_show_trace(char *str)
+{
+ raw_show_trace = 1;
+ return 1;
+}
+__setup("raw_show_trace", set_raw_show_trace);
+#endif
+
+static void show_backtrace(struct task_struct *task, const struct pt_regs *regs,
+ const char *loglvl, bool user)
+{
+ unsigned long sp = regs->regs[29];
+ unsigned long ra = regs->regs[31];
+ unsigned long pc = regs->cp0_epc;
+
+ if (!task)
+ task = current;
+
+ if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
+ show_raw_backtrace(sp, loglvl, user);
+ return;
+ }
+ printk("%sCall Trace:\n", loglvl);
+ do {
+ print_ip_sym(loglvl, pc);
+ pc = unwind_stack(task, &sp, pc, &ra);
+ } while (pc);
+ pr_cont("\n");
+}
+
+/*
+ * This routine abuses get_user()/put_user() to reference pointers
+ * with at least a bit of error checking ...
+ */
+static void show_stacktrace(struct task_struct *task,
+ const struct pt_regs *regs, const char *loglvl, bool user)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long stackdata;
+ int i;
+ unsigned long *sp = (unsigned long *)regs->regs[29];
+
+ printk("%sStack :", loglvl);
+ i = 0;
+ while ((unsigned long) sp & (PAGE_SIZE - 1)) {
+ if (i && ((i % (64 / field)) == 0)) {
+ pr_cont("\n");
+ printk("%s ", loglvl);
+ }
+ if (i > 39) {
+ pr_cont(" ...");
+ break;
+ }
+
+ if (__get_addr(&stackdata, sp++, user)) {
+ pr_cont(" (Bad stack address)");
+ break;
+ }
+
+ pr_cont(" %0*lx", field, stackdata);
+ i++;
+ }
+ pr_cont("\n");
+ show_backtrace(task, regs, loglvl, user);
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ struct pt_regs regs;
+
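+	/* Fake a kernel-mode status so the dumper walks kernel addresses. */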
+ regs.cp0_status = KSU_KERNEL;
+ if (sp) {
+ regs.regs[29] = (unsigned long)sp;
+ regs.regs[31] = 0;
+ regs.cp0_epc = 0;
+ } else {
+ if (task && task != current) {
+ regs.regs[29] = task->thread.reg29;
+ regs.regs[31] = 0;
+ regs.cp0_epc = task->thread.reg31;
+ } else {
+ prepare_frametrace(&regs);
+ }
+ }
+ show_stacktrace(task, &regs, loglvl, false);
+}
+
+static void show_code(void *pc, bool user)
+{
+ long i;
+ unsigned short *pc16 = NULL;
+
+ printk("Code:");
+
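+	/* Bit 0 of EPC set means MIPS16e/microMIPS, so fetch 16-bit units. */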
+ if ((unsigned long)pc & 1)
+ pc16 = (u16 *)((unsigned long)pc & ~1);
+
+	for (i = -3; i < 6; i++) {
+ if (pc16) {
+ u16 insn16;
+
+ if (__get_inst16(&insn16, pc16 + i, user))
+ goto bad_address;
+
+ pr_cont("%c%04x%c", (i?' ':'<'), insn16, (i?' ':'>'));
+ } else {
+ u32 insn32;
+
+ if (__get_inst32(&insn32, (u32 *)pc + i, user))
+ goto bad_address;
+
+ pr_cont("%c%08x%c", (i?' ':'<'), insn32, (i?' ':'>'));
+ }
+ }
+ pr_cont("\n");
+ return;
+
+bad_address:
+ pr_cont(" (Bad address in epc)\n\n");
+}
+
+static void __show_regs(const struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int cause = regs->cp0_cause;
+ unsigned int exccode;
+ int i;
+
+ show_regs_print_info(KERN_DEFAULT);
+
+ /*
+ * Saved main processor registers
+ */
+ for (i = 0; i < 32; ) {
+ if ((i % 4) == 0)
+ printk("$%2d :", i);
+ if (i == 0)
+ pr_cont(" %0*lx", field, 0UL);
+ else if (i == 26 || i == 27)
+ pr_cont(" %*s", field, "");
+ else
+ pr_cont(" %0*lx", field, regs->regs[i]);
+
+ i++;
+ if ((i % 4) == 0)
+ pr_cont("\n");
+ }
+
+#ifdef CONFIG_CPU_HAS_SMARTMIPS
+ printk("Acx : %0*lx\n", field, regs->acx);
+#endif
+ if (MIPS_ISA_REV < 6) {
+ printk("Hi : %0*lx\n", field, regs->hi);
+ printk("Lo : %0*lx\n", field, regs->lo);
+ }
+
+ /*
+ * Saved cp0 registers
+ */
+ printk("epc : %0*lx %pS\n", field, regs->cp0_epc,
+ (void *) regs->cp0_epc);
+ printk("ra : %0*lx %pS\n", field, regs->regs[31],
+ (void *) regs->regs[31]);
+
+ printk("Status: %08x ", (uint32_t) regs->cp0_status);
+
+ if (cpu_has_3kex) {
+ if (regs->cp0_status & ST0_KUO)
+ pr_cont("KUo ");
+ if (regs->cp0_status & ST0_IEO)
+ pr_cont("IEo ");
+ if (regs->cp0_status & ST0_KUP)
+ pr_cont("KUp ");
+ if (regs->cp0_status & ST0_IEP)
+ pr_cont("IEp ");
+ if (regs->cp0_status & ST0_KUC)
+ pr_cont("KUc ");
+ if (regs->cp0_status & ST0_IEC)
+ pr_cont("IEc ");
+ } else if (cpu_has_4kex) {
+ if (regs->cp0_status & ST0_KX)
+ pr_cont("KX ");
+ if (regs->cp0_status & ST0_SX)
+ pr_cont("SX ");
+ if (regs->cp0_status & ST0_UX)
+ pr_cont("UX ");
+ switch (regs->cp0_status & ST0_KSU) {
+ case KSU_USER:
+ pr_cont("USER ");
+ break;
+ case KSU_SUPERVISOR:
+ pr_cont("SUPERVISOR ");
+ break;
+ case KSU_KERNEL:
+ pr_cont("KERNEL ");
+ break;
+ default:
+ pr_cont("BAD_MODE ");
+ break;
+ }
+ if (regs->cp0_status & ST0_ERL)
+ pr_cont("ERL ");
+ if (regs->cp0_status & ST0_EXL)
+ pr_cont("EXL ");
+ if (regs->cp0_status & ST0_IE)
+ pr_cont("IE ");
+ }
+ pr_cont("\n");
+
+ exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
+ printk("Cause : %08x (ExcCode %02x)\n", cause, exccode);
+
+ if (1 <= exccode && exccode <= 5)
+ printk("BadVA : %0*lx\n", field, regs->cp0_badvaddr);
+
+ printk("PrId : %08x (%s)\n", read_c0_prid(),
+ cpu_name_string());
+}
+
+/*
+ * FIXME: really the generic show_regs should take a const pointer argument.
+ */
+void show_regs(struct pt_regs *regs)
+{
+ __show_regs(regs);
+ dump_stack();
+}
+
+void show_registers(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+
+ __show_regs(regs);
+ print_modules();
+ printk("Process %s (pid: %d, threadinfo=%p, task=%p, tls=%0*lx)\n",
+ current->comm, current->pid, current_thread_info(), current,
+ field, current_thread_info()->tp_value);
+ if (cpu_has_userlocal) {
+ unsigned long tls;
+
+ tls = read_c0_userlocal();
+ if (tls != current_thread_info()->tp_value)
+ printk("*HwTLS: %0*lx\n", field, tls);
+ }
+
+ show_stacktrace(current, regs, KERN_DEFAULT, user_mode(regs));
+ show_code((void *)regs->cp0_epc, user_mode(regs));
+ printk("\n");
+}
+
+static DEFINE_RAW_SPINLOCK(die_lock);
+
+void __noreturn die(const char *str, struct pt_regs *regs)
+{
+ static int die_counter;
+ int sig = SIGSEGV;
+
+ oops_enter();
+
+ if (notify_die(DIE_OOPS, str, regs, 0, current->thread.trap_nr,
+ SIGSEGV) == NOTIFY_STOP)
+ sig = 0;
+
+ console_verbose();
+ raw_spin_lock_irq(&die_lock);
+ bust_spinlocks(1);
+
+ printk("%s[#%d]:\n", str, ++die_counter);
+ show_registers(regs);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ raw_spin_unlock_irq(&die_lock);
+
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+
+ if (panic_on_oops)
+ panic("Fatal exception");
+
+ if (regs && kexec_should_crash(current))
+ crash_kexec(regs);
+
+ make_task_dead(sig);
+}
+
+extern struct exception_table_entry __start___dbe_table[];
+extern struct exception_table_entry __stop___dbe_table[];
+
+__asm__(
+" .section __dbe_table, \"a\"\n"
+" .previous \n");
+
+/* Given an address, look for it in the exception tables. */
+static const struct exception_table_entry *search_dbe_tables(unsigned long addr)
+{
+ const struct exception_table_entry *e;
+
+ e = search_extable(__start___dbe_table,
+ __stop___dbe_table - __start___dbe_table, addr);
+ if (!e)
+ e = search_module_dbetables(addr);
+ return e;
+}
+
+asmlinkage void do_be(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ const struct exception_table_entry *fixup = NULL;
+ int data = regs->cp0_cause & 4;
+ int action = MIPS_BE_FATAL;
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ /* XXX For now. Fixme, this searches the wrong table ... */
+ if (data && !user_mode(regs))
+ fixup = search_dbe_tables(exception_epc(regs));
+
+ if (fixup)
+ action = MIPS_BE_FIXUP;
+
+ if (board_be_handler)
+ action = board_be_handler(regs, fixup != NULL);
+ else
+ mips_cm_error_report();
+
+ switch (action) {
+ case MIPS_BE_DISCARD:
+ goto out;
+ case MIPS_BE_FIXUP:
+ if (fixup) {
+ regs->cp0_epc = fixup->nextinsn;
+ goto out;
+ }
+ break;
+ default:
+ break;
+ }
+
+ /*
+ * Assume it would be too dangerous to continue ...
+ */
+ printk(KERN_ALERT "%s bus error, epc == %0*lx, ra == %0*lx\n",
+ data ? "Data" : "Instruction",
+ field, regs->cp0_epc, field, regs->regs[31]);
+ if (notify_die(DIE_OOPS, "bus error", regs, 0, current->thread.trap_nr,
+ SIGBUS) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Oops", regs);
+ force_sig(SIGBUS);
+
+out:
+ exception_exit(prev_state);
+}
+
+/*
+ * ll/sc, rdhwr, sync emulation
+ */
+
+#define OPCODE 0xfc000000
+#define BASE 0x03e00000
+#define RT 0x001f0000
+#define OFFSET 0x0000ffff
+#define LL 0xc0000000
+#define SC 0xe0000000
+#define SPEC0 0x00000000
+#define SPEC3 0x7c000000
+#define RD 0x0000f800
+#define FUNC 0x0000003f
+#define SYNC 0x0000000f
+#define RDHWR 0x0000003b
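+
+/*
+ * The masks above decode the standard MIPS I-type layout
+ * (opcode[31:26] | base[25:21] | rt[20:16] | offset[15:0]) used by
+ * ll/sc, and the SPEC0/SPEC3 R-type layout (rd in bits 15:11, func
+ * in the low six bits) used by sync and rdhwr.
+ */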
+
+/* microMIPS definitions */
+#define MM_POOL32A_FUNC 0xfc00ffff
+#define MM_RDHWR 0x00006b3c
+#define MM_RS 0x001f0000
+#define MM_RT 0x03e00000
+
+/*
+ * The ll_bit is cleared by r*_switch.S
+ */
+
+unsigned int ll_bit;
+struct task_struct *ll_task;
+
+static inline int simulate_ll(struct pt_regs *regs, unsigned int opcode)
+{
+ unsigned long value, __user *vaddr;
+ long offset;
+
+ /*
+	 * analyse the ll instruction that just caused an RI exception
+	 * and extract the referenced address into vaddr.
+ */
+
+ /* sign extend offset */
+ offset = opcode & OFFSET;
+ offset <<= 16;
+ offset >>= 16;
+
+ vaddr = (unsigned long __user *)
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+
+ if ((unsigned long)vaddr & 3)
+ return SIGBUS;
+ if (get_user(value, vaddr))
+ return SIGSEGV;
+
+ preempt_disable();
+
+ if (ll_task == NULL || ll_task == current) {
+ ll_bit = 1;
+ } else {
+ ll_bit = 0;
+ }
+ ll_task = current;
+
+ preempt_enable();
+
+ regs->regs[(opcode & RT) >> 16] = value;
+
+ return 0;
+}
+
+static inline int simulate_sc(struct pt_regs *regs, unsigned int opcode)
+{
+ unsigned long __user *vaddr;
+ unsigned long reg;
+ long offset;
+
+ /*
+	 * analyse the sc instruction that just caused an RI exception
+	 * and extract the referenced address into vaddr.
+ */
+
+ /* sign extend offset */
+ offset = opcode & OFFSET;
+ offset <<= 16;
+ offset >>= 16;
+
+ vaddr = (unsigned long __user *)
+ ((unsigned long)(regs->regs[(opcode & BASE) >> 21]) + offset);
+ reg = (opcode & RT) >> 16;
+
+ if ((unsigned long)vaddr & 3)
+ return SIGBUS;
+
+ preempt_disable();
+
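+	/*
+	 * Fail the sc if no ll by this task is still pending: ll_bit is
+	 * cleared on every context switch by r*_switch.S.
+	 */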
+ if (ll_bit == 0 || ll_task != current) {
+ regs->regs[reg] = 0;
+ preempt_enable();
+ return 0;
+ }
+
+ preempt_enable();
+
+ if (put_user(regs->regs[reg], vaddr))
+ return SIGSEGV;
+
+ regs->regs[reg] = 1;
+
+ return 0;
+}
+
+/*
+ * ll uses the opcode of lwc0 and sc uses the opcode of swc0. That is, both
+ * opcodes are supposed to result in coprocessor unusable exceptions if
+ * executed on ll/sc-less processors. That's the theory. In practice a
+ * few processors such as NEC's VR4100 throw reserved instruction exceptions
+ * instead, so we're doing the emulation thing in both exception handlers.
+ */
+static int simulate_llsc(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == LL) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ return simulate_ll(regs, opcode);
+ }
+ if ((opcode & OPCODE) == SC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ return simulate_sc(regs, opcode);
+ }
+
+ return -1; /* Must be something else ... */
+}
+
+/*
+ * Simulate trapping 'rdhwr' instructions to provide user accessible
+ * registers not implemented in hardware.
+ */
+static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
+{
+ struct thread_info *ti = task_thread_info(current);
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ switch (rd) {
+ case MIPS_HWR_CPUNUM: /* CPU number */
+ regs->regs[rt] = smp_processor_id();
+ return 0;
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
+ regs->regs[rt] = min(current_cpu_data.dcache.linesz,
+ current_cpu_data.icache.linesz);
+ return 0;
+ case MIPS_HWR_CC: /* Read count register */
+ regs->regs[rt] = read_c0_count();
+ return 0;
+ case MIPS_HWR_CCRES: /* Count register resolution */
+ switch (current_cpu_type()) {
+ case CPU_20KC:
+ case CPU_25KF:
+ regs->regs[rt] = 1;
+ break;
+ default:
+ regs->regs[rt] = 2;
+ }
+ return 0;
+ case MIPS_HWR_ULR: /* Read UserLocal register */
+ regs->regs[rt] = ti->tp_value;
+ return 0;
+ default:
+ return -1;
+ }
+}
+
+static int simulate_rdhwr_normal(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == SPEC3 && (opcode & FUNC) == RDHWR) {
+ int rd = (opcode & RD) >> 11;
+ int rt = (opcode & RT) >> 16;
+
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+
+static int simulate_rdhwr_mm(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & MM_POOL32A_FUNC) == MM_RDHWR) {
+ int rd = (opcode & MM_RS) >> 16;
+ int rt = (opcode & MM_RT) >> 21;
+ simulate_rdhwr(regs, rd, rt);
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+
+static int simulate_sync(struct pt_regs *regs, unsigned int opcode)
+{
+ if ((opcode & OPCODE) == SPEC0 && (opcode & FUNC) == SYNC) {
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
+ 1, regs, 0);
+ return 0;
+ }
+
+ return -1; /* Must be something else ... */
+}
+
+/*
+ * Loongson-3 CSR instructions emulation
+ */
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+
+#define LWC2 0xc8000000
+#define RS BASE
+#define CSR_OPCODE2 0x00000118
+#define CSR_OPCODE2_MASK 0x000007ff
+#define CSR_FUNC_MASK RT
+#define CSR_FUNC_CPUCFG 0x8
+
+static int simulate_loongson3_cpucfg(struct pt_regs *regs,
+ unsigned int opcode)
+{
+ int op = opcode & OPCODE;
+ int op2 = opcode & CSR_OPCODE2_MASK;
+ int csr_func = (opcode & CSR_FUNC_MASK) >> 16;
+
+ if (op == LWC2 && op2 == CSR_OPCODE2 && csr_func == CSR_FUNC_CPUCFG) {
+ int rd = (opcode & RD) >> 11;
+ int rs = (opcode & RS) >> 21;
+ __u64 sel = regs->regs[rs];
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+ /* Do not emulate on unsupported core models. */
+ preempt_disable();
+ if (!loongson3_cpucfg_emulation_enabled(&current_cpu_data)) {
+ preempt_enable();
+ return -1;
+ }
+ regs->regs[rd] = loongson3_cpucfg_read_synthesized(
+ &current_cpu_data, sel);
+ preempt_enable();
+ return 0;
+ }
+
+ /* Not ours. */
+ return -1;
+}
+#endif /* CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION */
+
+asmlinkage void do_ov(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ die_if_kernel("Integer overflow", regs);
+
+ force_sig_fault(SIGFPE, FPE_INTOVF, (void __user *)regs->cp0_epc);
+ exception_exit(prev_state);
+}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+/*
+ * Send SIGFPE according to FCSR Cause bits, which must have already
+ * been masked against Enable bits. This is important as Inexact can
+ * happen together with Overflow or Underflow, and `ptrace' can set
+ * any bits.
+ */
+void force_fcr31_sig(unsigned long fcr31, void __user *fault_addr,
+ struct task_struct *tsk)
+{
+ int si_code = FPE_FLTUNK;
+
+ if (fcr31 & FPU_CSR_INV_X)
+ si_code = FPE_FLTINV;
+ else if (fcr31 & FPU_CSR_DIV_X)
+ si_code = FPE_FLTDIV;
+ else if (fcr31 & FPU_CSR_OVF_X)
+ si_code = FPE_FLTOVF;
+ else if (fcr31 & FPU_CSR_UDF_X)
+ si_code = FPE_FLTUND;
+ else if (fcr31 & FPU_CSR_INE_X)
+ si_code = FPE_FLTRES;
+
+ force_sig_fault_to_task(SIGFPE, si_code, fault_addr, tsk);
+}
+
+int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
+{
+ int si_code;
+
+ switch (sig) {
+ case 0:
+ return 0;
+
+ case SIGFPE:
+ force_fcr31_sig(fcr31, fault_addr, current);
+ return 1;
+
+ case SIGBUS:
+ force_sig_fault(SIGBUS, BUS_ADRERR, fault_addr);
+ return 1;
+
+ case SIGSEGV:
+ mmap_read_lock(current->mm);
+ if (vma_lookup(current->mm, (unsigned long)fault_addr))
+ si_code = SEGV_ACCERR;
+ else
+ si_code = SEGV_MAPERR;
+ mmap_read_unlock(current->mm);
+ force_sig_fault(SIGSEGV, si_code, fault_addr);
+ return 1;
+
+ default:
+ force_sig(sig);
+ return 1;
+ }
+}
+
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ union mips_instruction inst = { .word = opcode };
+ void __user *fault_addr;
+ unsigned long fcr31;
+ int sig;
+
+ /* If it's obviously not an FP instruction, skip it */
+ switch (inst.i_format.opcode) {
+ case cop1_op:
+ case cop1x_op:
+ case lwc1_op:
+ case ldc1_op:
+ case swc1_op:
+ case sdc1_op:
+ break;
+
+ default:
+ return -1;
+ }
+
+ /*
+ * do_ri skipped over the instruction via compute_return_epc, undo
+ * that for the FPU emulator.
+ */
+ regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
+
+ /* Run the emulator */
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
+ */
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
+
+ /* Restore the hardware register state */
+ own_fpu(1);
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcr31);
+
+ return 0;
+}
+
+/*
+ * XXX Delayed fp exceptions when doing a lazy ctx switch XXX
+ */
+asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
+{
+ enum ctx_state prev_state;
+ void __user *fault_addr;
+ int sig;
+
+ prev_state = exception_enter();
+ if (notify_die(DIE_FP, "FP exception", regs, 0, current->thread.trap_nr,
+ SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear FCSR.Cause before enabling interrupts */
+ write_32bit_cp1_register(CP1_STATUS, fcr31 & ~mask_fcr31_x(fcr31));
+ local_irq_enable();
+
+ die_if_kernel("FP exception in kernel code", regs);
+
+ if (fcr31 & FPU_CSR_UNI_X) {
+ /*
+ * Unimplemented operation exception. If we've got the full
+ * software emulator on-board, let's use it...
+ *
+ * Force FPU to dump state into task/thread context. We're
+ * moving a lot of data here for what is probably a single
+ * instruction, but the alternative is to pre-decode the FP
+ * register operands before invoking the emulator, which seems
+ * a bit extreme for what should be an infrequent event.
+ */
+
+ /* Run the emulator */
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave any
+ * enabled Cause bits set in $fcr31.
+ */
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
+
+ /* Restore the hardware register state */
+ own_fpu(1); /* Using the FPU again. */
+ } else {
+ sig = SIGFPE;
+ fault_addr = (void __user *) regs->cp0_epc;
+ }
+
+ /* Send a signal if required. */
+ process_fpemu_return(sig, fault_addr, fcr31);
+
+out:
+ exception_exit(prev_state);
+}
+
+/*
+ * MIPS MT processors may have fewer FPU contexts than CPU threads. If we've
+ * emulated more than some threshold number of instructions, force migration to
+ * a "CPU" that has FP support.
+ */
+static void mt_ase_fp_affinity(void)
+{
+#ifdef CONFIG_MIPS_MT_FPAFF
+ if (mt_fpemul_threshold > 0 &&
+ ((current->thread.emulated_fp++ > mt_fpemul_threshold))) {
+ /*
+ * If there's no FPU present, or if the application has already
+ * restricted the allowed set to exclude any CPUs with FPUs,
+ * we'll skip the procedure.
+ */
+ if (cpumask_intersects(&current->cpus_mask, &mt_fpu_cpumask)) {
+ cpumask_t tmask;
+
+ current->thread.user_cpus_allowed
+ = current->cpus_mask;
+ cpumask_and(&tmask, &current->cpus_mask,
+ &mt_fpu_cpumask);
+ set_cpus_allowed_ptr(current, &tmask);
+ set_thread_flag(TIF_FPUBOUND);
+ }
+ }
+#endif /* CONFIG_MIPS_MT_FPAFF */
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int simulate_fp(struct pt_regs *regs, unsigned int opcode,
+ unsigned long old_epc, unsigned long old_ra)
+{
+ return -1;
+}
+
+#endif /* !CONFIG_MIPS_FP_SUPPORT */
+
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
+ const char *str)
+{
+ char b[40];
+
+#ifdef CONFIG_KGDB_LOW_LEVEL_TRAP
+ if (kgdb_ll_trap(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+#endif /* CONFIG_KGDB_LOW_LEVEL_TRAP */
+
+ if (notify_die(DIE_TRAP, str, regs, code, current->thread.trap_nr,
+ SIGTRAP) == NOTIFY_STOP)
+ return;
+
+ /*
+ * A short test says that IRIX 5.3 sends SIGTRAP for all trap
+ * insns, even for trap and break codes that indicate arithmetic
+ * failures. Weird ...
+ * But should we continue the brokenness??? --macro
+ */
+ switch (code) {
+ case BRK_OVERFLOW:
+ case BRK_DIVZERO:
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
+ force_sig_fault(SIGFPE,
+ code == BRK_DIVZERO ? FPE_INTDIV : FPE_INTOVF,
+ (void __user *) regs->cp0_epc);
+ break;
+ case BRK_BUG:
+ die_if_kernel("Kernel bug detected", regs);
+ force_sig(SIGTRAP);
+ break;
+ case BRK_MEMU:
+ /*
+ * This breakpoint code is used by the FPU emulator to retake
+ * control of the CPU after executing the instruction from the
+ * delay slot of an emulated branch.
+ *
+ * Terminate if exception was recognized as a delay slot return
+ * otherwise handle as normal.
+ */
+ if (do_dsemulret(regs))
+ return;
+
+ die_if_kernel("Math emu break/trap", regs);
+ force_sig(SIGTRAP);
+ break;
+ default:
+ scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
+ die_if_kernel(b, regs);
+ if (si_code) {
+ force_sig_fault(SIGTRAP, si_code, NULL);
+ } else {
+ force_sig(SIGTRAP);
+ }
+ }
+}
+
+asmlinkage void do_bp(struct pt_regs *regs)
+{
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
+ unsigned int opcode, bcode;
+ enum ctx_state prev_state;
+ bool user = user_mode(regs);
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ u16 instr[2];
+
+ if (__get_inst16(&instr[0], (u16 *)epc, user))
+ goto out_sigsegv;
+
+ if (!cpu_has_mmips) {
+ /* MIPS16e mode */
+ bcode = (instr[0] >> 5) & 0x3f;
+ } else if (mm_insn_16bit(instr[0])) {
+ /* 16-bit microMIPS BREAK */
+ bcode = instr[0] & 0xf;
+ } else {
+ /* 32-bit microMIPS BREAK */
+ if (__get_inst16(&instr[1], (u16 *)(epc + 2), user))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ bcode = (opcode >> 6) & ((1 << 20) - 1);
+ }
+ } else {
+ if (__get_inst32(&opcode, (u32 *)epc, user))
+ goto out_sigsegv;
+ bcode = (opcode >> 6) & ((1 << 20) - 1);
+ }
+
+ /*
+	 * There is an ancient bug in MIPS assemblers whereby the break
+	 * code is placed starting at bit 16 instead of bit 6 of the opcode.
+	 * Gas is bug-compatible, but not always, grrr...
+	 * We handle both cases with a simple heuristic: e.g. a "break 7"
+	 * assembled the broken way arrives here as bcode == 7 << 10, and
+	 * the swap below recovers the intended code 7. --macro
+ */
+ if (bcode >= (1 << 10))
+ bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
+
+ /*
+ * notify the kprobe handlers, if instruction is likely to
+ * pertain to them.
+ */
+ switch (bcode) {
+ case BRK_UPROBE:
+ if (notify_die(DIE_UPROBE, "uprobe", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_UPROBE_XOL:
+ if (notify_die(DIE_UPROBE_XOL, "uprobe_xol", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_BP:
+ if (notify_die(DIE_BREAK, "debug", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ case BRK_KPROBE_SSTEPBP:
+ if (notify_die(DIE_SSTEPBP, "single_step", regs, bcode,
+ current->thread.trap_nr, SIGTRAP) == NOTIFY_STOP)
+ goto out;
+ else
+ break;
+ default:
+ break;
+ }
+
+ do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
+
+out:
+ exception_exit(prev_state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void do_tr(struct pt_regs *regs)
+{
+ u32 opcode, tcode = 0;
+ enum ctx_state prev_state;
+ u16 instr[2];
+ bool user = user_mode(regs);
+ unsigned long epc = msk_isa16_mode(exception_epc(regs));
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ if (get_isa16_mode(regs->cp0_epc)) {
+ if (__get_inst16(&instr[0], (u16 *)(epc + 0), user) ||
+ __get_inst16(&instr[1], (u16 *)(epc + 2), user))
+ goto out_sigsegv;
+ opcode = (instr[0] << 16) | instr[1];
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 12) & ((1 << 4) - 1);
+ } else {
+ if (__get_inst32(&opcode, (u32 *)epc, user))
+ goto out_sigsegv;
+ /* Immediate versions don't provide a code. */
+ if (!(opcode & OPCODE))
+ tcode = (opcode >> 6) & ((1 << 10) - 1);
+ }
+
+ do_trap_or_bp(regs, tcode, 0, "Trap");
+
+out:
+ exception_exit(prev_state);
+ return;
+
+out_sigsegv:
+ force_sig(SIGSEGV);
+ goto out;
+}
+
+asmlinkage void do_ri(struct pt_regs *regs)
+{
+ unsigned int __user *epc = (unsigned int __user *)exception_epc(regs);
+ unsigned long old_epc = regs->cp0_epc;
+ unsigned long old31 = regs->regs[31];
+ enum ctx_state prev_state;
+ unsigned int opcode = 0;
+ int status = -1;
+
+ /*
+ * Avoid any kernel code. Just emulate the R2 instruction
+ * as quickly as possible.
+ */
+ if (mipsr2_emulation && cpu_has_mips_r6 &&
+ likely(user_mode(regs)) &&
+ likely(get_user(opcode, epc) >= 0)) {
+ unsigned long fcr31 = 0;
+
+ status = mipsr2_decoder(regs, opcode, &fcr31);
+ switch (status) {
+ case 0:
+ case SIGEMT:
+ return;
+ case SIGILL:
+ goto no_r2_instr;
+ default:
+ process_fpemu_return(status,
+ &current->thread.cp0_baduaddr,
+ fcr31);
+ return;
+ }
+ }
+
+no_r2_instr:
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+
+ if (notify_die(DIE_RI, "RI Fault", regs, 0, current->thread.trap_nr,
+ SIGILL) == NOTIFY_STOP)
+ goto out;
+
+ die_if_kernel("Reserved instruction in kernel code", regs);
+
+ if (unlikely(compute_return_epc(regs) < 0))
+ goto out;
+
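+	/*
+	 * Each simulate_* helper below returns -1 when the opcode is not
+	 * one it handles, 0 on successful emulation, or a signal number
+	 * on failure.
+	 */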
+ if (!get_isa16_mode(regs->cp0_epc)) {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+
+ if (status < 0)
+ status = simulate_rdhwr_normal(regs, opcode);
+
+ if (status < 0)
+ status = simulate_sync(regs, opcode);
+
+ if (status < 0)
+ status = simulate_fp(regs, opcode, old_epc, old31);
+
+#ifdef CONFIG_CPU_LOONGSON3_CPUCFG_EMULATION
+ if (status < 0)
+ status = simulate_loongson3_cpucfg(regs, opcode);
+#endif
+ } else if (cpu_has_mmips) {
+ unsigned short mmop[2] = { 0 };
+
+ if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
+ status = SIGSEGV;
+ opcode = mmop[0];
+ opcode = (opcode << 16) | mmop[1];
+
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
+ }
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
+ force_sig(status);
+ }
+
+out:
+ exception_exit(prev_state);
+}
+
+/*
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(cu2_chain);
+
+int __ref register_cu2_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&cu2_chain, nb);
+}
+
+int cu2_notifier_call_chain(unsigned long val, void *v)
+{
+ return raw_notifier_call_chain(&cu2_chain, val, v);
+}
+
+static int default_cu2_call(struct notifier_block *nfb, unsigned long action,
+ void *data)
+{
+ struct pt_regs *regs = data;
+
+ die_if_kernel("COP2: Unhandled kernel unaligned access or invalid "
+ "instruction", regs);
+ force_sig(SIGILL);
+
+ return NOTIFY_OK;
+}
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+static int enable_restore_fp_context(int msa)
+{
+ int err, was_fpu_owner, prior_msa;
+ bool first_fp;
+
+ /* Initialize context if it hasn't been used already */
+ first_fp = init_fp_ctx(current);
+
+ if (first_fp) {
+ preempt_disable();
+ err = own_fpu_inatomic(1);
+ if (msa && !err) {
+ enable_msa();
+ /*
+			 * With MSA enabled, userspace can see MSACSR and the
+			 * MSA registers, but their values may belong to the
+			 * task that ran before the current one; restore them
+			 * from the saved FP/MSA context.
+ */
+ write_msa_csr(current->thread.fpu.msacsr);
+ /*
+			 * own_fpu_inatomic(1) only restores the low 64 bits;
+			 * fix up the high 64 bits here.
+ */
+ init_msa_upper();
+ set_thread_flag(TIF_USEDMSA);
+ set_thread_flag(TIF_MSA_CTX_LIVE);
+ }
+ preempt_enable();
+ return err;
+ }
+
+ /*
+ * This task has formerly used the FP context.
+ *
+ * If this thread has no live MSA vector context then we can simply
+ * restore the scalar FP context. If it has live MSA vector context
+ * (that is, it has or may have used MSA since last performing a
+ * function call) then we'll need to restore the vector context. This
+ * applies even if we're currently only executing a scalar FP
+ * instruction. This is because if we were to later execute an MSA
+ * instruction then we'd either have to:
+ *
+ * - Restore the vector context & clobber any registers modified by
+ * scalar FP instructions between now & then.
+ *
+ * or
+ *
+ * - Not restore the vector context & lose the most significant bits
+ * of all vector registers.
+ *
+ * Neither of those options is acceptable. We cannot restore the least
+ * significant bits of the registers now & only restore the most
+ * significant bits later because the most significant bits of any
+ * vector registers whose aliased FP register is modified now will have
+ * been zeroed. We'd have no way to know that when restoring the vector
+ * context & thus may load an outdated value for the most significant
+ * bits of a vector register.
+ */
+ if (!msa && !thread_msa_context_live())
+ return own_fpu(1);
+
+ /*
+ * This task is using or has previously used MSA. Thus we require
+ * that Status.FR == 1.
+ */
+ preempt_disable();
+ was_fpu_owner = is_fpu_owner();
+ err = own_fpu_inatomic(0);
+ if (err)
+ goto out;
+
+ enable_msa();
+ write_msa_csr(current->thread.fpu.msacsr);
+ set_thread_flag(TIF_USEDMSA);
+
+ /*
+ * If this is the first time that the task is using MSA and it has
+	 * previously used scalar FP in this time slice then we already have
+ * FP context which we shouldn't clobber. We do however need to clear
+ * the upper 64b of each vector register so that this task has no
+ * opportunity to see data left behind by another.
+ */
+ prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
+ if (!prior_msa && was_fpu_owner) {
+ init_msa_upper();
+
+ goto out;
+ }
+
+ if (!prior_msa) {
+ /*
+ * Restore the least significant 64b of each vector register
+ * from the existing scalar FP context.
+ */
+ _restore_fp(current);
+
+ /*
+ * The task has not formerly used MSA, so clear the upper 64b
+ * of each vector register such that it cannot see data left
+ * behind by another task.
+ */
+ init_msa_upper();
+ } else {
+ /* We need to restore the vector context. */
+ restore_msa(current);
+
+ /* Restore the scalar FP control & status register */
+ if (!was_fpu_owner)
+ write_32bit_cp1_register(CP1_STATUS,
+ current->thread.fpu.fcr31);
+ }
+
+out:
+ preempt_enable();
+
+ return 0;
+}
+
+#else /* !CONFIG_MIPS_FP_SUPPORT */
+
+static int enable_restore_fp_context(int msa)
+{
+ return SIGILL;
+}
+
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+asmlinkage void do_cpu(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ unsigned int __user *epc;
+ unsigned long old_epc, old31;
+ unsigned int opcode;
+ unsigned int cpid;
+ int status;
+
+ prev_state = exception_enter();
+ cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
+
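+	/* Only coprocessor 2 unusable exceptions are tolerated in kernel mode. */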
+ if (cpid != 2)
+ die_if_kernel("do_cpu invoked from kernel context!", regs);
+
+ switch (cpid) {
+ case 0:
+ epc = (unsigned int __user *)exception_epc(regs);
+ old_epc = regs->cp0_epc;
+ old31 = regs->regs[31];
+ opcode = 0;
+ status = -1;
+
+ if (unlikely(compute_return_epc(regs) < 0))
+ break;
+
+ if (!get_isa16_mode(regs->cp0_epc)) {
+ if (unlikely(get_user(opcode, epc) < 0))
+ status = SIGSEGV;
+
+ if (!cpu_has_llsc && status < 0)
+ status = simulate_llsc(regs, opcode);
+ }
+
+ if (status < 0)
+ status = SIGILL;
+
+ if (unlikely(status > 0)) {
+ regs->cp0_epc = old_epc; /* Undo skip-over. */
+ regs->regs[31] = old31;
+ force_sig(status);
+ }
+
+ break;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case 3:
+ /*
+ * The COP3 opcode space and consequently the CP0.Status.CU3
+ * bit and the CP0.Cause.CE=3 encoding have been removed as
+ * of the MIPS III ISA. From the MIPS IV and MIPS32r2 ISAs
+ * up the space has been reused for COP1X instructions, that
+ * are enabled by the CP0.Status.CU1 bit and consequently
+ * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
+ * exceptions. Some FPU-less processors that implement one
+ * of these ISAs however use this code erroneously for COP1X
+ * instructions. Therefore we redirect this trap to the FP
+ * emulator too.
+ */
+ if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
+ force_sig(SIGILL);
+ break;
+ }
+ fallthrough;
+ case 1: {
+ void __user *fault_addr;
+ unsigned long fcr31;
+ int err, sig;
+
+ err = enable_restore_fp_context(0);
+
+ if (raw_cpu_has_fpu && !err)
+ break;
+
+ sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
+ &fault_addr);
+
+ /*
+ * We can't allow the emulated instruction to leave
+ * any enabled Cause bits set in $fcr31.
+ */
+ fcr31 = mask_fcr31_x(current->thread.fpu.fcr31);
+ current->thread.fpu.fcr31 &= ~fcr31;
+
+ /* Send a signal if required. */
+ if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
+ mt_ase_fp_affinity();
+
+ break;
+ }
+#else /* CONFIG_MIPS_FP_SUPPORT */
+ case 1:
+ case 3:
+ force_sig(SIGILL);
+ break;
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+ case 2:
+ raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
+ break;
+ }
+
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ current->thread.trap_nr = (regs->cp0_cause >> 2) & 0x1f;
+ if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
+ current->thread.trap_nr, SIGFPE) == NOTIFY_STOP)
+ goto out;
+
+ /* Clear MSACSR.Cause before enabling interrupts */
+ write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
+ local_irq_enable();
+
+ die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
+ force_sig(SIGFPE);
+out:
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_msa(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ int err;
+
+ prev_state = exception_enter();
+
+ if (!cpu_has_msa || test_thread_flag(TIF_32BIT_FPREGS)) {
+ force_sig(SIGILL);
+ goto out;
+ }
+
+ die_if_kernel("do_msa invoked from kernel context!", regs);
+
+ err = enable_restore_fp_context(1);
+ if (err)
+ force_sig(SIGILL);
+out:
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_mdmx(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ force_sig(SIGILL);
+ exception_exit(prev_state);
+}
+
+/*
+ * Called with interrupts disabled.
+ */
+asmlinkage void do_watch(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ /*
+	 * Clear the WP bit (bit 22) of the cause register so we don't
+	 * loop forever.
+ */
+ clear_c0_cause(CAUSEF_WP);
+
+ /*
+ * If the current thread has the watch registers loaded, save
+ * their values and send SIGTRAP. Otherwise another thread
+ * left the registers set, clear them and continue.
+ */
+ if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
+ mips_read_watch_registers();
+ local_irq_enable();
+ force_sig_fault(SIGTRAP, TRAP_HWBKPT, NULL);
+ } else {
+ mips_clear_watch_registers();
+ local_irq_enable();
+ }
+ exception_exit(prev_state);
+}
+
+asmlinkage void do_mcheck(struct pt_regs *regs)
+{
+ int multi_match = regs->cp0_status & ST0_TS;
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+ show_regs(regs);
+
+ if (multi_match) {
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
+ }
+
+ show_code((void *)regs->cp0_epc, user_mode(regs));
+
+ /*
+ * Some chips may have other causes of machine check (e.g. SB1
+ * graduation timer)
+ */
+ panic("Caught Machine Check exception - %scaused by multiple "
+ "matching entries in the TLB.",
+ (multi_match) ? "" : "not ");
+}
+
+asmlinkage void do_mt(struct pt_regs *regs)
+{
+ int subcode;
+
+ subcode = (read_vpe_c0_vpecontrol() & VPECONTROL_EXCPT)
+ >> VPECONTROL_EXCPT_SHIFT;
+ switch (subcode) {
+ case 0:
+ printk(KERN_DEBUG "Thread Underflow\n");
+ break;
+ case 1:
+ printk(KERN_DEBUG "Thread Overflow\n");
+ break;
+ case 2:
+ printk(KERN_DEBUG "Invalid YIELD Qualifier\n");
+ break;
+ case 3:
+ printk(KERN_DEBUG "Gating Storage Exception\n");
+ break;
+ case 4:
+ printk(KERN_DEBUG "YIELD Scheduler Exception\n");
+ break;
+ case 5:
+ printk(KERN_DEBUG "Gating Storage Scheduler Exception\n");
+ break;
+ default:
+ printk(KERN_DEBUG "*** UNKNOWN THREAD EXCEPTION %d ***\n",
+ subcode);
+ break;
+ }
+ die_if_kernel("MIPS MT Thread exception in kernel", regs);
+
+ force_sig(SIGILL);
+}
+
+
+asmlinkage void do_dsp(struct pt_regs *regs)
+{
+ if (cpu_has_dsp)
+ panic("Unexpected DSP exception");
+
+ force_sig(SIGILL);
+}
+
+asmlinkage void do_reserved(struct pt_regs *regs)
+{
+ /*
+ * Game over - no way to handle this if it ever occurs. Most probably
+	 * caused by a new unknown CPU type or by another deadly
+	 * hardware/software error.
+ */
+ show_regs(regs);
+ panic("Caught reserved exception %ld - should not happen.",
+ (regs->cp0_cause & 0x7f) >> 2);
+}
+
+static int __initdata l1parity = 1;
+static int __init nol1parity(char *s)
+{
+ l1parity = 0;
+ return 1;
+}
+__setup("nol1par", nol1parity);
+static int __initdata l2parity = 1;
+static int __init nol2parity(char *s)
+{
+ l2parity = 0;
+ return 1;
+}
+__setup("nol2par", nol2parity);
+
+/*
+ * Some MIPS CPUs can enable/disable cache parity detection, but they
+ * do it in different ways.
+ */
+static inline __init void parity_protection_init(void)
+{
+#define ERRCTL_PE 0x80000000
+#define ERRCTL_L2P 0x00800000
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ ulong gcr_ectl, cp0_ectl;
+
+ /*
+ * With CM3 systems we need to ensure that the L1 & L2
+ * parity enables are set to the same value, since this
+ * is presumed by the hardware engineers.
+ *
+ * If the user disabled either of L1 or L2 ECC checking,
+ * disable both.
+ */
+ l1parity &= l2parity;
+ l2parity &= l1parity;
+
+ /* Probe L1 ECC support */
+ cp0_ectl = read_c0_ecc();
+ write_c0_ecc(cp0_ectl | ERRCTL_PE);
+ back_to_back_c0_hazard();
+ cp0_ectl = read_c0_ecc();
+
+ /* Probe L2 ECC support */
+ gcr_ectl = read_gcr_err_control();
+
+ if (!(gcr_ectl & CM_GCR_ERR_CONTROL_L2_ECC_SUPPORT) ||
+ !(cp0_ectl & ERRCTL_PE)) {
+ /*
+ * One of L1 or L2 ECC checking isn't supported,
+ * so we cannot enable either.
+ */
+ l1parity = l2parity = 0;
+ }
+
+ /* Configure L1 ECC checking */
+ if (l1parity)
+ cp0_ectl |= ERRCTL_PE;
+ else
+ cp0_ectl &= ~ERRCTL_PE;
+ write_c0_ecc(cp0_ectl);
+ back_to_back_c0_hazard();
+ WARN_ON(!!(read_c0_ecc() & ERRCTL_PE) != l1parity);
+
+ /* Configure L2 ECC checking */
+ if (l2parity)
+ gcr_ectl |= CM_GCR_ERR_CONTROL_L2_ECC_EN;
+ else
+ gcr_ectl &= ~CM_GCR_ERR_CONTROL_L2_ECC_EN;
+ write_gcr_err_control(gcr_ectl);
+ gcr_ectl = read_gcr_err_control();
+ gcr_ectl &= CM_GCR_ERR_CONTROL_L2_ECC_EN;
+ WARN_ON(!!gcr_ectl != l2parity);
+
+ pr_info("Cache parity protection %sabled\n",
+ l1parity ? "en" : "dis");
+ return;
+ }
+
+ switch (current_cpu_type()) {
+ case CPU_24K:
+ case CPU_34K:
+ case CPU_74K:
+ case CPU_1004K:
+ case CPU_1074K:
+ case CPU_INTERAPTIV:
+ case CPU_PROAPTIV:
+ case CPU_P5600:
+ case CPU_QEMU_GENERIC:
+ case CPU_P6600:
+ {
+ unsigned long errctl;
+ unsigned int l1parity_present, l2parity_present;
+
+ errctl = read_c0_ecc();
+ errctl &= ~(ERRCTL_PE|ERRCTL_L2P);
+
+ /* probe L1 parity support */
+ write_c0_ecc(errctl | ERRCTL_PE);
+ back_to_back_c0_hazard();
+ l1parity_present = (read_c0_ecc() & ERRCTL_PE);
+
+ /* probe L2 parity support */
+ write_c0_ecc(errctl|ERRCTL_L2P);
+ back_to_back_c0_hazard();
+ l2parity_present = (read_c0_ecc() & ERRCTL_L2P);
+
+ if (l1parity_present && l2parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
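+			/*
+			 * L2P is interpreted relative to PE here: the
+			 * effective L2 enable is PE ^ L2P, hence the XOR,
+			 * matching the readback report further down.
+			 */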
+ if (l1parity ^ l2parity)
+ errctl |= ERRCTL_L2P;
+ } else if (l1parity_present) {
+ if (l1parity)
+ errctl |= ERRCTL_PE;
+ } else if (l2parity_present) {
+ if (l2parity)
+ errctl |= ERRCTL_L2P;
+ } else {
+ /* No parity available */
+ }
+
+ printk(KERN_INFO "Writing ErrCtl register=%08lx\n", errctl);
+
+ write_c0_ecc(errctl);
+ back_to_back_c0_hazard();
+ errctl = read_c0_ecc();
+ printk(KERN_INFO "Readback ErrCtl register=%08lx\n", errctl);
+
+ if (l1parity_present)
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (errctl & ERRCTL_PE) ? "en" : "dis");
+
+ if (l2parity_present) {
+ if (l1parity_present && l1parity)
+ errctl ^= ERRCTL_L2P;
+ printk(KERN_INFO "L2 cache parity protection %sabled\n",
+ (errctl & ERRCTL_L2P) ? "en" : "dis");
+ }
+ }
+ break;
+
+ case CPU_5KC:
+ case CPU_5KE:
+ case CPU_LOONGSON32:
+		/* Set the PE bit (bit 31) in the c0_errctl register. */
+		write_c0_ecc(0x80000000);
+		back_to_back_c0_hazard();
+ printk(KERN_INFO "Cache parity protection %sabled\n",
+ (read_c0_ecc() & 0x80000000) ? "en" : "dis");
+ break;
+ case CPU_20KC:
+ case CPU_25KF:
+ /* Clear the DE bit (bit 16) in the c0_status register. */
+ printk(KERN_INFO "Enable cache parity protection for "
+ "MIPS 20KC/25KF CPUs.\n");
+ clear_c0_status(ST0_DE);
+ break;
+ default:
+ break;
+ }
+}
+
+asmlinkage void cache_parity_error(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int reg_val;
+
+ /* For the moment, report the problem and hang. */
+ printk("Cache error exception:\n");
+ printk("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+ reg_val = read_c0_cacheerr();
+ printk("c0_cacheerr == %08x\n", reg_val);
+
+ printk("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ if ((cpu_has_mips_r2_r6) &&
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+ pr_err("Error bits: %s%s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<27) ? "ES " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI " : "",
+ reg_val & (1<<23) ? "E1 " : "",
+ reg_val & (1<<22) ? "E0 " : "");
+ } else {
+ pr_err("Error bits: %s%s%s%s%s%s%s\n",
+ reg_val & (1<<29) ? "ED " : "",
+ reg_val & (1<<28) ? "ET " : "",
+ reg_val & (1<<26) ? "EE " : "",
+ reg_val & (1<<25) ? "EB " : "",
+ reg_val & (1<<24) ? "EI " : "",
+ reg_val & (1<<23) ? "E1 " : "",
+ reg_val & (1<<22) ? "E0 " : "");
+ }
+ printk("IDX: 0x%08x\n", reg_val & ((1<<22)-1));
+
+#if defined(CONFIG_CPU_MIPS32) || defined(CONFIG_CPU_MIPS64)
+ if (reg_val & (1<<22))
+ printk("DErrAddr0: 0x%0*lx\n", field, read_c0_derraddr0());
+
+ if (reg_val & (1<<23))
+ printk("DErrAddr1: 0x%0*lx\n", field, read_c0_derraddr1());
+#endif
+
+ panic("Can't handle the cache error!");
+}
+
+asmlinkage void do_ftlb(void)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned int reg_val;
+
+ /* For the moment, report the problem and hang. */
+ if ((cpu_has_mips_r2_r6) &&
+ (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
+ pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
+ read_c0_ecc());
+ pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
+ reg_val = read_c0_cacheerr();
+ pr_err("c0_cacheerr == %08x\n", reg_val);
+
+ if ((reg_val & 0xc0000000) == 0xc0000000) {
+ pr_err("Decoded c0_cacheerr: FTLB parity error\n");
+ } else {
+ pr_err("Decoded c0_cacheerr: %s cache fault in %s reference.\n",
+ reg_val & (1<<30) ? "secondary" : "primary",
+ reg_val & (1<<31) ? "data" : "insn");
+ }
+ } else {
+ pr_err("FTLB error exception\n");
+ }
+ /* Just print the cacheerr bits for now */
+ cache_parity_error();
+}
+
+asmlinkage void do_gsexc(struct pt_regs *regs, u32 diag1)
+{
+ u32 exccode = (diag1 & LOONGSON_DIAG1_EXCCODE) >>
+ LOONGSON_DIAG1_EXCCODE_SHIFT;
+ enum ctx_state prev_state;
+
+ prev_state = exception_enter();
+
+ switch (exccode) {
+ case 0x08:
+ /* Undocumented exception, will trigger on certain
+ * also-undocumented instructions accessible from userspace.
+ * Processor state is not otherwise corrupted, but currently
+ * we don't know how to proceed. Maybe there is some
+ * undocumented control flag to enable the instructions?
+ */
+ force_sig(SIGILL);
+ break;
+
+ default:
+ /* None of the other exceptions, documented or not, have
+ * further details given; none are encountered in the wild
+ * either. Panic in case some of them turn out to be fatal.
+ */
+ show_regs(regs);
+ panic("Unhandled Loongson exception - GSCause = %08x", diag1);
+ }
+
+ exception_exit(prev_state);
+}
+
+/*
+ * SDBBP EJTAG debug exception handler.
+ * We skip the SDBBP instruction and continue at the next one.
+ */
+void ejtag_exception_handler(struct pt_regs *regs)
+{
+ const int field = 2 * sizeof(unsigned long);
+ unsigned long depc, old_epc, old_ra;
+ unsigned int debug;
+
+ printk(KERN_DEBUG "SDBBP EJTAG debug exception - not handled yet, just ignored!\n");
+ depc = read_c0_depc();
+ debug = read_c0_debug();
+ printk(KERN_DEBUG "c0_depc = %0*lx, DEBUG = %08x\n", field, depc, debug);
+ if (debug & 0x80000000) {
+ /*
+ * In branch delay slot.
+ * We cheat a little bit here and use EPC to calculate the
+ * debug return address (DEPC). EPC is restored after the
+ * calculation.
+ */
+ old_epc = regs->cp0_epc;
+ old_ra = regs->regs[31];
+ regs->cp0_epc = depc;
+ compute_return_epc(regs);
+ depc = regs->cp0_epc;
+ regs->cp0_epc = old_epc;
+ regs->regs[31] = old_ra;
+ } else
+ depc += 4;
+ write_c0_depc(depc);
+
+#if 0
+ printk(KERN_DEBUG "\n\n----- Enable EJTAG single stepping ----\n\n");
+ write_c0_debug(debug | 0x100);
+#endif
+}
+
+/*
+ * NMI exception handler.
+ * No lock; only written during early bootup by CPU 0.
+ */
+static RAW_NOTIFIER_HEAD(nmi_chain);
+
+int register_nmi_notifier(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&nmi_chain, nb);
+}
+
+void __noreturn nmi_exception_handler(struct pt_regs *regs)
+{
+ char str[100];
+
+ nmi_enter();
+ raw_notifier_call_chain(&nmi_chain, 0, regs);
+ bust_spinlocks(1);
+ snprintf(str, 100, "CPU%d NMI taken, CP0_EPC=%lx\n",
+ smp_processor_id(), regs->cp0_epc);
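+ /* die() dumps the saved registers; make EPC show where the NMI hit. */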
+ regs->cp0_epc = read_c0_errorepc();
+ die(str, regs);
+ nmi_exit();
+}
+
+unsigned long ebase;
+EXPORT_SYMBOL_GPL(ebase);
+unsigned long exception_handlers[32];
+unsigned long vi_handlers[64];
+
+void reserve_exception_space(phys_addr_t addr, unsigned long size)
+{
+ memblock_reserve(addr, size);
+}
+
+void __init *set_except_vector(int n, void *addr)
+{
+ unsigned long handler = (unsigned long) addr;
+ unsigned long old_handler;
+
+#ifdef CONFIG_CPU_MICROMIPS
+ /*
+ * Only the TLB handlers are cache aligned with an even
+ * address. All other handlers are on an odd address and
+ * require no modification. Otherwise, MIPS32 mode will
+ * be entered when handling any TLB exceptions. That
+ * would be bad...since we must stay in microMIPS mode.
+ */
+ if (!(handler & 0x1))
+ handler |= 1;
+#endif
+ old_handler = xchg(&exception_handlers[n], handler);
+
+ if (n == 0 && cpu_has_divec) {
+#ifdef CONFIG_CPU_MICROMIPS
+ unsigned long jump_mask = ~((1 << 27) - 1);
+#else
+ unsigned long jump_mask = ~((1 << 28) - 1);
+#endif
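+ /*
+ * Exception 0 is the interrupt exception; with Cause.IV set it is
+ * dispatched through the vector at ebase + 0x200, so patch a jump
+ * to the new handler there. A j instruction only reaches targets
+ * within the current 256MB (128MB for microMIPS) region, hence the
+ * jump_mask check; out-of-range handlers get a load-address/jr
+ * sequence instead.
+ */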
+ u32 *buf = (u32 *)(ebase + 0x200);
+ unsigned int k0 = 26;
+ if ((handler & jump_mask) == ((ebase + 0x200) & jump_mask)) {
+ uasm_i_j(&buf, handler & ~jump_mask);
+ uasm_i_nop(&buf);
+ } else {
+ UASM_i_LA(&buf, k0, handler);
+ uasm_i_jr(&buf, k0);
+ uasm_i_nop(&buf);
+ }
+ local_flush_icache_range(ebase + 0x200, (unsigned long)buf);
+ }
+ return (void *)old_handler;
+}
+
+static void do_default_vi(void)
+{
+ show_regs(get_irq_regs());
+ panic("Caught unexpected vectored interrupt.");
+}
+
+static void *set_vi_srs_handler(int n, vi_handler_t addr, int srs)
+{
+ unsigned long handler;
+ unsigned long old_handler = vi_handlers[n];
+ int srssets = current_cpu_data.srsets;
+ u16 *h;
+ unsigned char *b;
+
+ BUG_ON(!cpu_has_veic && !cpu_has_vint);
+
+ if (addr == NULL) {
+ handler = (unsigned long) do_default_vi;
+ srs = 0;
+ } else
+ handler = (unsigned long) addr;
+ vi_handlers[n] = handler;
+
+ b = (unsigned char *)(ebase + 0x200 + n*VECTORSPACING);
+
+ if (srs >= srssets)
+ panic("Shadow register set %d not supported", srs);
+
+ if (cpu_has_veic) {
+ if (board_bind_eic_interrupt)
+ board_bind_eic_interrupt(n, srs);
+ } else if (cpu_has_vint) {
+ /* SRSMap is only defined if shadow sets are implemented */
+ if (srssets > 1)
+ change_c0_srsmap(0xf << n*4, srs << n*4);
+ }
+
+ if (srs == 0) {
+ /*
+ * If no shadow set is selected then use the default handler
+ * that does normal register saving and standard interrupt exit
+ */
+ extern const u8 except_vec_vi[], except_vec_vi_lui[];
+ extern const u8 except_vec_vi_ori[], except_vec_vi_end[];
+ extern const u8 rollback_except_vec_vi[];
+ const u8 *vec_start = using_rollback_handler() ?
+ rollback_except_vec_vi : except_vec_vi;
+#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_BIG_ENDIAN)
+ const int lui_offset = except_vec_vi_lui - vec_start + 2;
+ const int ori_offset = except_vec_vi_ori - vec_start + 2;
+#else
+ const int lui_offset = except_vec_vi_lui - vec_start;
+ const int ori_offset = except_vec_vi_ori - vec_start;
+#endif
+ const int handler_len = except_vec_vi_end - vec_start;
+
+ if (handler_len > VECTORSPACING) {
+ /*
+ * Sigh... panicking won't help as the console
+ * is probably not configured :(
+ */
+ panic("VECTORSPACING too small");
+ }
+
+ set_handler(((unsigned long)b - ebase), vec_start,
+#ifdef CONFIG_CPU_MICROMIPS
+ (handler_len - 1));
+#else
+ handler_len);
+#endif
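+ /*
+ * Patch the handler address into the stub's lui/ori immediates.
+ * The +2 in the offsets above accounts for the 16-bit immediate
+ * living in the second halfword on big-endian and microMIPS.
+ */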
+ h = (u16 *)(b + lui_offset);
+ *h = (handler >> 16) & 0xffff;
+ h = (u16 *)(b + ori_offset);
+ *h = (handler & 0xffff);
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+handler_len));
+ }
+ else {
+ /*
+ * In other cases jump directly to the interrupt handler. It
+ * is the handler's responsibility to save registers if required
+ * (e.g. hi/lo) and return from the exception using "eret".
+ */
+ u32 insn;
+
+ h = (u16 *)b;
+ /* j handler */
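+ /*
+ * Hand-assemble the jump: the top bits hold the j opcode and
+ * the rest is the in-region target with its instruction
+ * alignment bits (1 for microMIPS, 2 for MIPS32) shifted out.
+ */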
+#ifdef CONFIG_CPU_MICROMIPS
+ insn = 0xd4000000 | (((u32)handler & 0x07ffffff) >> 1);
+#else
+ insn = 0x08000000 | (((u32)handler & 0x0fffffff) >> 2);
+#endif
+ h[0] = (insn >> 16) & 0xffff;
+ h[1] = insn & 0xffff;
+ h[2] = 0;
+ h[3] = 0;
+ local_flush_icache_range((unsigned long)b,
+ (unsigned long)(b+8));
+ }
+
+ return (void *)old_handler;
+}
+
+void *set_vi_handler(int n, vi_handler_t addr)
+{
+ return set_vi_srs_handler(n, addr, 0);
+}
+
+extern void tlb_init(void);
+
+/*
+ * Timer interrupt
+ */
+int cp0_compare_irq;
+EXPORT_SYMBOL_GPL(cp0_compare_irq);
+int cp0_compare_irq_shift;
+
+/*
+ * Performance counter IRQ or -1 if shared with timer
+ */
+int cp0_perfcount_irq;
+EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
+
+/*
+ * Fast debug channel IRQ or -1 if not present
+ */
+int cp0_fdc_irq;
+EXPORT_SYMBOL_GPL(cp0_fdc_irq);
+
+static int noulri;
+
+static int __init ulri_disable(char *s)
+{
+ pr_info("Disabling ulri\n");
+ noulri = 1;
+
+ return 1;
+}
+__setup("noulri", ulri_disable);
+
+/* configure STATUS register */
+static void configure_status(void)
+{
+ /*
+ * Disable coprocessors and select 32-bit or 64-bit addressing
+ * and the 16/32 or 32/32 FPR register model. Reset the BEV
+ * flag that some firmware may have left set and the TS bit (for
+ * IP27). Set XX for ISA IV code to work.
+ */
+ unsigned int status_set = ST0_KERNEL_CUMASK;
+#ifdef CONFIG_64BIT
+ status_set |= ST0_FR|ST0_KX|ST0_SX|ST0_UX;
+#endif
+ if (current_cpu_data.isa_level & MIPS_CPU_ISA_IV)
+ status_set |= ST0_XX;
+ if (cpu_has_dsp)
+ status_set |= ST0_MX;
+
+ change_c0_status(ST0_CU|ST0_MX|ST0_RE|ST0_FR|ST0_BEV|ST0_TS|ST0_KX|ST0_SX|ST0_UX,
+ status_set);
+ back_to_back_c0_hazard();
+}
+
+unsigned int hwrena;
+EXPORT_SYMBOL_GPL(hwrena);
+
+/* configure HWRENA register */
+static void configure_hwrena(void)
+{
+ hwrena = cpu_hwrena_impl_bits;
+
+ if (cpu_has_mips_r2_r6)
+ hwrena |= MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
+
+ if (!noulri && cpu_has_userlocal)
+ hwrena |= MIPS_HWRENA_ULR;
+
+ if (hwrena)
+ write_c0_hwrena(hwrena);
+}
+
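+/*
+ * With ULR enabled above, userland can read its thread pointer without
+ * trapping into the kernel; the canonical sequence (a sketch, GNU as
+ * syntax) is:
+ *
+ *   rdhwr $3, $29	# HW register 29 == UserLocal
+ *
+ * CPUs without RDHWR raise a Reserved Instruction exception instead,
+ * which handle_ri_rdhwr() emulates (see trap_init() below).
+ */
+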
+static void configure_exception_vector(void)
+{
+ if (cpu_has_mips_r2_r6) {
+ unsigned long sr = set_c0_status(ST0_BEV);
+ /* If available, use WG to set top bits of EBASE */
+ if (cpu_has_ebase_wg) {
+#ifdef CONFIG_64BIT
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+#else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+#endif
+ }
+ write_c0_ebase(ebase);
+ write_c0_status(sr);
+ }
+ if (cpu_has_veic || cpu_has_vint) {
+ /* Setting vector spacing enables EI/VI mode */
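+ /*
+ * IntCtl.VS is bits 9:5 (mask 0x3e0) and encodes the spacing
+ * as VS << 5 bytes, so writing the byte spacing itself lands
+ * the right field value, assuming VECTORSPACING is one of the
+ * architecturally valid power-of-two spacings.
+ */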
+ change_c0_intctl(0x3e0, VECTORSPACING);
+ }
+ if (cpu_has_divec) {
+ if (cpu_has_mipsmt) {
+ unsigned int vpflags = dvpe();
+ set_c0_cause(CAUSEF_IV);
+ evpe(vpflags);
+ } else
+ set_c0_cause(CAUSEF_IV);
+ }
+}
+
+void per_cpu_trap_init(bool is_boot_cpu)
+{
+ unsigned int cpu = smp_processor_id();
+
+ configure_status();
+ configure_hwrena();
+
+ configure_exception_vector();
+
+ /*
+ * Before R2 these interrupt numbers were fixed to 7, so on R2 only:
+ *
+ * o read IntCtl.IPTI to determine the timer interrupt
+ * o read IntCtl.IPPCI to determine the performance counter interrupt
+ * o read IntCtl.IPFDC to determine the fast debug channel interrupt
+ */
+ if (cpu_has_mips_r2_r6) {
+ cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
+ cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
+ cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
+ cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
+ if (!cp0_fdc_irq)
+ cp0_fdc_irq = -1;
+
+ } else {
+ cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
+ cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
+ cp0_perfcount_irq = -1;
+ cp0_fdc_irq = -1;
+ }
+
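+ /*
+ * With MMID the ID space is global rather than per-CPU, so the
+ * per-CPU asid_cache goes unused; a rough note only, the
+ * authoritative versioning logic lives in the MMU context code.
+ */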
+ if (cpu_has_mmid)
+ cpu_data[cpu].asid_cache = 0;
+ else if (!cpu_data[cpu].asid_cache)
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
+
+ mmgrab(&init_mm);
+ current->active_mm = &init_mm;
+ BUG_ON(current->mm);
+ enter_lazy_tlb(&init_mm, current);
+
+ /* Boot CPU's cache setup in setup_arch(). */
+ if (!is_boot_cpu)
+ cpu_cache_init();
+ tlb_init();
+ TLBMISS_HANDLER_SETUP();
+}
+
+/* Install CPU exception handler */
+void set_handler(unsigned long offset, const void *addr, unsigned long size)
+{
+#ifdef CONFIG_CPU_MICROMIPS
+ memcpy((void *)(ebase + offset), ((unsigned char *)addr - 1), size);
+#else
+ memcpy((void *)(ebase + offset), addr, size);
+#endif
+ local_flush_icache_range(ebase + offset, ebase + offset + size);
+}
+
+static const char panic_null_cerr[] =
+ "Trying to set NULL cache error exception handler\n";
+
+/*
+ * Install uncached CPU exception handler.
+ * This is suitable only for the cache error exception which is the only
+ * exception handler that is being run uncached.
+ */
+void set_uncached_handler(unsigned long offset, void *addr,
+ unsigned long size)
+{
+ unsigned long uncached_ebase = CKSEG1ADDR(ebase);
+
+ if (!addr)
+ panic(panic_null_cerr);
+
+ memcpy((void *)(uncached_ebase + offset), addr, size);
+}
+
+static int __initdata rdhwr_noopt;
+static int __init set_rdhwr_noopt(char *str)
+{
+ rdhwr_noopt = 1;
+ return 1;
+}
+
+__setup("rdhwr_noopt", set_rdhwr_noopt);
+
+void __init trap_init(void)
+{
+ extern char except_vec3_generic;
+ extern char except_vec4;
+ extern char except_vec3_r4000;
+ unsigned long i, vec_size;
+ phys_addr_t ebase_pa;
+
+ check_wait();
+
+ if (!cpu_has_mips_r2_r6) {
+ ebase = CAC_BASE;
+ vec_size = 0x400;
+ } else {
+ if (cpu_has_veic || cpu_has_vint)
+ vec_size = 0x200 + VECTORSPACING*64;
+ else
+ vec_size = PAGE_SIZE;
+
+ ebase_pa = memblock_phys_alloc(vec_size, 1 << fls(vec_size));
+ if (!ebase_pa)
+ panic("%s: Failed to allocate %lu bytes align=0x%x\n",
+ __func__, vec_size, 1 << fls(vec_size));
+
+ /*
+ * Try to ensure ebase resides in KSeg0 if possible.
+ *
+ * It shouldn't generally be in XKPhys on MIPS64 to avoid
+ * hitting a poorly defined exception base for Cache Errors.
+ * The allocation is likely to be in the low 512MB of physical,
+ * in which case we should be able to convert to KSeg0.
+ *
+ * EVA is special though as it allows segments to be rearranged
+ * and to become uncached during cache error handling.
+ */
+ if (!IS_ENABLED(CONFIG_EVA) && !WARN_ON(ebase_pa >= 0x20000000))
+ ebase = CKSEG0ADDR(ebase_pa);
+ else
+ ebase = (unsigned long)phys_to_virt(ebase_pa);
+ }
+
+ if (cpu_has_mmips) {
+ unsigned int config3 = read_c0_config3();
+
+ if (IS_ENABLED(CONFIG_CPU_MICROMIPS))
+ write_c0_config3(config3 | MIPS_CONF3_ISA_OE);
+ else
+ write_c0_config3(config3 & ~MIPS_CONF3_ISA_OE);
+ }
+
+ if (board_ebase_setup)
+ board_ebase_setup();
+ per_cpu_trap_init(true);
+ memblock_set_bottom_up(false);
+
+ /*
+ * Copy the generic exception handlers to their final destination.
+ * This will be overridden later as suitable for a particular
+ * configuration.
+ */
+ set_handler(0x180, &except_vec3_generic, 0x80);
+
+ /*
+ * Setup default vectors
+ */
+ for (i = 0; i <= 31; i++)
+ set_except_vector(i, handle_reserved);
+
+ /*
+ * Copy the EJTAG debug exception vector handler code to its final
+ * destination.
+ */
+ if (cpu_has_ejtag && board_ejtag_handler_setup)
+ board_ejtag_handler_setup();
+
+ /*
+ * Only some CPUs have the watch exceptions.
+ */
+ if (cpu_has_watch)
+ set_except_vector(EXCCODE_WATCH, handle_watch);
+
+ /*
+ * Initialise interrupt handlers
+ */
+ if (cpu_has_veic || cpu_has_vint) {
+ int nvec = cpu_has_veic ? 64 : 8;
+ for (i = 0; i < nvec; i++)
+ set_vi_handler(i, NULL);
+ }
+ else if (cpu_has_divec)
+ set_handler(0x200, &except_vec4, 0x8);
+
+ /*
+ * Some CPUs can enable/disable cache parity detection, but they
+ * do it in different ways.
+ */
+ parity_protection_init();
+
+ /*
+ * The Data Bus Errors / Instruction Bus Errors are signaled
+ * by external hardware. Therefore these two exceptions
+ * may have board specific handlers.
+ */
+ if (board_be_init)
+ board_be_init();
+
+ set_except_vector(EXCCODE_INT, using_rollback_handler() ?
+ rollback_handle_int : handle_int);
+ set_except_vector(EXCCODE_MOD, handle_tlbm);
+ set_except_vector(EXCCODE_TLBL, handle_tlbl);
+ set_except_vector(EXCCODE_TLBS, handle_tlbs);
+
+ set_except_vector(EXCCODE_ADEL, handle_adel);
+ set_except_vector(EXCCODE_ADES, handle_ades);
+
+ set_except_vector(EXCCODE_IBE, handle_ibe);
+ set_except_vector(EXCCODE_DBE, handle_dbe);
+
+ set_except_vector(EXCCODE_SYS, handle_sys);
+ set_except_vector(EXCCODE_BP, handle_bp);
+
+ if (rdhwr_noopt)
+ set_except_vector(EXCCODE_RI, handle_ri);
+ else {
+ if (cpu_has_vtag_icache)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else if (current_cpu_type() == CPU_LOONGSON64)
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr_tlbp);
+ else
+ set_except_vector(EXCCODE_RI, handle_ri_rdhwr);
+ }
+
+ set_except_vector(EXCCODE_CPU, handle_cpu);
+ set_except_vector(EXCCODE_OV, handle_ov);
+ set_except_vector(EXCCODE_TR, handle_tr);
+ set_except_vector(EXCCODE_MSAFPE, handle_msa_fpe);
+
+ if (board_nmi_handler_setup)
+ board_nmi_handler_setup();
+
+ if (cpu_has_fpu && !cpu_has_nofpuex)
+ set_except_vector(EXCCODE_FPE, handle_fpe);
+
+ if (cpu_has_ftlbparex)
+ set_except_vector(MIPS_EXCCODE_TLBPAR, handle_ftlb);
+
+ if (cpu_has_gsexcex)
+ set_except_vector(LOONGSON_EXCCODE_GSEXC, handle_gsexc);
+
+ if (cpu_has_rixiex) {
+ set_except_vector(EXCCODE_TLBRI, tlb_do_page_fault_0);
+ set_except_vector(EXCCODE_TLBXI, tlb_do_page_fault_0);
+ }
+
+ set_except_vector(EXCCODE_MSADIS, handle_msa);
+ set_except_vector(EXCCODE_MDMX, handle_mdmx);
+
+ if (cpu_has_mcheck)
+ set_except_vector(EXCCODE_MCHECK, handle_mcheck);
+
+ if (cpu_has_mipsmt)
+ set_except_vector(EXCCODE_THREAD, handle_mt);
+
+ set_except_vector(EXCCODE_DSPDIS, handle_dsp);
+
+ if (board_cache_error_setup)
+ board_cache_error_setup();
+
+ if (cpu_has_vce)
+ /* Special exception: R4[04]00 also uses the divec space. */
+ set_handler(0x180, &except_vec3_r4000, 0x100);
+ else if (cpu_has_4kex)
+ set_handler(0x180, &except_vec3_generic, 0x80);
+ else
+ set_handler(0x080, &except_vec3_generic, 0x80);
+
+ local_flush_icache_range(ebase, ebase + vec_size);
+
+ sort_extable(__start___dbe_table, __stop___dbe_table);
+
+ cu2_notifier(default_cu2_call, 0x80000000); /* Run last */
+}
+
+static int trap_pm_notifier(struct notifier_block *self, unsigned long cmd,
+ void *v)
+{
+ switch (cmd) {
+ case CPU_PM_ENTER_FAILED:
+ case CPU_PM_EXIT:
+ configure_status();
+ configure_hwrena();
+ configure_exception_vector();
+
+ /* Restore register with CPU number for TLB handlers */
+ TLBMISS_HANDLER_RESTORE();
+
+ break;
+ }
+
+ return NOTIFY_OK;
+}
+
+static struct notifier_block trap_pm_notifier_block = {
+ .notifier_call = trap_pm_notifier,
+};
+
+static int __init trap_pm_init(void)
+{
+ return cpu_pm_register_notifier(&trap_pm_notifier_block);
+}
+arch_initcall(trap_pm_init);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
new file mode 100644
index 000000000..7b5aba5df
--- /dev/null
+++ b/arch/mips/kernel/unaligned.c
@@ -0,0 +1,1572 @@
+/*
+ * Handle unaligned accesses by emulation.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1996, 1998, 1999, 2002 by Ralf Baechle
+ * Copyright (C) 1999 Silicon Graphics, Inc.
+ * Copyright (C) 2014 Imagination Technologies Ltd.
+ *
+ * This file contains exception handler for address error exception with the
+ * special capability to execute faulting instructions in software. The
+ * handler does not try to handle the case when the program counter points
+ * to an address not aligned to a word boundary.
+ *
+ * Putting data at unaligned addresses is bad practice even on Intel, where
+ * only performance is affected. Much worse, such code is non-portable.
+ * Because several programs die on MIPS due to alignment problems, I decided
+ * to implement this handler anyway, though I originally didn't intend to do
+ * this at all for user code.
+ *
+ * For now I enable fixing of address errors by default to make life easier.
+ * However, I intend to disable this at some point in the future, once the
+ * alignment problems in user programs have been fixed. For programmers this
+ * is the right way to go.
+ *
+ * Fixing address errors is a per process option. The option is inherited
+ * across fork(2) and execve(2) calls. If you really want to use the
+ * option in your user programs - I discourage the use of the software
+ * emulation strongly - use the following code in your userland stuff:
+ *
+ * #include <sys/sysmips.h>
+ *
+ * ...
+ * sysmips(MIPS_FIXADE, x);
+ * ...
+ *
+ * The argument x is 0 to disable software emulation, nonzero to enable it.
+ *
+ * Below a little program to play around with this feature.
+ *
+ * #include <stdio.h>
+ * #include <sys/sysmips.h>
+ *
+ * struct foo {
+ * unsigned char bar[8];
+ * };
+ *
+ * int main(int argc, char *argv[])
+ * {
+ * struct foo x = {0, 1, 2, 3, 4, 5, 6, 7};
+ * unsigned int *p = (unsigned int *) (x.bar + 3);
+ * int i;
+ *
+ * if (argc > 1)
+ * sysmips(MIPS_FIXADE, atoi(argv[1]));
+ *
+ * printf("*p = %08lx\n", *p);
+ *
+ * *p = 0xdeadface;
+ *
+ * for(i = 0; i <= 7; i++)
+ * printf("%02x ", x.bar[i]);
+ * printf("\n");
+ * }
+ *
+ * Coprocessor loads are not supported; I think this case is unimportant
+ * in practice.
+ *
+ * TODO: Handle ndc (attempted store to doubleword in uncached memory)
+ * exception for the R6000.
+ * A store crossing a page boundary might be executed only partially.
+ * Undo the partial store in this case.
+ */
+#include <linux/context_tracking.h>
+#include <linux/mm.h>
+#include <linux/signal.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/debugfs.h>
+#include <linux/perf_event.h>
+
+#include <asm/asm.h>
+#include <asm/branch.h>
+#include <asm/byteorder.h>
+#include <asm/cop2.h>
+#include <asm/debug.h>
+#include <asm/fpu.h>
+#include <asm/fpu_emulator.h>
+#include <asm/inst.h>
+#include <asm/unaligned-emul.h>
+#include <asm/mmu_context.h>
+#include <linux/uaccess.h>
+
+#include "access-helper.h"
+
+enum {
+ UNALIGNED_ACTION_QUIET,
+ UNALIGNED_ACTION_SIGNAL,
+ UNALIGNED_ACTION_SHOW,
+};
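+/*
+ * unaligned_action selects the response to an unaligned access: QUIET (0)
+ * silently fixes it up, SIGNAL (1) sends SIGBUS instead of emulating, and
+ * SHOW (2) fixes it up after dumping the registers. It is writable via
+ * debugfs, see debugfs_unaligned() at the bottom of this file.
+ */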
+#ifdef CONFIG_DEBUG_FS
+static u32 unaligned_instructions;
+static u32 unaligned_action;
+#else
+#define unaligned_action UNALIGNED_ACTION_QUIET
+#endif
+extern void show_registers(struct pt_regs *regs);
+
+static void emulate_load_store_insn(struct pt_regs *regs,
+ void __user *addr, unsigned int *pc)
+{
+ unsigned long origpc, orig31, value;
+ union mips_instruction insn;
+ unsigned int res;
+ bool user = user_mode(regs);
+
+ origpc = (unsigned long)pc;
+ orig31 = regs->regs[31];
+
+ perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS, 1, regs, 0);
+
+ /*
+ * This load never faults.
+ */
+ __get_inst32(&insn.word, pc, user);
+
+ switch (insn.i_format.opcode) {
+ /*
+ * These are instructions that a compiler doesn't generate. We
+ * can therefore assume that the code is MIPS-aware and
+ * really buggy. Emulating these instructions would break the
+ * semantics anyway.
+ */
+ case ll_op:
+ case lld_op:
+ case sc_op:
+ case scd_op:
+
+ /*
+ * For these instructions the only way to create an address
+ * error is an attempted access to kernel/supervisor address
+ * space.
+ */
+ case ldl_op:
+ case ldr_op:
+ case lwl_op:
+ case lwr_op:
+ case sdl_op:
+ case sdr_op:
+ case swl_op:
+ case swr_op:
+ case lb_op:
+ case lbu_op:
+ case sb_op:
+ goto sigbus;
+
+ /*
+ * The remaining opcodes are the ones that are really of
+ * interest.
+ */
+ case spec3_op:
+ if (insn.dsp_format.func == lx_op) {
+ switch (insn.dsp_format.op) {
+ case lwx_op:
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ case lhx_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.dsp_format.rd] = value;
+ break;
+ default:
+ goto sigill;
+ }
+ }
+#ifdef CONFIG_EVA
+ else {
+ /*
+ * We can land here only from the kernel accessing user
+ * memory, so we need to "switch" the address limit to
+ * user space so that the address check works properly.
+ */
+ switch (insn.spec3_format.func) {
+ case lhe_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+ LoadHWE(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lwe_op:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+ LoadWE(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case lhue_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+ LoadHWUE(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.spec3_format.rt] = value;
+ break;
+ case she_op:
+ if (!access_ok(addr, 2))
+ goto sigbus;
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreHWE(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+ case swe_op:
+ if (!access_ok(addr, 4))
+ goto sigbus;
+ compute_return_epc(regs);
+ value = regs->regs[insn.spec3_format.rt];
+ StoreWE(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+ default:
+ goto sigill;
+ }
+ }
+#endif
+ break;
+ case lh_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ LoadHWE(addr, value, res);
+ else
+ LoadHW(addr, value, res);
+
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+
+ case lw_op:
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ LoadWE(addr, value, res);
+ else
+ LoadW(addr, value, res);
+
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+
+ case lhu_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ LoadHWUE(addr, value, res);
+ else
+ LoadHWU(addr, value, res);
+
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+
+ case lwu_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case ld_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ compute_return_epc(regs);
+ regs->regs[insn.i_format.rt] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case sh_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ StoreHWE(addr, value, res);
+ else
+ StoreHW(addr, value, res);
+
+ if (res)
+ goto fault;
+ break;
+
+ case sw_op:
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+
+ if (IS_ENABLED(CONFIG_EVA) && user)
+ StoreWE(addr, value, res);
+ else
+ StoreW(addr, value, res);
+
+ if (res)
+ goto fault;
+ break;
+
+ case sd_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ compute_return_epc(regs);
+ value = regs->regs[insn.i_format.rt];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+
+ case lwc1_op:
+ case ldc1_op:
+ case swc1_op:
+ case sdc1_op:
+ case cop1x_op: {
+ void __user *fault_addr = NULL;
+
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* Restore FPU state. */
+
+ /* Signal if something went wrong. */
+ process_fpemu_return(res, fault_addr, 0);
+
+ if (res == 0)
+ break;
+ return;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+#ifdef CONFIG_CPU_HAS_MSA
+
+ case msa_op: {
+ unsigned int wd, preempted;
+ enum msa_2b_fmt df;
+ union fpureg *fpr;
+
+ if (!cpu_has_msa)
+ goto sigill;
+
+ /*
+ * If we've reached this point then userland should have taken
+ * the MSA disabled exception & initialised vector context at
+ * some point in the past.
+ */
+ BUG_ON(!thread_msa_context_live());
+
+ df = insn.msa_mi10_format.df;
+ wd = insn.msa_mi10_format.wd;
+ fpr = &current->thread.fpu.fpr[wd];
+
+ switch (insn.msa_mi10_format.func) {
+ case msa_ld_op:
+ if (!access_ok(addr, sizeof(*fpr)))
+ goto sigbus;
+
+ do {
+ /*
+ * If we have live MSA context keep track of
+ * whether we get preempted in order to avoid
+ * the register context we load being clobbered
+ * by the live context as it's saved during
+ * preemption. If we don't have live context
+ * then it can't be saved to clobber the value
+ * we load.
+ */
+ preempted = test_thread_flag(TIF_USEDMSA);
+
+ res = __copy_from_user_inatomic(fpr, addr,
+ sizeof(*fpr));
+ if (res)
+ goto fault;
+
+ /*
+ * Update the hardware register if it is in use
+ * by the task in this quantum, in order to
+ * avoid having to save & restore the whole
+ * vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA)) {
+ write_msa_wr(wd, fpr, df);
+ preempted = 0;
+ }
+ preempt_enable();
+ } while (preempted);
+ break;
+
+ case msa_st_op:
+ if (!access_ok(addr, sizeof(*fpr)))
+ goto sigbus;
+
+ /*
+ * Update from the hardware register if it is in use by
+ * the task in this quantum, in order to avoid having to
+ * save & restore the whole vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA))
+ read_msa_wr(wd, fpr, df);
+ preempt_enable();
+
+ res = __copy_to_user_inatomic(addr, fpr, sizeof(*fpr));
+ if (res)
+ goto fault;
+ break;
+
+ default:
+ goto sigbus;
+ }
+
+ compute_return_epc(regs);
+ break;
+ }
+#endif /* CONFIG_CPU_HAS_MSA */
+
+#ifndef CONFIG_CPU_MIPSR6
+ /*
+ * COP2 is available to the implementor for application-specific use.
+ * It's up to applications to register a notifier chain and do
+ * whatever they have to do, including possibly sending signals.
+ *
+ * These opcodes have been reallocated in Release 6.
+ */
+ case lwc2_op:
+ cu2_notifier_call_chain(CU2_LWC2_OP, regs);
+ break;
+
+ case ldc2_op:
+ cu2_notifier_call_chain(CU2_LDC2_OP, regs);
+ break;
+
+ case swc2_op:
+ cu2_notifier_call_chain(CU2_SWC2_OP, regs);
+ break;
+
+ case sdc2_op:
+ cu2_notifier_call_chain(CU2_SDC2_OP, regs);
+ break;
+#endif
+ default:
+ /*
+ * Pheeee... We encountered a yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
+ */
+ goto sigill;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+
+sigill:
+ die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL);
+}
+
+/* Recode table from 16-bit register notation to 32-bit GPR. */
+const int reg16to32[] = { 16, 17, 2, 3, 4, 5, 6, 7 };
+
+/* Recode table from 16-bit STORE register notation to 32-bit GPR. */
+static const int reg16to32st[] = { 0, 17, 2, 3, 4, 5, 6, 7 };
+
+static void emulate_load_store_microMIPS(struct pt_regs *regs,
+ void __user *addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int i;
+ unsigned int reg = 0, rvar;
+ unsigned long orig31;
+ u16 __user *pc16;
+ u16 halfword;
+ unsigned int word;
+ unsigned long origpc, contpc;
+ union mips_instruction insn;
+ struct mm_decoded_insn mminsn;
+ bool user = user_mode(regs);
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+
+ mminsn.micro_mips_mode = 1;
+
+ /*
+ * This load never faults.
+ */
+ pc16 = (unsigned short __user *)msk_isa16_mode(regs->cp0_epc);
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 2;
+ word = ((unsigned int)halfword << 16);
+ mminsn.pc_inc = 2;
+
+ if (!mm_insn_16bit(halfword)) {
+ __get_user(halfword, pc16);
+ pc16++;
+ contpc = regs->cp0_epc + 4;
+ mminsn.pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.insn = word;
+
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 2;
+ word = ((unsigned int)halfword << 16);
+
+ if (!mm_insn_16bit(halfword)) {
+ pc16++;
+ if (get_user(halfword, pc16))
+ goto fault;
+ mminsn.next_pc_inc = 4;
+ word |= halfword;
+ }
+ mminsn.next_insn = word;
+
+ insn = (union mips_instruction)(mminsn.insn);
+ if (mm_isBranchInstr(regs, mminsn, &contpc))
+ insn = (union mips_instruction)(mminsn.next_insn);
+
+ /* Parse instruction to find what to do */
+
+ switch (insn.mm_i_format.opcode) {
+
+ case mm_pool32a_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxs_op:
+ reg = insn.mm_x_format.rd;
+ goto loadW;
+ }
+
+ goto sigbus;
+
+ case mm_pool32b_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 4;
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+
+ case mm_swp_func:
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ value = regs->regs[reg + 1];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+ case mm_ldp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (user && !access_ok(addr, 16))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ addr += 8;
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg + 1] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdp_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ if (reg == 31)
+ goto sigbus;
+
+ if (user && !access_ok(addr, 16))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ value = regs->regs[reg + 1];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
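+ /*
+ * LWM32/SWM32 encode a register list in rd: the low four bits
+ * give a count selecting $16-$23 (9 selects all eight plus
+ * $30), and bit 4 additionally selects $31.
+ */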
+ case mm_lwm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (user && !access_ok(addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (user && !access_ok(addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+
+ case mm_swm32_func:
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (user && !access_ok(addr, 4 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (user && !access_ok(addr, 4 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+
+ case mm_ldm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (user && !access_ok(addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (user && !access_ok(addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ if ((reg & 0xf) == 9) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ regs->regs[30] = value;
+ }
+ if (reg & 0x10) {
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ case mm_sdm_func:
+#ifdef CONFIG_64BIT
+ reg = insn.mm_m_format.rd;
+ rvar = reg & 0xf;
+ if ((rvar > 9) || !reg)
+ goto sigill;
+ if (reg & 0x10) {
+ if (user && !access_ok(addr, 8 * (rvar + 1)))
+ goto sigbus;
+ } else {
+ if (user && !access_ok(addr, 8 * rvar))
+ goto sigbus;
+ }
+ if (rvar == 9)
+ rvar = 8;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if ((reg & 0xf) == 9) {
+ value = regs->regs[30];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 8;
+ }
+ if (reg & 0x10) {
+ value = regs->regs[31];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ }
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ goto sigill;
+
+ /* LWC2, SWC2, LDC2, SDC2 are not serviced */
+ }
+
+ goto sigbus;
+
+ case mm_pool32c_op:
+ switch (insn.mm_m_format.func) {
+ case mm_lwu_func:
+ reg = insn.mm_m_format.rd;
+ goto loadWU;
+ }
+
+ /* LL,SC,LLD,SCD are not serviced */
+ goto sigbus;
+
+#ifdef CONFIG_MIPS_FP_SUPPORT
+ case mm_pool32f_op:
+ switch (insn.mm_x_format.func) {
+ case mm_lwxc1_func:
+ case mm_swxc1_func:
+ case mm_ldxc1_func:
+ case mm_sdxc1_func:
+ goto fpu_emul;
+ }
+
+ goto sigbus;
+
+ case mm_ldc132_op:
+ case mm_sdc132_op:
+ case mm_lwc132_op:
+ case mm_swc132_op: {
+ void __user *fault_addr = NULL;
+
+fpu_emul:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+
+ die_if_kernel("Unaligned FP access in kernel code", regs);
+ BUG_ON(!used_math());
+ BUG_ON(!is_fpu_owner());
+
+ res = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
+ &fault_addr);
+ own_fpu(1); /* restore FPU state */
+
+ /* If something went wrong, signal */
+ process_fpemu_return(res, fault_addr, 0);
+
+ if (res == 0)
+ goto success;
+ return;
+ }
+#endif /* CONFIG_MIPS_FP_SUPPORT */
+
+ case mm_lh32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHW;
+
+ case mm_lhu32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadHWU;
+
+ case mm_lw32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadW;
+
+ case mm_sh32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeHW;
+
+ case mm_sw32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeW;
+
+ case mm_ld32_op:
+ reg = insn.mm_i_format.rt;
+ goto loadDW;
+
+ case mm_sd32_op:
+ reg = insn.mm_i_format.rt;
+ goto storeDW;
+
+ case mm_pool16c_op:
+ switch (insn.mm16_m_format.func) {
+ case mm_lwm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (user && !access_ok(addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ regs->regs[i] = value;
+ }
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[31] = value;
+
+ goto success;
+
+ case mm_swm16_op:
+ reg = insn.mm16_m_format.rlist;
+ rvar = reg + 1;
+ if (user && !access_ok(addr, 4 * rvar))
+ goto sigbus;
+
+ for (i = 16; rvar; rvar--, i++) {
+ value = regs->regs[i];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ addr += 4;
+ }
+ value = regs->regs[31];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+
+ goto success;
+
+ }
+
+ goto sigbus;
+
+ case mm_lhu16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadHWU;
+
+ case mm_lw16_op:
+ reg = reg16to32[insn.mm16_rb_format.rt];
+ goto loadW;
+
+ case mm_sh16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeHW;
+
+ case mm_sw16_op:
+ reg = reg16to32st[insn.mm16_rb_format.rt];
+ goto storeW;
+
+ case mm_lwsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto loadW;
+
+ case mm_swsp16_op:
+ reg = insn.mm16_r5_format.rt;
+ goto storeW;
+
+ case mm_lwgp16_op:
+ reg = reg16to32[insn.mm16_r3_format.rt];
+ goto loadW;
+
+ default:
+ goto sigill;
+ }
+
+loadHW:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadHWU:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadW:
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+
+loadWU:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ regs->regs[reg] = value;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+storeHW:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeW:
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+
+storeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ goto success;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+success:
+ regs->cp0_epc = contpc; /* advance or branch */
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+
+sigill:
+ die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL);
+}
+
+static void emulate_load_store_MIPS16e(struct pt_regs *regs, void __user * addr)
+{
+ unsigned long value;
+ unsigned int res;
+ int reg;
+ unsigned long orig31;
+ u16 __user *pc16;
+ unsigned long origpc;
+ union mips16e_instruction mips16inst, oldinst;
+ unsigned int opcode;
+ int extended = 0;
+ bool user = user_mode(regs);
+
+ origpc = regs->cp0_epc;
+ orig31 = regs->regs[31];
+ pc16 = (unsigned short __user *)msk_isa16_mode(origpc);
+ /*
+ * This load never faults.
+ */
+ __get_user(mips16inst.full, pc16);
+ oldinst = mips16inst;
+
+ /* skip EXTEND instruction */
+ if (mips16inst.ri.opcode == MIPS16e_extend_op) {
+ extended = 1;
+ pc16++;
+ __get_user(mips16inst.full, pc16);
+ } else if (delay_slot(regs)) {
+ /* skip jump instructions */
+ /* JAL/JALX are 32 bits but have OPCODE in first short int */
+ if (mips16inst.ri.opcode == MIPS16e_jal_op)
+ pc16++;
+ pc16++;
+ if (get_user(mips16inst.full, pc16))
+ goto sigbus;
+ }
+
+ opcode = mips16inst.ri.opcode;
+ switch (opcode) {
+ case MIPS16e_i64_op: /* I64 or RI64 instruction */
+ switch (mips16inst.i64.func) { /* I64/RI64 func field check */
+ case MIPS16e_ldpc_func:
+ case MIPS16e_ldsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto loadDW;
+
+ case MIPS16e_sdsp_func:
+ reg = reg16to32[mips16inst.ri64.ry];
+ goto writeDW;
+
+ case MIPS16e_sdrasp_func:
+ reg = 29; /* GPRSP */
+ goto writeDW;
+ }
+
+ goto sigbus;
+
+ case MIPS16e_swsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* SWSP */
+ case 1: /* SWGP */
+ break;
+ case 2: /* SHGP */
+ opcode = MIPS16e_sh_op;
+ break;
+ default:
+ goto sigbus;
+ }
+ break;
+
+ case MIPS16e_lwpc_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ break;
+
+ case MIPS16e_lwsp_op:
+ reg = reg16to32[mips16inst.ri.rx];
+ if (extended && cpu_has_mips16e2)
+ switch (mips16inst.ri.imm >> 5) {
+ case 0: /* LWSP */
+ case 1: /* LWGP */
+ break;
+ case 2: /* LHGP */
+ opcode = MIPS16e_lh_op;
+ break;
+ case 4: /* LHUGP */
+ opcode = MIPS16e_lhu_op;
+ break;
+ default:
+ goto sigbus;
+ }
+ break;
+
+ case MIPS16e_i8_op:
+ if (mips16inst.i8.func != MIPS16e_swrasp_func)
+ goto sigbus;
+ reg = 29; /* GPRSP */
+ break;
+
+ default:
+ reg = reg16to32[mips16inst.rri.ry];
+ break;
+ }
+
+ switch (opcode) {
+
+ case MIPS16e_lb_op:
+ case MIPS16e_lbu_op:
+ case MIPS16e_sb_op:
+ goto sigbus;
+
+ case MIPS16e_lh_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lhu_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ LoadHWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lw_op:
+ case MIPS16e_lwpc_op:
+ case MIPS16e_lwsp_op:
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ LoadW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+
+ case MIPS16e_lwu_op:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ LoadWU(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_ld_op:
+loadDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ LoadDW(addr, value, res);
+ if (res)
+ goto fault;
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ regs->regs[reg] = value;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ case MIPS16e_sh_op:
+ if (user && !access_ok(addr, 2))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreHW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sw_op:
+ case MIPS16e_swsp_op:
+ case MIPS16e_i8_op: /* actually - MIPS16e_swrasp_func */
+ if (user && !access_ok(addr, 4))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+
+ case MIPS16e_sd_op:
+writeDW:
+#ifdef CONFIG_64BIT
+ /*
+ * A 32-bit kernel might be running on a 64-bit processor. But
+ * if we're on a 32-bit processor and an i-cache incoherency
+ * or race makes us see a 64-bit instruction here the sdl/sdr
+ * would blow up, so for now we don't handle unaligned 64-bit
+ * instructions on 32-bit kernels.
+ */
+ if (user && !access_ok(addr, 8))
+ goto sigbus;
+
+ MIPS16e_compute_return_epc(regs, &oldinst);
+ value = regs->regs[reg];
+ StoreDW(addr, value, res);
+ if (res)
+ goto fault;
+ break;
+#endif /* CONFIG_64BIT */
+
+ /* Cannot handle 64-bit instructions in 32-bit kernel */
+ goto sigill;
+
+ default:
+ /*
+ * Pheeee... We encountered a yet unknown instruction or
+ * cache coherence problem. Die sucker, die ...
+ */
+ goto sigill;
+ }
+
+#ifdef CONFIG_DEBUG_FS
+ unaligned_instructions++;
+#endif
+
+ return;
+
+fault:
+ /* roll back jump/branch */
+ regs->cp0_epc = origpc;
+ regs->regs[31] = orig31;
+ /* Did we have an exception handler installed? */
+ if (fixup_exception(regs))
+ return;
+
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGSEGV);
+
+ return;
+
+sigbus:
+ die_if_kernel("Unhandled kernel unaligned access", regs);
+ force_sig(SIGBUS);
+
+ return;
+
+sigill:
+ die_if_kernel("Unhandled kernel unaligned access or invalid instruction", regs);
+ force_sig(SIGILL);
+}
+
+asmlinkage void do_ade(struct pt_regs *regs)
+{
+ enum ctx_state prev_state;
+ unsigned int *pc;
+
+ prev_state = exception_enter();
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS,
+ 1, regs, regs->cp0_badvaddr);
+
+#ifdef CONFIG_64BIT
+ /*
+ * Check whether we are hitting the space between the CPU's
+ * implemented maximum virtual user address and the 64-bit
+ * maximum virtual user address, and do exception handling to
+ * get EFAULTs for get_user/put_user.
+ */
+ if ((regs->cp0_badvaddr >= (1UL << cpu_vmbits)) &&
+ (regs->cp0_badvaddr < XKSSEG)) {
+ if (fixup_exception(regs)) {
+ current->thread.cp0_baduaddr = regs->cp0_badvaddr;
+ return;
+ }
+ goto sigbus;
+ }
+#endif
+
+ /*
+ * Did we catch a fault trying to load an instruction?
+ */
+ if (regs->cp0_badvaddr == regs->cp0_epc)
+ goto sigbus;
+
+ if (user_mode(regs) && !test_thread_flag(TIF_FIXADE))
+ goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SIGNAL)
+ goto sigbus;
+
+ /*
+ * Do branch emulation only if we didn't forward the exception.
+ * This is all rather ugly ...
+ */
+
+ /*
+ * Are we running in microMIPS mode?
+ */
+ if (get_isa16_mode(regs->cp0_epc)) {
+ /*
+ * Did we catch a fault trying to load an instruction in
+ * 16-bit mode?
+ */
+ if (regs->cp0_badvaddr == msk_isa16_mode(regs->cp0_epc))
+ goto sigbus;
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+
+ if (cpu_has_mmips) {
+ emulate_load_store_microMIPS(regs,
+ (void __user *)regs->cp0_badvaddr);
+ return;
+ }
+
+ if (cpu_has_mips16) {
+ emulate_load_store_MIPS16e(regs,
+ (void __user *)regs->cp0_badvaddr);
+ return;
+ }
+
+ goto sigbus;
+ }
+
+ if (unaligned_action == UNALIGNED_ACTION_SHOW)
+ show_registers(regs);
+ pc = (unsigned int *)exception_epc(regs);
+
+ emulate_load_store_insn(regs, (void __user *)regs->cp0_badvaddr, pc);
+
+ return;
+
+sigbus:
+ die_if_kernel("Kernel unaligned instruction access", regs);
+ force_sig(SIGBUS);
+
+ /*
+ * XXX On return from the signal handler we should advance the epc
+ */
+ exception_exit(prev_state);
+}
+
+#ifdef CONFIG_DEBUG_FS
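+/*
+ * Both knobs appear under the arch debugfs directory; assuming debugfs
+ * is mounted at /sys/kernel/debug and mips_debugfs_dir is "mips":
+ *
+ *   cat /sys/kernel/debug/mips/unaligned_instructions
+ *   echo 1 > /sys/kernel/debug/mips/unaligned_action	(force SIGBUS)
+ */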
+static int __init debugfs_unaligned(void)
+{
+ debugfs_create_u32("unaligned_instructions", S_IRUGO, mips_debugfs_dir,
+ &unaligned_instructions);
+ debugfs_create_u32("unaligned_action", S_IRUGO | S_IWUSR,
+ mips_debugfs_dir, &unaligned_action);
+ return 0;
+}
+arch_initcall(debugfs_unaligned);
+#endif
diff --git a/arch/mips/kernel/uprobes.c b/arch/mips/kernel/uprobes.c
new file mode 100644
index 000000000..6c063aa18
--- /dev/null
+++ b/arch/mips/kernel/uprobes.c
@@ -0,0 +1,263 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/highmem.h>
+#include <linux/kdebug.h>
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <linux/sched.h>
+#include <linux/uprobes.h>
+
+#include <asm/branch.h>
+#include <asm/cpu-features.h>
+#include <asm/ptrace.h>
+
+#include "probes-common.h"
+
+static inline int insn_has_delay_slot(const union mips_instruction insn)
+{
+ return __insn_has_delay_slot(insn);
+}
+
+/**
+ * arch_uprobe_analyze_insn - instruction analysis including validity and fixups.
+ * @aup: the probepoint information.
+ * @mm: the probed address space.
+ * @addr: virtual address at which to install the probepoint.
+ * Return 0 on success or a negative errno on error.
+ */
+int arch_uprobe_analyze_insn(struct arch_uprobe *aup,
+ struct mm_struct *mm, unsigned long addr)
+{
+ union mips_instruction inst;
+
+ /*
+ * For the time being this also blocks attempts to use uprobes with
+ * MIPS16 and microMIPS.
+ */
+ if (addr & 0x03)
+ return -EINVAL;
+
+ inst.word = aup->insn[0];
+
+ if (__insn_is_compact_branch(inst)) {
+ pr_notice("Uprobes for compact branches are not supported\n");
+ return -EINVAL;
+ }
+
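+ /*
+ * For a branch, step the delay-slot instruction out of line and
+ * emulate the branch itself in arch_uprobe_pre_xol(); that is why
+ * insn[1] is copied when a delay slot is present. The second slot
+ * holds a breakpoint so we trap back after the step.
+ */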
+ aup->ixol[0] = aup->insn[insn_has_delay_slot(inst)];
+ aup->ixol[1] = UPROBE_BRK_UPROBE_XOL; /* breakpoint to trap back */
+
+ return 0;
+}
+
+/**
+ * is_trap_insn - check if the instruction is a trap variant
+ * @insn: instruction to be checked.
+ * Returns true if @insn is a trap variant.
+ *
+ * This definition overrides the weak definition in kernel/events/uprobes.c
+ * and is needed for the case where an architecture has multiple trap
+ * instructions (like PowerPC or MIPS). We treat BREAK just like the more
+ * modern conditional trap instructions.
+ */
+bool is_trap_insn(uprobe_opcode_t *insn)
+{
+ union mips_instruction inst;
+
+ inst.word = *insn;
+
+ switch (inst.i_format.opcode) {
+ case spec_op:
+ switch (inst.r_format.func) {
+ case break_op:
+ case teq_op:
+ case tge_op:
+ case tgeu_op:
+ case tlt_op:
+ case tltu_op:
+ case tne_op:
+ return true;
+ }
+ break;
+
+ case bcond_op: /* Yes, really ... */
+ switch (inst.u_format.rt) {
+ case teqi_op:
+ case tgei_op:
+ case tgeiu_op:
+ case tlti_op:
+ case tltiu_op:
+ case tnei_op:
+ return true;
+ }
+ break;
+ }
+
+ return false;
+}
+
+#define UPROBE_TRAP_NR ULONG_MAX
+
+/*
+ * arch_uprobe_pre_xol - prepare to execute out of line.
+ * @aup: the probepoint information.
+ * @regs: reflects the saved user state of the current task.
+ */
+int arch_uprobe_pre_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ /*
+ * Now find the EPC where to resume after the breakpoint has been
+ * dealt with. This may require emulation of a branch.
+ */
+ aup->resume_epc = regs->cp0_epc + 4;
+ if (insn_has_delay_slot((union mips_instruction) aup->insn[0])) {
+ __compute_return_epc_for_insn(regs,
+ (union mips_instruction) aup->insn[0]);
+ aup->resume_epc = regs->cp0_epc;
+ }
+ utask->autask.saved_trap_nr = current->thread.trap_nr;
+ current->thread.trap_nr = UPROBE_TRAP_NR;
+ regs->cp0_epc = current->utask->xol_vaddr;
+
+ return 0;
+}
+
+int arch_uprobe_post_xol(struct arch_uprobe *aup, struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ current->thread.trap_nr = utask->autask.saved_trap_nr;
+ regs->cp0_epc = aup->resume_epc;
+
+ return 0;
+}
+
+/*
+ * If the xol insn itself traps and generates a signal (say,
+ * SIGILL/SIGSEGV/etc), then detect the case where a single-stepped
+ * instruction jumps back to its own address. It is assumed that anything
+ * like do_page_fault/do_trap/etc sets thread.trap_nr != -1.
+ *
+ * arch_uprobe_pre_xol/arch_uprobe_post_xol save/restore thread.trap_nr,
+ * arch_uprobe_xol_was_trapped() simply checks that ->trap_nr is not equal
+ * to UPROBE_TRAP_NR (ULONG_MAX, i.e. -1) set by arch_uprobe_pre_xol().
+ */
+bool arch_uprobe_xol_was_trapped(struct task_struct *tsk)
+{
+ if (tsk->thread.trap_nr != UPROBE_TRAP_NR)
+ return true;
+
+ return false;
+}
+
+int arch_uprobe_exception_notify(struct notifier_block *self,
+ unsigned long val, void *data)
+{
+ struct die_args *args = data;
+ struct pt_regs *regs = args->regs;
+
+ /* regs == NULL is a kernel bug */
+ if (WARN_ON(!regs))
+ return NOTIFY_DONE;
+
+ /* We are only interested in userspace traps */
+ if (!user_mode(regs))
+ return NOTIFY_DONE;
+
+ switch (val) {
+ case DIE_UPROBE:
+ if (uprobe_pre_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ case DIE_UPROBE_XOL:
+ if (uprobe_post_sstep_notifier(regs))
+ return NOTIFY_STOP;
+ break;
+ default:
+ break;
+ }
+
+ return 0;
+}
+
+/*
+ * This function gets called when XOL instruction either gets trapped or
+ * the thread has a fatal signal. Reset the instruction pointer to its
+ * probed address for the potential restart or for post mortem analysis.
+ */
+void arch_uprobe_abort_xol(struct arch_uprobe *aup,
+ struct pt_regs *regs)
+{
+ struct uprobe_task *utask = current->utask;
+
+ instruction_pointer_set(regs, utask->vaddr);
+}
+
+unsigned long arch_uretprobe_hijack_return_addr(
+ unsigned long trampoline_vaddr, struct pt_regs *regs)
+{
+ unsigned long ra;
+
+ ra = regs->regs[31];
+
+ /* Replace the return address with the trampoline address */
+ regs->regs[31] = trampoline_vaddr;
+
+ return ra;
+}
+
+/**
+ * set_swbp - store breakpoint at a given address.
+ * @auprobe: arch specific probepoint information.
+ * @mm: the probed process address space.
+ * @vaddr: the virtual address to insert the opcode.
+ *
+ * For mm @mm, store the breakpoint instruction at @vaddr.
+ * Return 0 (success) or a negative errno.
+ *
+ * This version overrides the weak version in kernel/events/uprobes.c.
+ * It is required to handle MIPS16 and microMIPS.
+ */
+int __weak set_swbp(struct arch_uprobe *auprobe, struct mm_struct *mm,
+ unsigned long vaddr)
+{
+ return uprobe_write_opcode(auprobe, mm, vaddr, UPROBE_SWBP_INSN);
+}
+
+void arch_uprobe_copy_ixol(struct page *page, unsigned long vaddr,
+ void *src, unsigned long len)
+{
+ unsigned long kaddr, kstart;
+
+ /* Initialize the slot */
+ kaddr = (unsigned long)kmap_atomic(page);
+ kstart = kaddr + (vaddr & ~PAGE_MASK);
+ memcpy((void *)kstart, src, len);
+ flush_icache_range(kstart, kstart + len);
+ kunmap_atomic((void *)kaddr);
+}
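+
+/*
+ * Note on the cache maintenance above: the XOL slot is written through a
+ * cacheable kernel mapping, so flush_icache_range() must write the new
+ * instructions back and invalidate the I-cache before the probed task
+ * executes from the slot; otherwise stale instructions could be fetched.
+ */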
+
+/**
+ * uprobe_get_swbp_addr - compute address of swbp given post-swbp regs
+ * @regs: Reflects the saved state of the task after it has hit a breakpoint
+ * instruction.
+ * Return the address of the breakpoint instruction.
+ *
+ * This overrides the weak version in kernel/events/uprobes.c.
+ */
+unsigned long uprobe_get_swbp_addr(struct pt_regs *regs)
+{
+ return instruction_pointer(regs);
+}
+
+/*
+ * See if the instruction can be emulated.
+ * Returns true if instruction was emulated, false otherwise.
+ *
+ * For now we never emulate, so this function always returns false.
+ */
+bool arch_uprobe_skip_sstep(struct arch_uprobe *auprobe, struct pt_regs *regs)
+{
+ return false;
+}
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
new file mode 100644
index 000000000..5fd9bf1d5
--- /dev/null
+++ b/arch/mips/kernel/vdso.c
@@ -0,0 +1,193 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2015 Imagination Technologies
+ * Author: Alex Smith <alex.smith@imgtec.com>
+ */
+
+#include <linux/binfmts.h>
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/random.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/timekeeper_internal.h>
+
+#include <asm/abi.h>
+#include <asm/mips-cps.h>
+#include <asm/page.h>
+#include <asm/vdso.h>
+#include <vdso/helpers.h>
+#include <vdso/vsyscall.h>
+
+/* Kernel-provided data used by the VDSO. */
+static union mips_vdso_data mips_vdso_data __page_aligned_data;
+struct vdso_data *vdso_data = mips_vdso_data.data;
+
+/*
+ * Mapping for the VDSO data/GIC pages. The real pages are mapped manually, as
+ * what we map and where within the area they are mapped is determined at
+ * runtime.
+ */
+static struct page *no_pages[] = { NULL };
+static struct vm_special_mapping vdso_vvar_mapping = {
+ .name = "[vvar]",
+ .pages = no_pages,
+};
+
+static void __init init_vdso_image(struct mips_vdso_image *image)
+{
+ unsigned long num_pages, i;
+ unsigned long data_pfn;
+
+ BUG_ON(!PAGE_ALIGNED(image->data));
+ BUG_ON(!PAGE_ALIGNED(image->size));
+
+ num_pages = image->size / PAGE_SIZE;
+
+ data_pfn = __phys_to_pfn(__pa_symbol(image->data));
+ for (i = 0; i < num_pages; i++)
+ image->mapping.pages[i] = pfn_to_page(data_pfn + i);
+}
+
+static int __init init_vdso(void)
+{
+ init_vdso_image(&vdso_image);
+
+#ifdef CONFIG_MIPS32_O32
+ init_vdso_image(&vdso_image_o32);
+#endif
+
+#ifdef CONFIG_MIPS32_N32
+ init_vdso_image(&vdso_image_n32);
+#endif
+
+ return 0;
+}
+subsys_initcall(init_vdso);
+
+static unsigned long vdso_base(void)
+{
+ unsigned long base = STACK_TOP;
+
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* Skip the delay slot emulation page */
+ base += PAGE_SIZE;
+ }
+
+ if (current->flags & PF_RANDOMIZE) {
+ base += prandom_u32_max(VDSO_RANDOMIZE_SIZE);
+ base = PAGE_ALIGN(base);
+ }
+
+ return base;
+}
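+
+/*
+ * Worked example of the randomization above (illustrative values,
+ * assuming 4 KiB pages): with STACK_TOP at 0x7fff8000 and a random
+ * offset of 0x123456, base becomes PAGE_ALIGN(0x8011b456) = 0x8011c000,
+ * i.e. a page-aligned address somewhere in the VDSO_RANDOMIZE_SIZE
+ * window above the stack rather than a fixed, guessable one.
+ */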
+
+int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
+{
+ struct mips_vdso_image *image = current->thread.abi->vdso;
+ struct mm_struct *mm = current->mm;
+ unsigned long gic_size, vvar_size, size, base, data_addr, vdso_addr, gic_pfn, gic_base;
+ struct vm_area_struct *vma;
+ int ret;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ if (IS_ENABLED(CONFIG_MIPS_FP_SUPPORT)) {
+ /* Map delay slot emulation page */
+ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ 0, NULL);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+ }
+
+ /*
+ * Determine total area size. This includes the VDSO data itself, the
+ * data page, and the GIC user page if present. Always create a mapping
+ * for the GIC user area if the GIC is present regardless of whether it
+ * is the current clocksource, in case it comes into use later on. We
+ * only map a page even though the total area is 64K, as we only need
+ * the counter registers at the start.
+ */
+ gic_size = mips_gic_present() ? PAGE_SIZE : 0;
+ vvar_size = gic_size + PAGE_SIZE;
+ size = vvar_size + image->size;
+
+ /*
+ * Find a region that's large enough for us to perform the
+ * colour-matching alignment below.
+ */
+ if (cpu_has_dc_aliases)
+ size += shm_align_mask + 1;
+
+ base = get_unmapped_area(NULL, vdso_base(), size, 0, 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
+
+ /*
+ * If we suffer from dcache aliasing, ensure that the VDSO data page
+ * mapping is coloured the same as the kernel's mapping of that memory.
+ * This ensures that when the kernel updates the VDSO data userland
+ * will observe it without requiring cache invalidations.
+ */
+ if (cpu_has_dc_aliases) {
+ base = __ALIGN_MASK(base, shm_align_mask);
+ base += ((unsigned long)vdso_data - gic_size) & shm_align_mask;
+ }
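+
+ /*
+ * Illustrative numbers for the colouring arithmetic above, assuming
+ * shm_align_mask == 0x7fff (a 32 KiB aliasing window): base is first
+ * rounded up to a 32 KiB boundary, then offset by
+ * ((unsigned long)vdso_data - gic_size) & 0x7fff, so the user mapping
+ * of the data page shares its cache colour with the kernel's own
+ * mapping of vdso_data.
+ */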
+
+ data_addr = base + gic_size;
+ vdso_addr = data_addr + PAGE_SIZE;
+
+ vma = _install_special_mapping(mm, base, vvar_size,
+ VM_READ | VM_MAYREAD,
+ &vdso_vvar_mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ /* Map GIC user page. */
+ if (gic_size) {
+ gic_base = (unsigned long)mips_gic_base + MIPS_GIC_USER_OFS;
+ gic_pfn = PFN_DOWN(__pa(gic_base));
+
+ ret = io_remap_pfn_range(vma, base, gic_pfn, gic_size,
+ pgprot_noncached(vma->vm_page_prot));
+ if (ret)
+ goto out;
+ }
+
+ /* Map data page. */
+ ret = remap_pfn_range(vma, data_addr,
+ virt_to_phys(vdso_data) >> PAGE_SHIFT,
+ PAGE_SIZE, vma->vm_page_prot);
+ if (ret)
+ goto out;
+
+ /* Map VDSO image. */
+ vma = _install_special_mapping(mm, vdso_addr, image->size,
+ VM_READ | VM_EXEC |
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
+ &image->mapping);
+ if (IS_ERR(vma)) {
+ ret = PTR_ERR(vma);
+ goto out;
+ }
+
+ mm->context.vdso = (void *)vdso_addr;
+ ret = 0;
+
+out:
+ mmap_write_unlock(mm);
+ return ret;
+}
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..91d6a5360
--- /dev/null
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -0,0 +1,243 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#define PAGE_SIZE _PAGE_SIZE
+
+/*
+ * Put .bss..swapper_pg_dir as the first thing in .bss. This will
+ * ensure that it has .bss alignment (64K).
+ */
+#define BSS_FIRST_SECTIONS *(.bss..swapper_pg_dir)
+
+/* Cavium Octeon should not have a separate PT_NOTE Program Header. */
+#ifndef CONFIG_CAVIUM_OCTEON_SOC
+#define EMITS_PT_NOTE
+#endif
+
+#define RUNTIME_DISCARD_EXIT
+
+#include <asm-generic/vmlinux.lds.h>
+
+#undef mips
+#define mips mips
+OUTPUT_ARCH(mips)
+ENTRY(kernel_entry)
+PHDRS {
+ text PT_LOAD FLAGS(7); /* RWX */
+#ifndef CONFIG_CAVIUM_OCTEON_SOC
+ note PT_NOTE FLAGS(4); /* R__ */
+#endif /* CONFIG_CAVIUM_OCTEON_SOC */
+}
+
+#ifdef CONFIG_32BIT
+ #ifdef CONFIG_CPU_LITTLE_ENDIAN
+ jiffies = jiffies_64;
+ #else
+ jiffies = jiffies_64 + 4;
+ #endif
+#else
+ jiffies = jiffies_64;
+#endif
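+
+/*
+ * Background for the 32-bit case above: 'jiffies' aliases the low 32 bits
+ * of the 64-bit jiffies_64 counter. In a little-endian layout that word
+ * sits at offset 0; in a big-endian layout it sits at offset 4, hence
+ * the '+ 4'.
+ */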
+
+SECTIONS
+{
+#ifdef CONFIG_BOOT_ELF64
+ /* Read-only sections, merged into text segment: */
+ /* . = 0xc000000000000000; */
+
+ /* This is the value for an Origin kernel, taken from an IRIX kernel. */
+ /* . = 0xc00000000001c000; */
+
+ /* Set the vaddr for the text segment to a value
+ * >= 0xa800 0000 0001 9000 if symmon is not going to be configured
+ * >= 0xa800 0000 0030 0000 otherwise
+ */
+
+ /* . = 0xa800000000300000; */
+ . = 0xffffffff80300000;
+#endif
+ . = LINKER_LOAD_ADDRESS;
+ /* read-only */
+ _text = .; /* Text and read-only data */
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ *(.gnu.warning)
+ . = ALIGN(16);
+ *(.got) /* Global offset table */
+ } :text = 0
+ _etext = .; /* End of text section */
+
+ EXCEPTION_TABLE(16)
+
+ /* Exception table for data bus errors */
+ __dbe_table : {
+ __start___dbe_table = .;
+ KEEP(*(__dbe_table))
+ __stop___dbe_table = .;
+ }
+
+ _sdata = .; /* Start of data section */
+ RO_DATA(4096)
+
+ /* writeable */
+ .data : { /* Data */
+ . = . + DATAOFFSET; /* for CONFIG_MAPPED_KERNEL */
+
+ INIT_TASK_DATA(THREAD_SIZE)
+ NOSAVE_DATA
+ PAGE_ALIGNED_DATA(PAGE_SIZE)
+ CACHELINE_ALIGNED_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ READ_MOSTLY_DATA(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+ DATA_DATA
+ CONSTRUCTORS
+ }
+ BUG_TABLE
+ _gp = . + 0x8000;
+ .lit8 : {
+ *(.lit8)
+ }
+ .lit4 : {
+ *(.lit4)
+ }
+ /* We want the small data sections together, so single-instruction offsets
+ can access them all, and initialized data all before uninitialized, so
+ we can shorten the on-disk segment size. */
+ .sdata : {
+ *(.sdata)
+ }
+ _edata = .; /* End of data section */
+
+ /* will be freed after init */
+ . = ALIGN(PAGE_SIZE); /* Init code and data */
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(16)
+
+ . = ALIGN(4);
+ .mips.machines.init : AT(ADDR(.mips.machines.init) - LOAD_OFFSET) {
+ __mips_machines_start = .;
+ KEEP(*(.mips.machines.init))
+ __mips_machines_end = .;
+ }
+
+ /* .exit.text is discarded at runtime, not link time, to deal with
+ * references from .rodata
+ */
+ .exit.text : {
+ EXIT_TEXT
+ }
+ .exit.data : {
+ EXIT_DATA
+ }
+#ifdef CONFIG_SMP
+ PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
+#endif
+
+ .rel.dyn : ALIGN(8) {
+ *(.rel)
+ *(.rel*)
+ }
+
+#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
+ STRUCT_ALIGN();
+ .appended_dtb : AT(ADDR(.appended_dtb) - LOAD_OFFSET) {
+ *(.appended_dtb)
+ KEEP(*(.appended_dtb))
+ }
+#endif
+
+#ifdef CONFIG_RELOCATABLE
+ . = ALIGN(4);
+
+ .data.reloc : {
+ _relocation_start = .;
+ /*
+ * Space for relocation table
+ * This needs to be filled so that the
+ * relocs tool can overwrite the content.
+ * An invalid value is left at the start of the
+ * section to abort relocation if the table
+ * has not been filled in.
+ */
+ LONG(0xFFFFFFFF);
+ FILL(0);
+ . += CONFIG_RELOCATION_TABLE_SIZE - 4;
+ _relocation_end = .;
+ }
+#endif
+
+#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
+ .fill : {
+ FILL(0);
+ BYTE(0);
+ STRUCT_ALIGN();
+ }
+ __appended_dtb = .;
+ /* leave space for appended DTB */
+ . += 0x100000;
+#endif
+ /*
+ * Align to 64K in attempt to eliminate holes before the
+ * .bss..swapper_pg_dir section at the start of .bss. This
+ * also satisfies PAGE_SIZE alignment as the largest page size
+ * allowed is 64K.
+ */
+ . = ALIGN(0x10000);
+ __init_end = .;
+ /* freed after init ends here */
+
+ /*
+ * Force .bss to 64K alignment so that .bss..swapper_pg_dir
+ * gets that alignment. .sbss should be empty, so there will be
+ * no holes after __init_end. */
+ BSS_SECTION(0, 0x10000, 8)
+
+ _end = . ;
+
+ /* These mark the ABI of the kernel for debuggers. */
+ .mdebug.abi32 : {
+ KEEP(*(.mdebug.abi32))
+ }
+ .mdebug.abi64 : {
+ KEEP(*(.mdebug.abi64))
+ }
+
+ /* This is the MIPS specific mdebug section. */
+ .mdebug : {
+ *(.mdebug)
+ }
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ /* These must appear regardless of . */
+ .gptab.sdata : {
+ *(.gptab.data)
+ *(.gptab.sdata)
+ }
+ .gptab.sbss : {
+ *(.gptab.bss)
+ *(.gptab.sbss)
+ }
+
+ /* Sections to be discarded */
+ DISCARDS
+ /DISCARD/ : {
+ /* ABI crap starts here */
+ *(.MIPS.abiflags)
+ *(.MIPS.options)
+ *(.gnu.attributes)
+ *(.options)
+ *(.pdr)
+ *(.reginfo)
+ }
+}
diff --git a/arch/mips/kernel/vpe-cmp.c b/arch/mips/kernel/vpe-cmp.c
new file mode 100644
index 000000000..92140edb3
--- /dev/null
+++ b/arch/mips/kernel/vpe-cmp.c
@@ -0,0 +1,180 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/vpe.h>
+
+static int major;
+
+void cleanup_tc(struct tc *tc)
+{
+
+}
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ struct vpe_notifications *notifier;
+
+ list_for_each_entry(notifier, &vpe->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(vpe->load_addr);
+ vpe->state = VPE_STATE_UNUSED;
+
+ return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+ char *buf)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ unsigned long new;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret < 0)
+ return ret;
+
+ /* APRP can only reserve one TC in a VPE and no more. */
+ if (new != 1)
+ return -EINVAL;
+
+ vpe->ntcs = new;
+
+ return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+}
+
+static struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .dev_release = vpe_device_release,
+ .dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+ struct vpe *v = NULL;
+ struct tc *t;
+ int err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (num_possible_cpus() - aprp_cpu_index() < 1) {
+ pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
+ "Pass maxcpus=<n> argument as kernel argument\n");
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+ if (major < 0) {
+ pr_warn("VPE loader: unable to register character device\n");
+ return major;
+ }
+
+ err = class_register(&vpe_class);
+ if (err) {
+ pr_err("vpe_class registration failed\n");
+ goto out_chrdev;
+ }
+
+ device_initialize(&vpe_device);
+ vpe_device.class = &vpe_class;
+ vpe_device.parent = NULL;
+ dev_set_name(&vpe_device, "vpe_sp");
+ vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+ err = device_add(&vpe_device);
+ if (err) {
+ pr_err("Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ t = alloc_tc(aprp_cpu_index());
+ if (!t) {
+ pr_warn("VPE: unable to allocate TC\n");
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ /* VPE */
+ v = alloc_vpe(aprp_cpu_index());
+ if (v == NULL) {
+ pr_warn("VPE: unable to allocate VPE\n");
+ kfree(t);
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ v->ntcs = 1;
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+
+ /* TC */
+ t->pvpe = v; /* set the parent vpe */
+
+ return 0;
+
+out_dev:
+ device_del(&vpe_device);
+
+out_class:
+ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+out_chrdev:
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ /* No locking needed here */
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list)
+ if (v->state != VPE_STATE_UNUSED)
+ release_vpe(v);
+}
diff --git a/arch/mips/kernel/vpe-mt.c b/arch/mips/kernel/vpe-mt.c
new file mode 100644
index 000000000..84a82b551
--- /dev/null
+++ b/arch/mips/kernel/vpe-mt.c
@@ -0,0 +1,521 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/export.h>
+
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/mips_mt.h>
+#include <asm/vpe.h>
+
+static int major;
+
+/* The number of TCs and VPEs physically available on the core */
+static int hw_tcs, hw_vpes;
+
+/* We are prepared, so configure and start the VPE... */
+int vpe_run(struct vpe *v)
+{
+ unsigned long flags, val, dmt_flag;
+ struct vpe_notifications *notifier;
+ unsigned int vpeflags;
+ struct tc *t;
+
+ /* check we are the Master VPE */
+ local_irq_save(flags);
+ val = read_c0_vpeconf0();
+ if (!(val & VPECONF0_MVP)) {
+ pr_warn("VPE loader: only Master VPE's are able to config MT\n");
+ local_irq_restore(flags);
+
+ return -1;
+ }
+
+ dmt_flag = dmt();
+ vpeflags = dvpe();
+
+ if (list_empty(&v->tc)) {
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ pr_warn("VPE loader: No TC's associated with VPE %d\n",
+ v->minor);
+
+ return -ENOEXEC;
+ }
+
+ t = list_first_entry(&v->tc, struct tc, tc);
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+
+ /* should check it is halted, and not activated */
+ if ((read_tc_c0_tcstatus() & TCSTATUS_A) ||
+ !(read_tc_c0_tchalt() & TCHALT_H)) {
+ evpe(vpeflags);
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ pr_warn("VPE loader: TC %d is already active!\n",
+ t->index);
+
+ return -ENOEXEC;
+ }
+
+ /*
+ * Write the address we want it to start running from in the TCPC
+ * register.
+ */
+ write_tc_c0_tcrestart((unsigned long)v->__start);
+ write_tc_c0_tccontext((unsigned long)0);
+
+ /*
+ * Mark the TC as activated, not interrupt exempt and not dynamically
+ * allocatable
+ */
+ val = read_tc_c0_tcstatus();
+ val = (val & ~(TCSTATUS_DA | TCSTATUS_IXMT)) | TCSTATUS_A;
+ write_tc_c0_tcstatus(val);
+
+ write_tc_c0_tchalt(read_tc_c0_tchalt() & ~TCHALT_H);
+
+ /*
+ * The sde-kit passes 'memsize' to __start in $a3, so set something
+ * here... Or set $a3 to zero and define DFLT_STACK_SIZE and
+ * DFLT_HEAP_SIZE when you compile your program
+ */
+ mttgpr(6, v->ntcs);
+ mttgpr(7, physical_memsize);
+
+ /* set up VPE1 */
+ /*
+ * bind the TC to VPE 1 as late as possible so we only have the final
+ * VPE registers to set up, and so an EJTAG probe can trigger on it
+ */
+ write_tc_c0_tcbind((read_tc_c0_tcbind() & ~TCBIND_CURVPE) | 1);
+
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~(VPECONF0_VPA));
+
+ back_to_back_c0_hazard();
+
+ /* Set up the XTC bit in vpeconf0 to point at our tc */
+ write_vpe_c0_vpeconf0((read_vpe_c0_vpeconf0() & ~(VPECONF0_XTC))
+ | (t->index << VPECONF0_XTC_SHIFT));
+
+ back_to_back_c0_hazard();
+
+ /* enable this VPE */
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() | VPECONF0_VPA);
+
+ /* clear out any left overs from a previous program */
+ write_vpe_c0_status(0);
+ write_vpe_c0_cause(0);
+
+ /* take system out of configuration state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /*
+ * SMVP kernels manage VPE enable independently, but uniprocessor
+ * kernels need to turn it on, even if that wasn't the pre-dvpe() state.
+ */
+#ifdef CONFIG_SMP
+ evpe(vpeflags);
+#else
+ evpe(EVPE_ENABLE);
+#endif
+ emt(dmt_flag);
+ local_irq_restore(flags);
+
+ list_for_each_entry(notifier, &v->notify, list)
+ notifier->start(VPE_MODULE_MINOR);
+
+ return 0;
+}
+
+void cleanup_tc(struct tc *tc)
+{
+ unsigned long flags;
+ unsigned int mtflags, vpflags;
+ int tmp;
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(tc->index);
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not allocated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+}
+
+/* module wrapper entry points */
+/* give me a vpe */
+void *vpe_alloc(void)
+{
+ int i;
+ struct vpe *v;
+
+ /* find a vpe */
+ for (i = 1; i < MAX_VPES; i++) {
+ v = get_vpe(i);
+ if (v != NULL) {
+ v->state = VPE_STATE_INUSE;
+ return v;
+ }
+ }
+ return NULL;
+}
+EXPORT_SYMBOL(vpe_alloc);
+
+/* start running from here */
+int vpe_start(void *vpe, unsigned long start)
+{
+ struct vpe *v = vpe;
+
+ v->__start = start;
+ return vpe_run(v);
+}
+EXPORT_SYMBOL(vpe_start);
+
+/* halt it for now */
+int vpe_stop(void *vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ evpe_flags = dvpe();
+
+ t = list_entry(v->tc.next, struct tc, tc);
+ if (t != NULL) {
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+ }
+
+ evpe(evpe_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpe_stop);
+
+/* I'm done with it, thank you */
+int vpe_free(void *vpe)
+{
+ struct vpe *v = vpe;
+ struct tc *t;
+ unsigned int evpe_flags;
+
+ t = list_entry(v->tc.next, struct tc, tc);
+ if (t == NULL)
+ return -ENOEXEC;
+
+ evpe_flags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ settc(t->index);
+ write_vpe_c0_vpeconf0(read_vpe_c0_vpeconf0() & ~VPECONF0_VPA);
+
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ /* mark the TC unallocated */
+ write_tc_c0_tcstatus(read_tc_c0_tcstatus() & ~TCSTATUS_A);
+
+ v->state = VPE_STATE_UNUSED;
+
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(evpe_flags);
+
+ return 0;
+}
+EXPORT_SYMBOL(vpe_free);
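+
+/*
+ * Minimal usage sketch of the four exported entry points above, as they
+ * might appear in a client kernel module. 'sp_entry' is hypothetical and
+ * stands for wherever the caller has placed the SP program's startup code.
+ */
+#if 0 /* illustrative sketch, not built */
+static int example_run_sp_program(unsigned long sp_entry)
+{
+ void *v = vpe_alloc(); /* claim a free VPE */
+
+ if (!v)
+ return -ENODEV;
+ if (vpe_start(v, sp_entry) < 0) { /* configure and start it */
+ vpe_free(v);
+ return -EIO;
+ }
+ /* ...and later, tear it down again: */
+ vpe_stop(v);
+ vpe_free(v);
+ return 0;
+}
+#endif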
+
+static ssize_t store_kill(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ struct vpe_notifications *notifier;
+
+ list_for_each_entry(notifier, &vpe->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(vpe->load_addr);
+ cleanup_tc(get_tc(aprp_cpu_index()));
+ vpe_stop(vpe);
+ vpe_free(vpe);
+
+ return len;
+}
+static DEVICE_ATTR(kill, S_IWUSR, NULL, store_kill);
+
+static ssize_t ntcs_show(struct device *cd, struct device_attribute *attr,
+ char *buf)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+
+ return sprintf(buf, "%d\n", vpe->ntcs);
+}
+
+static ssize_t ntcs_store(struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t len)
+{
+ struct vpe *vpe = get_vpe(aprp_cpu_index());
+ unsigned long new;
+ int ret;
+
+ ret = kstrtoul(buf, 0, &new);
+ if (ret < 0)
+ return ret;
+
+ if (new == 0 || new > (hw_tcs - aprp_cpu_index()))
+ return -EINVAL;
+
+ vpe->ntcs = new;
+
+ return len;
+}
+static DEVICE_ATTR_RW(ntcs);
+
+static struct attribute *vpe_attrs[] = {
+ &dev_attr_kill.attr,
+ &dev_attr_ntcs.attr,
+ NULL,
+};
+ATTRIBUTE_GROUPS(vpe);
+
+static void vpe_device_release(struct device *cd)
+{
+}
+
+static struct class vpe_class = {
+ .name = "vpe",
+ .owner = THIS_MODULE,
+ .dev_release = vpe_device_release,
+ .dev_groups = vpe_groups,
+};
+
+static struct device vpe_device;
+
+int __init vpe_module_init(void)
+{
+ unsigned int mtflags, vpflags;
+ unsigned long flags, val;
+ struct vpe *v = NULL;
+ struct tc *t;
+ int tc, err;
+
+ if (!cpu_has_mipsmt) {
+ pr_warn("VPE loader: not a MIPS MT capable processor\n");
+ return -ENODEV;
+ }
+
+ if (vpelimit == 0) {
+ pr_warn("No VPEs reserved for AP/SP, not initialize VPE loader\n"
+ "Pass maxvpes=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ if (aprp_cpu_index() == 0) {
+ pr_warn("No TCs reserved for AP/SP, not initialize VPE loader\n"
+ "Pass maxtcs=<n> argument as kernel argument\n");
+
+ return -ENODEV;
+ }
+
+ major = register_chrdev(0, VPE_MODULE_NAME, &vpe_fops);
+ if (major < 0) {
+ pr_warn("VPE loader: unable to register character device\n");
+ return major;
+ }
+
+ err = class_register(&vpe_class);
+ if (err) {
+ pr_err("vpe_class registration failed\n");
+ goto out_chrdev;
+ }
+
+ device_initialize(&vpe_device);
+ vpe_device.class = &vpe_class;
+ vpe_device.parent = NULL;
+ dev_set_name(&vpe_device, "vpe1");
+ vpe_device.devt = MKDEV(major, VPE_MODULE_MINOR);
+ err = device_add(&vpe_device);
+ if (err) {
+ pr_err("Adding vpe_device failed\n");
+ goto out_class;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+
+ /* Put MVPE's into 'configuration state' */
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ val = read_c0_mvpconf0();
+ hw_tcs = (val & MVPCONF0_PTC) + 1;
+ hw_vpes = ((val & MVPCONF0_PVPE) >> MVPCONF0_PVPE_SHIFT) + 1;
+
+ for (tc = aprp_cpu_index(); tc < hw_tcs; tc++) {
+ /*
+ * Must re-enable multithreading temporarily or in case we
+ * reschedule send IPIs or similar we might hang.
+ */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+ t = alloc_tc(tc);
+ if (!t) {
+ err = -ENOMEM;
+ goto out_dev;
+ }
+
+ local_irq_save(flags);
+ mtflags = dmt();
+ vpflags = dvpe();
+ set_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ /* VPE's */
+ if (tc < hw_tcs) {
+ settc(tc);
+
+ v = alloc_vpe(tc);
+ if (v == NULL) {
+ pr_warn("VPE: unable to allocate VPE\n");
+ goto out_reenable;
+ }
+
+ v->ntcs = hw_tcs - aprp_cpu_index();
+
+ /* add the tc to the list of this vpe's tc's. */
+ list_add(&t->tc, &v->tc);
+
+ /* deactivate all but vpe0 */
+ if (tc >= aprp_cpu_index()) {
+ unsigned long tmp = read_vpe_c0_vpeconf0();
+
+ tmp &= ~VPECONF0_VPA;
+
+ /* master VPE */
+ tmp |= VPECONF0_MVP;
+ write_vpe_c0_vpeconf0(tmp);
+ }
+
+ /* disable multi-threading with TC's */
+ write_vpe_c0_vpecontrol(read_vpe_c0_vpecontrol() &
+ ~VPECONTROL_TE);
+
+ if (tc >= vpelimit) {
+ /*
+ * Set config to be the same as vpe0,
+ * particularly kseg0 coherency alg
+ */
+ write_vpe_c0_config(read_c0_config());
+ }
+ }
+
+ /* TC's */
+ t->pvpe = v; /* set the parent vpe */
+
+ if (tc >= aprp_cpu_index()) {
+ unsigned long tmp;
+
+ settc(tc);
+
+ /*
+ * A TC that is bound to any other VPE gets bound to
+ * VPE0, ideally I'd like to make it homeless but it
+ * doesn't appear to let me bind a TC to a non-existent
+ * VPE. Which is perfectly reasonable.
+ *
+ * The (un)bound state is visible to an EJTAG probe so
+ * may notify GDB...
+ */
+ tmp = read_tc_c0_tcbind();
+ if (tmp & TCBIND_CURVPE) {
+ /* tc is bound to a vpe other than vpe0 */
+ write_tc_c0_tcbind(tmp & ~TCBIND_CURVPE);
+
+ t->pvpe = get_vpe(0); /* set the parent vpe */
+ }
+
+ /* halt the TC */
+ write_tc_c0_tchalt(TCHALT_H);
+ mips_ihb();
+
+ tmp = read_tc_c0_tcstatus();
+
+ /* mark not activated and not dynamically allocatable */
+ tmp &= ~(TCSTATUS_A | TCSTATUS_DA);
+ tmp |= TCSTATUS_IXMT; /* interrupt exempt */
+ write_tc_c0_tcstatus(tmp);
+ }
+ }
+
+out_reenable:
+ /* release config state */
+ clear_c0_mvpcontrol(MVPCONTROL_VPC);
+
+ evpe(vpflags);
+ emt(mtflags);
+ local_irq_restore(flags);
+
+ return 0;
+
+out_dev:
+ device_del(&vpe_device);
+
+out_class:
+ put_device(&vpe_device);
+ class_unregister(&vpe_class);
+
+out_chrdev:
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ return err;
+}
+
+void __exit vpe_module_exit(void)
+{
+ struct vpe *v, *n;
+
+ device_unregister(&vpe_device);
+ class_unregister(&vpe_class);
+ unregister_chrdev(major, VPE_MODULE_NAME);
+
+ /* No locking needed here */
+ list_for_each_entry_safe(v, n, &vpecontrol.vpe_list, list) {
+ if (v->state != VPE_STATE_UNUSED)
+ release_vpe(v);
+ }
+}
diff --git a/arch/mips/kernel/vpe.c b/arch/mips/kernel/vpe.c
new file mode 100644
index 000000000..132949727
--- /dev/null
+++ b/arch/mips/kernel/vpe.c
@@ -0,0 +1,900 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2004, 2005 MIPS Technologies, Inc. All rights reserved.
+ * Copyright (C) 2013 Imagination Technologies Ltd.
+ *
+ * VPE support module for loading a MIPS SP program into VPE1. The SP
+ * environment is rather simple since there are no TLBs. It needs
+ * to be relocatable (or partially linked). Initialize your stack in
+ * the startup code. The loader looks for the symbol __start and sets
+ * up execution to resume from there. To load and run, simply cat the
+ * SP 'binary' to the /dev/vpe1 device.
+ */
+#include <linux/kernel.h>
+#include <linux/device.h>
+#include <linux/fs.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+#include <linux/elf.h>
+#include <linux/seq_file.h>
+#include <linux/syscalls.h>
+#include <linux/moduleloader.h>
+#include <linux/interrupt.h>
+#include <linux/poll.h>
+#include <linux/memblock.h>
+#include <asm/mipsregs.h>
+#include <asm/mipsmtregs.h>
+#include <asm/cacheflush.h>
+#include <linux/atomic.h>
+#include <asm/mips_mt.h>
+#include <asm/processor.h>
+#include <asm/vpe.h>
+
+#ifndef ARCH_SHF_SMALL
+#define ARCH_SHF_SMALL 0
+#endif
+
+/* If this is set, the section belongs in the init part of the module */
+#define INIT_OFFSET_MASK (1UL << (BITS_PER_LONG-1))
+
+struct vpe_control vpecontrol = {
+ .vpe_list_lock = __SPIN_LOCK_UNLOCKED(vpe_list_lock),
+ .vpe_list = LIST_HEAD_INIT(vpecontrol.vpe_list),
+ .tc_list_lock = __SPIN_LOCK_UNLOCKED(tc_list_lock),
+ .tc_list = LIST_HEAD_INIT(vpecontrol.tc_list)
+};
+
+/* get the vpe associated with this minor */
+struct vpe *get_vpe(int minor)
+{
+ struct vpe *res, *v;
+
+ if (!cpu_has_mipsmt)
+ return NULL;
+
+ res = NULL;
+ spin_lock(&vpecontrol.vpe_list_lock);
+ list_for_each_entry(v, &vpecontrol.vpe_list, list) {
+ if (v->minor == VPE_MODULE_MINOR) {
+ res = v;
+ break;
+ }
+ }
+ spin_unlock(&vpecontrol.vpe_list_lock);
+
+ return res;
+}
+
+/* get the tc associated with this index */
+struct tc *get_tc(int index)
+{
+ struct tc *res, *t;
+
+ res = NULL;
+ spin_lock(&vpecontrol.tc_list_lock);
+ list_for_each_entry(t, &vpecontrol.tc_list, list) {
+ if (t->index == index) {
+ res = t;
+ break;
+ }
+ }
+ spin_unlock(&vpecontrol.tc_list_lock);
+
+ return res;
+}
+
+/* allocate a vpe and associate it with this minor (or index) */
+struct vpe *alloc_vpe(int minor)
+{
+ struct vpe *v;
+
+ v = kzalloc(sizeof(struct vpe), GFP_KERNEL);
+ if (v == NULL)
+ goto out;
+
+ INIT_LIST_HEAD(&v->tc);
+ spin_lock(&vpecontrol.vpe_list_lock);
+ list_add_tail(&v->list, &vpecontrol.vpe_list);
+ spin_unlock(&vpecontrol.vpe_list_lock);
+
+ INIT_LIST_HEAD(&v->notify);
+ v->minor = VPE_MODULE_MINOR;
+
+out:
+ return v;
+}
+
+/* allocate a tc. At startup only tc0 is running; all others can be halted. */
+struct tc *alloc_tc(int index)
+{
+ struct tc *tc;
+
+ tc = kzalloc(sizeof(struct tc), GFP_KERNEL);
+ if (tc == NULL)
+ goto out;
+
+ INIT_LIST_HEAD(&tc->tc);
+ tc->index = index;
+
+ spin_lock(&vpecontrol.tc_list_lock);
+ list_add_tail(&tc->list, &vpecontrol.tc_list);
+ spin_unlock(&vpecontrol.tc_list_lock);
+
+out:
+ return tc;
+}
+
+/* clean up and free everything */
+void release_vpe(struct vpe *v)
+{
+ list_del(&v->list);
+ if (v->load_addr)
+ release_progmem(v->load_addr);
+ kfree(v);
+}
+
+/* Find some VPE program space */
+void *alloc_progmem(unsigned long len)
+{
+ void *addr;
+
+#ifdef CONFIG_MIPS_VPE_LOADER_TOM
+ /*
+ * This means you must tell Linux to use less memory than you
+ * physically have, for example by passing a mem= boot argument.
+ */
+ addr = pfn_to_kaddr(max_low_pfn);
+ memset(addr, 0, len);
+#else
+ /* simple grab some mem for now */
+ addr = kzalloc(len, GFP_KERNEL);
+#endif
+
+ return addr;
+}
+
+void release_progmem(void *ptr)
+{
+#ifndef CONFIG_MIPS_VPE_LOADER_TOM
+ kfree(ptr);
+#endif
+}
+
+/* Update size with this section: return offset. */
+static long get_offset(unsigned long *size, Elf_Shdr *sechdr)
+{
+ long ret;
+
+ ret = ALIGN(*size, sechdr->sh_addralign ? : 1);
+ *size = ret + sechdr->sh_size;
+ return ret;
+}
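+
+/*
+ * Example of the bookkeeping above: with *size == 10 and a section where
+ * sh_addralign == 8 and sh_size == 16, ALIGN() bumps the offset to 16,
+ * the section is placed there, and *size becomes 32 for the next caller.
+ */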
+
+/* Lay out the SHF_ALLOC sections in a way not dissimilar to how ld
+ might -- code, read-only data, read-write data, small data. Tally
+ sizes, and place the offsets into sh_entsize fields: high bit means it
+ belongs in init. */
+static void layout_sections(struct module *mod, const Elf_Ehdr *hdr,
+ Elf_Shdr *sechdrs, const char *secstrings)
+{
+ static unsigned long const masks[][2] = {
+ /* NOTE: all executable code must be the first section
+ * in this array; otherwise modify the text_size
+ * finder in the two loops below */
+ {SHF_EXECINSTR | SHF_ALLOC, ARCH_SHF_SMALL},
+ {SHF_ALLOC, SHF_WRITE | ARCH_SHF_SMALL},
+ {SHF_WRITE | SHF_ALLOC, ARCH_SHF_SMALL},
+ {ARCH_SHF_SMALL | SHF_ALLOC, 0}
+ };
+ unsigned int m, i;
+
+ for (i = 0; i < hdr->e_shnum; i++)
+ sechdrs[i].sh_entsize = ~0UL;
+
+ for (m = 0; m < ARRAY_SIZE(masks); ++m) {
+ for (i = 0; i < hdr->e_shnum; ++i) {
+ Elf_Shdr *s = &sechdrs[i];
+
+ if ((s->sh_flags & masks[m][0]) != masks[m][0]
+ || (s->sh_flags & masks[m][1])
+ || s->sh_entsize != ~0UL)
+ continue;
+ s->sh_entsize =
+ get_offset((unsigned long *)&mod->core_layout.size, s);
+ }
+
+ if (m == 0)
+ mod->core_layout.text_size = mod->core_layout.size;
+
+ }
+}
+
+/* from module-elf32.c, but subverted a little */
+
+struct mips_hi16 {
+ struct mips_hi16 *next;
+ Elf32_Addr *addr;
+ Elf32_Addr value;
+};
+
+static struct mips_hi16 *mips_hi16_list;
+static unsigned int gp_offs, gp_addr;
+
+static int apply_r_mips_none(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ return 0;
+}
+
+static int apply_r_mips_gprel16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ int rel;
+
+ if (!(*location & 0xffff)) {
+ rel = (int)v - gp_addr;
+ } else {
+ /* .sbss + gp(relative) + offset */
+ /* kludge! */
+ rel = (int)(short)((int)v + gp_offs +
+ (int)(short)(*location & 0xffff) - gp_addr);
+ }
+
+ if ((rel > 32768) || (rel < -32768)) {
+ pr_debug("VPE loader: apply_r_mips_gprel16: relative address 0x%x out of range of gp register\n",
+ rel);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & 0xffff0000) | (rel & 0xffff);
+
+ return 0;
+}
+
+static int apply_r_mips_pc16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ int rel;
+ rel = (((unsigned int)v - (unsigned int)location));
+ rel >>= 2; /* because the offset is in _instructions_ not bytes. */
+ rel -= 1; /* and one instruction less due to the branch delay slot. */
+
+ if ((rel > 32768) || (rel < -32768)) {
+ pr_debug("VPE loader: apply_r_mips_pc16: relative address out of range 0x%x\n",
+ rel);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & 0xffff0000) | (rel & 0xffff);
+
+ return 0;
+}
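+
+/*
+ * Example of the PC16 arithmetic above: a branch at 0x1000 targeting
+ * 0x1010 gives rel = 0x10 bytes = 4 instructions, minus 1 for the delay
+ * slot = 3, which is exactly the word offset the hardware adds to the
+ * address of the delay-slot instruction.
+ */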
+
+static int apply_r_mips_32(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ *location += v;
+
+ return 0;
+}
+
+static int apply_r_mips_26(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ if (v % 4) {
+ pr_debug("VPE loader: apply_r_mips_26: unaligned relocation\n");
+ return -ENOEXEC;
+ }
+
+/*
+ * Not desperately convinced this is a good check of an overflow condition
+ * anyway. But it gets in the way of handling undefined weak symbols which
+ * we want to set to zero.
+ * if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
+ * printk(KERN_ERR
+ * "module %s: relocation overflow\n",
+ * me->name);
+ * return -ENOEXEC;
+ * }
+ */
+
+ *location = (*location & ~0x03ffffff) |
+ ((*location + (v >> 2)) & 0x03ffffff);
+ return 0;
+}
+
+static int apply_r_mips_hi16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ struct mips_hi16 *n;
+
+ /*
+ * We cannot relocate this one now because we don't know the value of
+ * the carry we need to add. Save the information, and let LO16 do the
+ * actual relocation.
+ */
+ n = kmalloc(sizeof(*n), GFP_KERNEL);
+ if (!n)
+ return -ENOMEM;
+
+ n->addr = location;
+ n->value = v;
+ n->next = mips_hi16_list;
+ mips_hi16_list = n;
+
+ return 0;
+}
+
+static int apply_r_mips_lo16(struct module *me, uint32_t *location,
+ Elf32_Addr v)
+{
+ unsigned long insnlo = *location;
+ Elf32_Addr val, vallo;
+ struct mips_hi16 *l, *next;
+
+ /* Sign extend the addend we extract from the lo insn. */
+ vallo = ((insnlo & 0xffff) ^ 0x8000) - 0x8000;
+
+ if (mips_hi16_list != NULL) {
+
+ l = mips_hi16_list;
+ while (l != NULL) {
+ unsigned long insn;
+
+ /*
+ * The value for the HI16 had best be the same.
+ */
+ if (v != l->value) {
+ pr_debug("VPE loader: apply_r_mips_lo16/hi16: inconsistent value information\n");
+ goto out_free;
+ }
+
+ /*
+ * Do the HI16 relocation. Note that we actually don't
+ * need to know anything about the LO16 itself, except
+ * where to find the low 16 bits of the addend needed
+ * by the LO16.
+ */
+ insn = *l->addr;
+ val = ((insn & 0xffff) << 16) + vallo;
+ val += v;
+
+ /*
+ * Account for the sign extension that will happen in
+ * the low bits.
+ */
+ val = ((val >> 16) + ((val & 0x8000) != 0)) & 0xffff;
+
+ insn = (insn & ~0xffff) | val;
+ *l->addr = insn;
+
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+
+ mips_hi16_list = NULL;
+ }
+
+ /*
+ * Ok, we're done with the HI16 relocs. Now deal with the LO16.
+ */
+ val = v + vallo;
+ insnlo = (insnlo & ~0xffff) | (val & 0xffff);
+ *location = insnlo;
+
+ return 0;
+
+out_free:
+ while (l != NULL) {
+ next = l->next;
+ kfree(l);
+ l = next;
+ }
+ mips_hi16_list = NULL;
+
+ return -ENOEXEC;
+}
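+
+/*
+ * Worked example of the carry handling above, assuming a stored addend of
+ * zero and a symbol value v = 0x12348abc: the LO16 half is 0x8abc, which
+ * the CPU sign-extends to 0xffff8abc, so the HI16 half must carry one:
+ * ((0x12348abc >> 16) + 1) & 0xffff = 0x1235, and indeed
+ * 0x12350000 + 0xffff8abc = 0x12348abc (mod 2^32).
+ */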
+
+static int (*reloc_handlers[]) (struct module *me, uint32_t *location,
+ Elf32_Addr v) = {
+ [R_MIPS_NONE] = apply_r_mips_none,
+ [R_MIPS_32] = apply_r_mips_32,
+ [R_MIPS_26] = apply_r_mips_26,
+ [R_MIPS_HI16] = apply_r_mips_hi16,
+ [R_MIPS_LO16] = apply_r_mips_lo16,
+ [R_MIPS_GPREL16] = apply_r_mips_gprel16,
+ [R_MIPS_PC16] = apply_r_mips_pc16
+};
+
+static char *rstrs[] = {
+ [R_MIPS_NONE] = "MIPS_NONE",
+ [R_MIPS_32] = "MIPS_32",
+ [R_MIPS_26] = "MIPS_26",
+ [R_MIPS_HI16] = "MIPS_HI16",
+ [R_MIPS_LO16] = "MIPS_LO16",
+ [R_MIPS_GPREL16] = "MIPS_GPREL16",
+ [R_MIPS_PC16] = "MIPS_PC16"
+};
+
+static int apply_relocations(Elf32_Shdr *sechdrs,
+ const char *strtab,
+ unsigned int symindex,
+ unsigned int relsec,
+ struct module *me)
+{
+ Elf32_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ uint32_t *location;
+ unsigned int i;
+ Elf32_Addr v;
+ int res;
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ Elf32_Word r_info = rel[i].r_info;
+
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(r_info);
+
+ if (!sym->st_value) {
+ pr_debug("%s: undefined weak symbol %s\n",
+ me->name, strtab + sym->st_name);
+ /* just print the warning, don't barf */
+ }
+
+ v = sym->st_value;
+
+ res = reloc_handlers[ELF32_R_TYPE(r_info)](me, location, v);
+ if (res) {
+ char *r = rstrs[ELF32_R_TYPE(r_info)];
+ pr_warn("VPE loader: .text+0x%x relocation type %s for symbol \"%s\" failed\n",
+ rel[i].r_offset, r ? r : "UNKNOWN",
+ strtab + sym->st_name);
+ return res;
+ }
+ }
+
+ return 0;
+}
+
+static inline void save_gp_address(unsigned int secbase, unsigned int rel)
+{
+ gp_addr = secbase + rel;
+ gp_offs = gp_addr - (secbase & 0xffff0000);
+}
+/* end module-elf32.c */
+
+/* Change all symbols so that sh_value encodes the pointer directly. */
+static void simplify_symbols(Elf_Shdr *sechdrs,
+ unsigned int symindex,
+ const char *strtab,
+ const char *secstrings,
+ unsigned int nsecs, struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned long secbase, bssbase = 0;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+ int size;
+
+ /* find the .bss section for COMMON symbols */
+ for (i = 0; i < nsecs; i++) {
+ if (strncmp(secstrings + sechdrs[i].sh_name, ".bss", 4) == 0) {
+ bssbase = sechdrs[i].sh_addr;
+ break;
+ }
+ }
+
+ for (i = 1; i < n; i++) {
+ switch (sym[i].st_shndx) {
+ case SHN_COMMON:
+ /* Allocate space for the symbol in the .bss section.
+ st_value is currently size.
+ We want it to have the address of the symbol. */
+
+ size = sym[i].st_value;
+ sym[i].st_value = bssbase;
+
+ bssbase += size;
+ break;
+
+ case SHN_ABS:
+ /* Don't need to do anything */
+ break;
+
+ case SHN_UNDEF:
+ /* ret = -ENOENT; */
+ break;
+
+ case SHN_MIPS_SCOMMON:
+ pr_debug("simplify_symbols: ignoring SHN_MIPS_SCOMMON symbol <%s> st_shndx %d\n",
+ strtab + sym[i].st_name, sym[i].st_shndx);
+ /* .sbss section */
+ break;
+
+ default:
+ secbase = sechdrs[sym[i].st_shndx].sh_addr;
+
+ if (strncmp(strtab + sym[i].st_name, "_gp", 3) == 0)
+ save_gp_address(secbase, sym[i].st_value);
+
+ sym[i].st_value += secbase;
+ break;
+ }
+ }
+}
+
+#ifdef DEBUG_ELFLOADER
+static void dump_elfsymbols(Elf_Shdr *sechdrs, unsigned int symindex,
+ const char *strtab, struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+
+ pr_debug("dump_elfsymbols: n %d\n", n);
+ for (i = 1; i < n; i++) {
+ pr_debug(" i %d name <%s> 0x%x\n", i, strtab + sym[i].st_name,
+ sym[i].st_value);
+ }
+}
+#endif
+
+static int find_vpe_symbols(struct vpe *v, Elf_Shdr *sechdrs,
+ unsigned int symindex, const char *strtab,
+ struct module *mod)
+{
+ Elf_Sym *sym = (void *)sechdrs[symindex].sh_addr;
+ unsigned int i, n = sechdrs[symindex].sh_size / sizeof(Elf_Sym);
+
+ for (i = 1; i < n; i++) {
+ if (strcmp(strtab + sym[i].st_name, "__start") == 0)
+ v->__start = sym[i].st_value;
+
+ if (strcmp(strtab + sym[i].st_name, "vpe_shared") == 0)
+ v->shared_ptr = (void *)sym[i].st_value;
+ }
+
+ if ((v->__start == 0) || (v->shared_ptr == NULL))
+ return -1;
+
+ return 0;
+}
+
+/*
+ * Allocates a VPE with some program code space (the load address), copies
+ * the contents of the program (p)buffer performing relocations etc., and
+ * frees it when finished.
+ */
+static int vpe_elfload(struct vpe *v)
+{
+ Elf_Ehdr *hdr;
+ Elf_Shdr *sechdrs;
+ long err = 0;
+ char *secstrings, *strtab = NULL;
+ unsigned int len, i, symindex = 0, strindex = 0, relocate = 0;
+ struct module mod; /* so we can re-use the relocations code */
+
+ memset(&mod, 0, sizeof(struct module));
+ strcpy(mod.name, "VPE loader");
+
+ hdr = (Elf_Ehdr *) v->pbuffer;
+ len = v->plen;
+
+ /* Sanity checks against insmoding binaries or wrong arch,
+ weird elf version */
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) != 0
+ || (hdr->e_type != ET_REL && hdr->e_type != ET_EXEC)
+ || !elf_check_arch(hdr)
+ || hdr->e_shentsize != sizeof(*sechdrs)) {
+ pr_warn("VPE loader: program wrong arch or weird elf version\n");
+
+ return -ENOEXEC;
+ }
+
+ if (hdr->e_type == ET_REL)
+ relocate = 1;
+
+ if (len < hdr->e_shoff + hdr->e_shnum * sizeof(Elf_Shdr)) {
+ pr_err("VPE loader: program length %u truncated\n", len);
+
+ return -ENOEXEC;
+ }
+
+ /* Convenience variables */
+ sechdrs = (void *)hdr + hdr->e_shoff;
+ secstrings = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+ sechdrs[0].sh_addr = 0;
+
+ /* And these should exist, but gcc whinges if we don't init them */
+ symindex = strindex = 0;
+
+ if (relocate) {
+ for (i = 1; i < hdr->e_shnum; i++) {
+ if ((sechdrs[i].sh_type != SHT_NOBITS) &&
+ (len < sechdrs[i].sh_offset + sechdrs[i].sh_size)) {
+ pr_err("VPE program length %u truncated\n",
+ len);
+ return -ENOEXEC;
+ }
+
+ /* Mark all sections sh_addr with their address in the
+ temporary image. */
+ sechdrs[i].sh_addr = (size_t) hdr +
+ sechdrs[i].sh_offset;
+
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr +
+ sechdrs[strindex].sh_offset;
+ }
+ }
+ layout_sections(&mod, hdr, sechdrs, secstrings);
+ }
+
+ v->load_addr = alloc_progmem(mod.core_layout.size);
+ if (!v->load_addr)
+ return -ENOMEM;
+
+ pr_info("VPE loader: loading to %p\n", v->load_addr);
+
+ if (relocate) {
+ for (i = 0; i < hdr->e_shnum; i++) {
+ void *dest;
+
+ if (!(sechdrs[i].sh_flags & SHF_ALLOC))
+ continue;
+
+ dest = v->load_addr + sechdrs[i].sh_entsize;
+
+ if (sechdrs[i].sh_type != SHT_NOBITS)
+ memcpy(dest, (void *)sechdrs[i].sh_addr,
+ sechdrs[i].sh_size);
+ /* Update sh_addr to point to copy in image. */
+ sechdrs[i].sh_addr = (unsigned long)dest;
+
+ pr_debug(" section sh_name %s sh_addr 0x%x\n",
+ secstrings + sechdrs[i].sh_name,
+ sechdrs[i].sh_addr);
+ }
+
+ /* Fix up syms, so that st_value is a pointer to location. */
+ simplify_symbols(sechdrs, symindex, strtab, secstrings,
+ hdr->e_shnum, &mod);
+
+ /* Now do relocations. */
+ for (i = 1; i < hdr->e_shnum; i++) {
+ const char *strtab = (char *)sechdrs[strindex].sh_addr;
+ unsigned int info = sechdrs[i].sh_info;
+
+ /* Not a valid relocation section? */
+ if (info >= hdr->e_shnum)
+ continue;
+
+ /* Don't bother with non-allocated sections */
+ if (!(sechdrs[info].sh_flags & SHF_ALLOC))
+ continue;
+
+ if (sechdrs[i].sh_type == SHT_REL)
+ err = apply_relocations(sechdrs, strtab,
+ symindex, i, &mod);
+ else if (sechdrs[i].sh_type == SHT_RELA)
+ err = apply_relocate_add(sechdrs, strtab,
+ symindex, i, &mod);
+ if (err < 0)
+ return err;
+
+ }
+ } else {
+ struct elf_phdr *phdr = (struct elf_phdr *)
+ ((char *)hdr + hdr->e_phoff);
+
+ for (i = 0; i < hdr->e_phnum; i++) {
+ if (phdr->p_type == PT_LOAD) {
+ memcpy((void *)phdr->p_paddr,
+ (char *)hdr + phdr->p_offset,
+ phdr->p_filesz);
+ memset((void *)phdr->p_paddr + phdr->p_filesz,
+ 0, phdr->p_memsz - phdr->p_filesz);
+ }
+ phdr++;
+ }
+
+ for (i = 0; i < hdr->e_shnum; i++) {
+ /* Internal symbols and strings. */
+ if (sechdrs[i].sh_type == SHT_SYMTAB) {
+ symindex = i;
+ strindex = sechdrs[i].sh_link;
+ strtab = (char *)hdr +
+ sechdrs[strindex].sh_offset;
+
+ /*
+ * mark symtab's address for when we try
+ * to find the magic symbols
+ */
+ sechdrs[i].sh_addr = (size_t) hdr +
+ sechdrs[i].sh_offset;
+ }
+ }
+ }
+
+ /* make sure it's physically written out */
+ flush_icache_range((unsigned long)v->load_addr,
+ (unsigned long)v->load_addr + v->len);
+
+ if ((find_vpe_symbols(v, sechdrs, symindex, strtab, &mod)) < 0) {
+ if (v->__start == 0) {
+ pr_warn("VPE loader: program does not contain a __start symbol\n");
+ return -ENOEXEC;
+ }
+
+ if (v->shared_ptr == NULL)
+ pr_warn("VPE loader: program does not contain vpe_shared symbol.\n"
+ " Unable to use AMVP (AP/SP) facilities.\n");
+ }
+
+ pr_info(" elf loaded\n");
+ return 0;
+}
+
+/* checks VPE is unused and gets ready to load program */
+static int vpe_open(struct inode *inode, struct file *filp)
+{
+ enum vpe_state state;
+ struct vpe_notifications *notifier;
+ struct vpe *v;
+
+ if (VPE_MODULE_MINOR != iminor(inode)) {
+ /* assume only 1 device at the moment. */
+ pr_warn("VPE loader: only vpe1 is supported\n");
+
+ return -ENODEV;
+ }
+
+ v = get_vpe(aprp_cpu_index());
+ if (v == NULL) {
+ pr_warn("VPE loader: unable to get vpe\n");
+
+ return -ENODEV;
+ }
+
+ state = xchg(&v->state, VPE_STATE_INUSE);
+ if (state != VPE_STATE_UNUSED) {
+ pr_debug("VPE loader: tc in use dumping regs\n");
+
+ list_for_each_entry(notifier, &v->notify, list)
+ notifier->stop(aprp_cpu_index());
+
+ release_progmem(v->load_addr);
+ cleanup_tc(get_tc(aprp_cpu_index()));
+ }
+
+ /* this of course trashes what was there before... */
+ v->pbuffer = vmalloc(P_SIZE);
+ if (!v->pbuffer) {
+ pr_warn("VPE loader: unable to allocate memory\n");
+ return -ENOMEM;
+ }
+ v->plen = P_SIZE;
+ v->load_addr = NULL;
+ v->len = 0;
+ v->shared_ptr = NULL;
+ v->__start = 0;
+
+ return 0;
+}
+
+static int vpe_release(struct inode *inode, struct file *filp)
+{
+#if defined(CONFIG_MIPS_VPE_LOADER_MT) || defined(CONFIG_MIPS_VPE_LOADER_CMP)
+ struct vpe *v;
+ Elf_Ehdr *hdr;
+ int ret = 0;
+
+ v = get_vpe(aprp_cpu_index());
+ if (v == NULL)
+ return -ENODEV;
+
+ hdr = (Elf_Ehdr *) v->pbuffer;
+ if (memcmp(hdr->e_ident, ELFMAG, SELFMAG) == 0) {
+ if (vpe_elfload(v) >= 0) {
+ vpe_run(v);
+ } else {
+ pr_warn("VPE loader: ELF load failed.\n");
+ ret = -ENOEXEC;
+ }
+ } else {
+ pr_warn("VPE loader: only elf files are supported\n");
+ ret = -ENOEXEC;
+ }
+
+ /* It's good to be able to run the SP and if it chokes have a look at
+ the /dev/rt?. But if we reset the pointer to the shared struct we
+ lose what has happened. So perhaps if garbage is sent to the vpe
+ device, use it as a trigger for the reset. Hopefully a nice
+ executable will be along shortly. */
+ if (ret < 0)
+ v->shared_ptr = NULL;
+
+ vfree(v->pbuffer);
+ v->plen = 0;
+
+ return ret;
+#else
+ pr_warn("VPE loader: ELF load failed.\n");
+ return -ENOEXEC;
+#endif
+}
+
+static ssize_t vpe_write(struct file *file, const char __user *buffer,
+ size_t count, loff_t *ppos)
+{
+ size_t ret = count;
+ struct vpe *v;
+
+ if (iminor(file_inode(file)) != VPE_MODULE_MINOR)
+ return -ENODEV;
+
+ v = get_vpe(aprp_cpu_index());
+
+ if (v == NULL)
+ return -ENODEV;
+
+ if ((count + v->len) > v->plen) {
+ pr_warn("VPE loader: elf size too big. Perhaps strip unneeded symbols\n");
+ return -ENOMEM;
+ }
+
+ count -= copy_from_user(v->pbuffer + v->len, buffer, count);
+ if (!count)
+ return -EFAULT;
+
+ v->len += count;
+ return ret;
+}
+
+const struct file_operations vpe_fops = {
+ .owner = THIS_MODULE,
+ .open = vpe_open,
+ .release = vpe_release,
+ .write = vpe_write,
+ .llseek = noop_llseek,
+};
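+
+/*
+ * From user space the loader is driven entirely through these file
+ * operations. A minimal sketch, assuming the /dev/vpe1 node described in
+ * the header comment; 'elf_image' and 'elf_size' are hypothetical and
+ * stand for a buffer holding the SP program:
+ */
+#if 0 /* illustrative sketch, not built */
+ int fd = open("/dev/vpe1", O_WRONLY); /* vpe_open(): claims the VPE */
+
+ write(fd, elf_image, elf_size); /* vpe_write(): buffers the ELF */
+ close(fd); /* vpe_release(): loads and runs it */
+#endif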
+
+void *vpe_get_shared(int index)
+{
+ struct vpe *v = get_vpe(index);
+
+ if (v == NULL)
+ return NULL;
+
+ return v->shared_ptr;
+}
+EXPORT_SYMBOL(vpe_get_shared);
+
+int vpe_notify(int index, struct vpe_notifications *notify)
+{
+ struct vpe *v = get_vpe(index);
+
+ if (v == NULL)
+ return -1;
+
+ list_add(&notify->list, &v->notify);
+ return 0;
+}
+EXPORT_SYMBOL(vpe_notify);
+
+module_init(vpe_module_init);
+module_exit(vpe_module_exit);
+MODULE_DESCRIPTION("MIPS VPE Loader");
+MODULE_AUTHOR("Elizabeth Oldham, MIPS Technologies, Inc.");
+MODULE_LICENSE("GPL");
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
new file mode 100644
index 000000000..c9263b95c
--- /dev/null
+++ b/arch/mips/kernel/watch.c
@@ -0,0 +1,211 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2008 David Daney
+ */
+
+#include <linux/sched.h>
+
+#include <asm/processor.h>
+#include <asm/watch.h>
+
+/*
+ * Install the watch registers for the current thread. A maximum of
+ * four registers are installed although the machine may have more.
+ */
+void mips_install_watch_registers(struct task_struct *t)
+{
+ struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
+ unsigned int watchhi = MIPS_WATCHHI_G | /* Trap all ASIDs */
+ MIPS_WATCHHI_IRW; /* Clear result bits */
+
+ switch (current_cpu_data.watch_reg_use_cnt) {
+ default:
+ BUG();
+ case 4:
+ write_c0_watchlo3(watches->watchlo[3]);
+ write_c0_watchhi3(watchhi | watches->watchhi[3]);
+ fallthrough;
+ case 3:
+ write_c0_watchlo2(watches->watchlo[2]);
+ write_c0_watchhi2(watchhi | watches->watchhi[2]);
+ fallthrough;
+ case 2:
+ write_c0_watchlo1(watches->watchlo[1]);
+ write_c0_watchhi1(watchhi | watches->watchhi[1]);
+ fallthrough;
+ case 1:
+ write_c0_watchlo0(watches->watchlo[0]);
+ write_c0_watchhi0(watchhi | watches->watchhi[0]);
+ }
+}
+
+/*
+ * Read back the watchhi registers so the user space debugger has
+ * access to the I, R, and W bits. A maximum of four registers are
+ * read although the machine may have more.
+ */
+void mips_read_watch_registers(void)
+{
+ struct mips3264_watch_reg_state *watches =
+ &current->thread.watch.mips3264;
+ unsigned int watchhi_mask = MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW;
+
+ switch (current_cpu_data.watch_reg_use_cnt) {
+ default:
+ BUG();
+ case 4:
+ watches->watchhi[3] = (read_c0_watchhi3() & watchhi_mask);
+ fallthrough;
+ case 3:
+ watches->watchhi[2] = (read_c0_watchhi2() & watchhi_mask);
+ fallthrough;
+ case 2:
+ watches->watchhi[1] = (read_c0_watchhi1() & watchhi_mask);
+ fallthrough;
+ case 1:
+ watches->watchhi[0] = (read_c0_watchhi0() & watchhi_mask);
+ }
+ if (current_cpu_data.watch_reg_use_cnt == 1 &&
+ (watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) {
+ /* Pathological case of release 1 architecture that
+ * doesn't set the condition bits. We assume that
+ * since we got here, the watch condition was met and
+ * signal that the conditions requested in watchlo
+ * were met. */
+ watches->watchhi[0] |= (watches->watchlo[0] & MIPS_WATCHHI_IRW);
+ }
+ }
+
+/*
+ * Disable all watch registers. Although only four registers are
+ * installed, all are cleared to eliminate the possibility of endless
+ * looping in the watch handler.
+ */
+void mips_clear_watch_registers(void)
+{
+ switch (current_cpu_data.watch_reg_count) {
+ default:
+ BUG();
+ case 8:
+ write_c0_watchlo7(0);
+ fallthrough;
+ case 7:
+ write_c0_watchlo6(0);
+ fallthrough;
+ case 6:
+ write_c0_watchlo5(0);
+ fallthrough;
+ case 5:
+ write_c0_watchlo4(0);
+ fallthrough;
+ case 4:
+ write_c0_watchlo3(0);
+ fallthrough;
+ case 3:
+ write_c0_watchlo2(0);
+ fallthrough;
+ case 2:
+ write_c0_watchlo1(0);
+ fallthrough;
+ case 1:
+ write_c0_watchlo0(0);
+ }
+}
+
+void mips_probe_watch_registers(struct cpuinfo_mips *c)
+{
+ unsigned int t;
+
+ if ((c->options & MIPS_CPU_WATCH) == 0)
+ return;
+ /*
+ * Check which of the I,R and W bits are supported, then
+ * disable the register.
+ */
+ write_c0_watchlo0(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo0();
+ write_c0_watchlo0(0);
+ c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW;
+
+ /* Write the mask bits and read them back to determine which
+ * can be used. */
+ c->watch_reg_count = 1;
+ c->watch_reg_use_cnt = 1;
+ t = read_c0_watchhi0();
+ write_c0_watchhi0(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi0();
+ c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ write_c0_watchlo1(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo1();
+ write_c0_watchlo1(0);
+ c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW;
+
+ c->watch_reg_count = 2;
+ c->watch_reg_use_cnt = 2;
+ t = read_c0_watchhi1();
+ write_c0_watchhi1(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi1();
+ c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ write_c0_watchlo2(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo2();
+ write_c0_watchlo2(0);
+ c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW;
+
+ c->watch_reg_count = 3;
+ c->watch_reg_use_cnt = 3;
+ t = read_c0_watchhi2();
+ write_c0_watchhi2(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi2();
+ c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ write_c0_watchlo3(MIPS_WATCHLO_IRW);
+ back_to_back_c0_hazard();
+ t = read_c0_watchlo3();
+ write_c0_watchlo3(0);
+ c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW;
+
+ c->watch_reg_count = 4;
+ c->watch_reg_use_cnt = 4;
+ t = read_c0_watchhi3();
+ write_c0_watchhi3(t | MIPS_WATCHHI_MASK);
+ back_to_back_c0_hazard();
+ t = read_c0_watchhi3();
+ c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ /* We use at most 4, but probe and report up to 8. */
+ c->watch_reg_count = 5;
+ t = read_c0_watchhi4();
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ c->watch_reg_count = 6;
+ t = read_c0_watchhi5();
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ c->watch_reg_count = 7;
+ t = read_c0_watchhi6();
+ if ((t & MIPS_WATCHHI_M) == 0)
+ return;
+
+ c->watch_reg_count = 8;
+}
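+
+/*
+ * The probe sequence above repeats one handshake per register: write the
+ * I/R/W bits to watchlo and read them back to learn which conditions are
+ * implemented, write the MASK bits to watchhi the same way, and move on
+ * to the next register only while the M bit in watchhi reads back set.
+ */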