Diffstat (limited to 'arch/nios2/kernel')
-rw-r--r--  arch/nios2/kernel/.gitignore         2
-rw-r--r--  arch/nios2/kernel/Makefile          26
-rw-r--r--  arch/nios2/kernel/asm-offsets.c     74
-rw-r--r--  arch/nios2/kernel/cpuinfo.c        195
-rw-r--r--  arch/nios2/kernel/entry.S          568
-rw-r--r--  arch/nios2/kernel/head.S           175
-rw-r--r--  arch/nios2/kernel/insnemu.S        580
-rw-r--r--  arch/nios2/kernel/irq.c             80
-rw-r--r--  arch/nios2/kernel/kgdb.c           158
-rw-r--r--  arch/nios2/kernel/misaligned.c     238
-rw-r--r--  arch/nios2/kernel/module.c         137
-rw-r--r--  arch/nios2/kernel/nios2_ksyms.c     45
-rw-r--r--  arch/nios2/kernel/process.c        270
-rw-r--r--  arch/nios2/kernel/prom.c            37
-rw-r--r--  arch/nios2/kernel/ptrace.c         146
-rw-r--r--  arch/nios2/kernel/setup.c          199
-rw-r--r--  arch/nios2/kernel/signal.c         328
-rw-r--r--  arch/nios2/kernel/sys_nios2.c       60
-rw-r--r--  arch/nios2/kernel/syscall_table.c   18
-rw-r--r--  arch/nios2/kernel/time.c           359
-rw-r--r--  arch/nios2/kernel/traps.c          201
-rw-r--r--  arch/nios2/kernel/vmlinux.lds.S     64
22 files changed, 3960 insertions, 0 deletions
diff --git a/arch/nios2/kernel/.gitignore b/arch/nios2/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/nios2/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/nios2/kernel/Makefile b/arch/nios2/kernel/Makefile
new file mode 100644
index 000000000..0b645e1e3
--- /dev/null
+++ b/arch/nios2/kernel/Makefile
@@ -0,0 +1,26 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile for the nios2 linux kernel.
+#
+
+extra-y += head.o
+extra-y += vmlinux.lds
+
+obj-y += cpuinfo.o
+obj-y += entry.o
+obj-y += insnemu.o
+obj-y += irq.o
+obj-y += nios2_ksyms.o
+obj-y += process.o
+obj-y += prom.o
+obj-y += ptrace.o
+obj-y += setup.o
+obj-y += signal.o
+obj-y += sys_nios2.o
+obj-y += syscall_table.o
+obj-y += time.o
+obj-y += traps.o
+
+obj-$(CONFIG_KGDB) += kgdb.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_NIOS2_ALIGNMENT_TRAP) += misaligned.o
diff --git a/arch/nios2/kernel/asm-offsets.c b/arch/nios2/kernel/asm-offsets.c
new file mode 100644
index 000000000..e3d9b7b6f
--- /dev/null
+++ b/arch/nios2/kernel/asm-offsets.c
@@ -0,0 +1,74 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ */
+
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+
+int main(void)
+{
+ /* struct task_struct */
+ OFFSET(TASK_THREAD, task_struct, thread);
+ BLANK();
+
+ /* struct thread_struct */
+ OFFSET(THREAD_KSP, thread_struct, ksp);
+ OFFSET(THREAD_KPSR, thread_struct, kpsr);
+ BLANK();
+
+ /* struct pt_regs */
+ OFFSET(PT_ORIG_R2, pt_regs, orig_r2);
+ OFFSET(PT_ORIG_R7, pt_regs, orig_r7);
+
+ OFFSET(PT_R1, pt_regs, r1);
+ OFFSET(PT_R2, pt_regs, r2);
+ OFFSET(PT_R3, pt_regs, r3);
+ OFFSET(PT_R4, pt_regs, r4);
+ OFFSET(PT_R5, pt_regs, r5);
+ OFFSET(PT_R6, pt_regs, r6);
+ OFFSET(PT_R7, pt_regs, r7);
+ OFFSET(PT_R8, pt_regs, r8);
+ OFFSET(PT_R9, pt_regs, r9);
+ OFFSET(PT_R10, pt_regs, r10);
+ OFFSET(PT_R11, pt_regs, r11);
+ OFFSET(PT_R12, pt_regs, r12);
+ OFFSET(PT_R13, pt_regs, r13);
+ OFFSET(PT_R14, pt_regs, r14);
+ OFFSET(PT_R15, pt_regs, r15);
+ OFFSET(PT_EA, pt_regs, ea);
+ OFFSET(PT_RA, pt_regs, ra);
+ OFFSET(PT_FP, pt_regs, fp);
+ OFFSET(PT_SP, pt_regs, sp);
+ OFFSET(PT_GP, pt_regs, gp);
+ OFFSET(PT_ESTATUS, pt_regs, estatus);
+ DEFINE(PT_REGS_SIZE, sizeof(struct pt_regs));
+ BLANK();
+
+ /* struct switch_stack */
+ OFFSET(SW_R16, switch_stack, r16);
+ OFFSET(SW_R17, switch_stack, r17);
+ OFFSET(SW_R18, switch_stack, r18);
+ OFFSET(SW_R19, switch_stack, r19);
+ OFFSET(SW_R20, switch_stack, r20);
+ OFFSET(SW_R21, switch_stack, r21);
+ OFFSET(SW_R22, switch_stack, r22);
+ OFFSET(SW_R23, switch_stack, r23);
+ OFFSET(SW_FP, switch_stack, fp);
+ OFFSET(SW_GP, switch_stack, gp);
+ OFFSET(SW_RA, switch_stack, ra);
+ DEFINE(SWITCH_STACK_SIZE, sizeof(struct switch_stack));
+ BLANK();
+
+ /* struct thread_info */
+ OFFSET(TI_FLAGS, thread_info, flags);
+ OFFSET(TI_PREEMPT_COUNT, thread_info, preempt_count);
+ BLANK();
+
+ return 0;
+}
diff --git a/arch/nios2/kernel/cpuinfo.c b/arch/nios2/kernel/cpuinfo.c
new file mode 100644
index 000000000..203870c4b
--- /dev/null
+++ b/arch/nios2/kernel/cpuinfo.c
@@ -0,0 +1,195 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * Based on cpuinfo.c from microblaze
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/seq_file.h>
+#include <linux/string.h>
+#include <linux/of.h>
+#include <asm/cpuinfo.h>
+
+struct cpuinfo cpuinfo;
+
+#define err_cpu(x) \
+ pr_err("ERROR: Nios II " x " different for kernel and DTS\n")
+
+static inline u32 fcpu(struct device_node *cpu, const char *n)
+{
+ u32 val = 0;
+
+ of_property_read_u32(cpu, n, &val);
+
+ return val;
+}
+
+void __init setup_cpuinfo(void)
+{
+ struct device_node *cpu;
+ const char *str;
+ int len;
+
+ cpu = of_get_cpu_node(0, NULL);
+ if (!cpu)
+ panic("%s: No CPU found in devicetree!\n", __func__);
+
+ if (!of_property_read_bool(cpu, "altr,has-initda"))
+ panic("initda instruction is unimplemented. Please update your "
+ "hardware system to have more than 4-byte line data "
+ "cache\n");
+
+ cpuinfo.cpu_clock_freq = fcpu(cpu, "clock-frequency");
+
+ str = of_get_property(cpu, "altr,implementation", &len);
+ if (str)
+ strlcpy(cpuinfo.cpu_impl, str, sizeof(cpuinfo.cpu_impl));
+ else
+ strcpy(cpuinfo.cpu_impl, "<unknown>");
+
+ cpuinfo.has_div = of_property_read_bool(cpu, "altr,has-div");
+ cpuinfo.has_mul = of_property_read_bool(cpu, "altr,has-mul");
+ cpuinfo.has_mulx = of_property_read_bool(cpu, "altr,has-mulx");
+ cpuinfo.has_bmx = of_property_read_bool(cpu, "altr,has-bmx");
+ cpuinfo.has_cdx = of_property_read_bool(cpu, "altr,has-cdx");
+ cpuinfo.mmu = of_property_read_bool(cpu, "altr,has-mmu");
+
+ if (IS_ENABLED(CONFIG_NIOS2_HW_DIV_SUPPORT) && !cpuinfo.has_div)
+ err_cpu("DIV");
+
+ if (IS_ENABLED(CONFIG_NIOS2_HW_MUL_SUPPORT) && !cpuinfo.has_mul)
+ err_cpu("MUL");
+
+ if (IS_ENABLED(CONFIG_NIOS2_HW_MULX_SUPPORT) && !cpuinfo.has_mulx)
+ err_cpu("MULX");
+
+ if (IS_ENABLED(CONFIG_NIOS2_BMX_SUPPORT) && !cpuinfo.has_bmx)
+ err_cpu("BMX");
+
+ if (IS_ENABLED(CONFIG_NIOS2_CDX_SUPPORT) && !cpuinfo.has_cdx)
+ err_cpu("CDX");
+
+ cpuinfo.tlb_num_ways = fcpu(cpu, "altr,tlb-num-ways");
+ if (!cpuinfo.tlb_num_ways)
+ panic("altr,tlb-num-ways can't be 0. Please check your hardware "
+ "system\n");
+ cpuinfo.icache_line_size = fcpu(cpu, "icache-line-size");
+ cpuinfo.icache_size = fcpu(cpu, "icache-size");
+ if (CONFIG_NIOS2_ICACHE_SIZE != cpuinfo.icache_size)
+ pr_warn("Warning: icache size configuration mismatch "
+ "(0x%x vs 0x%x) of CONFIG_NIOS2_ICACHE_SIZE vs "
+ "device tree icache-size\n",
+ CONFIG_NIOS2_ICACHE_SIZE, cpuinfo.icache_size);
+
+ cpuinfo.dcache_line_size = fcpu(cpu, "dcache-line-size");
+ if (CONFIG_NIOS2_DCACHE_LINE_SIZE != cpuinfo.dcache_line_size)
+ pr_warn("Warning: dcache line size configuration mismatch "
+ "(0x%x vs 0x%x) of CONFIG_NIOS2_DCACHE_LINE_SIZE vs "
+ "device tree dcache-line-size\n",
+ CONFIG_NIOS2_DCACHE_LINE_SIZE, cpuinfo.dcache_line_size);
+ cpuinfo.dcache_size = fcpu(cpu, "dcache-size");
+ if (CONFIG_NIOS2_DCACHE_SIZE != cpuinfo.dcache_size)
+ pr_warn("Warning: dcache size configuration mismatch "
+ "(0x%x vs 0x%x) of CONFIG_NIOS2_DCACHE_SIZE vs "
+ "device tree dcache-size\n",
+ CONFIG_NIOS2_DCACHE_SIZE, cpuinfo.dcache_size);
+
+ cpuinfo.tlb_pid_num_bits = fcpu(cpu, "altr,pid-num-bits");
+ cpuinfo.tlb_num_ways_log2 = ilog2(cpuinfo.tlb_num_ways);
+ cpuinfo.tlb_num_entries = fcpu(cpu, "altr,tlb-num-entries");
+ cpuinfo.tlb_num_lines = cpuinfo.tlb_num_entries / cpuinfo.tlb_num_ways;
+ cpuinfo.tlb_ptr_sz = fcpu(cpu, "altr,tlb-ptr-sz");
+
+ cpuinfo.reset_addr = fcpu(cpu, "altr,reset-addr");
+ cpuinfo.exception_addr = fcpu(cpu, "altr,exception-addr");
+ cpuinfo.fast_tlb_miss_exc_addr = fcpu(cpu, "altr,fast-tlb-miss-addr");
+
+ of_node_put(cpu);
+}
+
+#ifdef CONFIG_PROC_FS
+
+/*
+ * Get CPU information for use by the procfs.
+ */
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ const u32 clockfreq = cpuinfo.cpu_clock_freq;
+
+ seq_printf(m,
+ "CPU:\t\tNios II/%s\n"
+ "REV:\t\t%i\n"
+ "MMU:\t\t%s\n"
+ "FPU:\t\tnone\n"
+ "Clocking:\t%u.%02u MHz\n"
+ "BogoMips:\t%lu.%02lu\n"
+ "Calibration:\t%lu loops\n",
+ cpuinfo.cpu_impl,
+ CONFIG_NIOS2_ARCH_REVISION,
+ cpuinfo.mmu ? "present" : "none",
+ clockfreq / 1000000, (clockfreq / 100000) % 10,
+ (loops_per_jiffy * HZ) / 500000,
+ ((loops_per_jiffy * HZ) / 5000) % 100,
+ (loops_per_jiffy * HZ));
+
+ seq_printf(m,
+ "HW:\n"
+ " MUL:\t\t%s\n"
+ " MULX:\t\t%s\n"
+ " DIV:\t\t%s\n"
+ " BMX:\t\t%s\n"
+ " CDX:\t\t%s\n",
+ cpuinfo.has_mul ? "yes" : "no",
+ cpuinfo.has_mulx ? "yes" : "no",
+ cpuinfo.has_div ? "yes" : "no",
+ cpuinfo.has_bmx ? "yes" : "no",
+ cpuinfo.has_cdx ? "yes" : "no");
+
+ seq_printf(m,
+ "Icache:\t\t%ukB, line length: %u\n",
+ cpuinfo.icache_size >> 10,
+ cpuinfo.icache_line_size);
+
+ seq_printf(m,
+ "Dcache:\t\t%ukB, line length: %u\n",
+ cpuinfo.dcache_size >> 10,
+ cpuinfo.dcache_line_size);
+
+ seq_printf(m,
+ "TLB:\t\t%u ways, %u entries, %u PID bits\n",
+ cpuinfo.tlb_num_ways,
+ cpuinfo.tlb_num_entries,
+ cpuinfo.tlb_pid_num_bits);
+
+ return 0;
+}
+
+static void *cpuinfo_start(struct seq_file *m, loff_t *pos)
+{
+ unsigned long i = *pos;
+
+ return i < num_possible_cpus() ? (void *) (i + 1) : NULL;
+}
+
+static void *cpuinfo_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return cpuinfo_start(m, pos);
+}
+
+static void cpuinfo_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = cpuinfo_start,
+ .next = cpuinfo_next,
+ .stop = cpuinfo_stop,
+ .show = show_cpuinfo
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/nios2/kernel/entry.S b/arch/nios2/kernel/entry.S
new file mode 100644
index 000000000..99f0a65e6
--- /dev/null
+++ b/arch/nios2/kernel/entry.S
@@ -0,0 +1,568 @@
+/*
+ * linux/arch/nios2/kernel/entry.S
+ *
+ * Copyright (C) 2013-2014 Altera Corporation
+ * Copyright (C) 2009, Wind River Systems Inc
+ *
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ *
+ * Copyright (C) 1999-2002, Greg Ungerer (gerg@snapgear.com)
+ * Copyright (C) 1998 D. Jeff Dionne <jeff@lineo.ca>,
+ * Kenneth Albanowski <kjahds@kjahds.com>,
+ * Copyright (C) 2000 Lineo Inc. (www.lineo.com)
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Linux/m68k support by Hamish Macdonald
+ *
+ * 68060 fixes by Jesper Skov
+ * ColdFire support by Greg Ungerer (gerg@snapgear.com)
+ * 5307 fixes by David W. Miller
+ * linux 2.4 support David McCullough <davidm@snapgear.com>
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-macros.h>
+#include <asm/thread_info.h>
+#include <asm/errno.h>
+#include <asm/setup.h>
+#include <asm/entry.h>
+#include <asm/unistd.h>
+#include <asm/processor.h>
+
+.macro GET_THREAD_INFO reg
+.if THREAD_SIZE & 0xffff0000
+ andhi \reg, sp, %hi(~(THREAD_SIZE-1))
+.else
+ addi \reg, r0, %lo(~(THREAD_SIZE-1))
+ and \reg, \reg, sp
+.endif
+.endm
+
+.macro kuser_cmpxchg_check
+ /*
+ * Make sure our user space atomic helper is restarted if it was
+ * interrupted in a critical region.
+ * ea-4 = address of interrupted insn (ea must be preserved).
+ * sp = saved regs.
+ * cmpxchg_ldw = first critical insn, cmpxchg_stw = last critical insn.
+ * If ea <= cmpxchg_stw and ea > cmpxchg_ldw then saved EA is set to
+ * cmpxchg_ldw + 4.
+ */
+ /* et = cmpxchg_stw + 4 */
+ movui et, (KUSER_BASE + 4 + (cmpxchg_stw - __kuser_helper_start))
+ bgtu ea, et, 1f
+
+ subi et, et, (cmpxchg_stw - cmpxchg_ldw) /* et = cmpxchg_ldw + 4 */
+ bltu ea, et, 1f
+ stw et, PT_EA(sp) /* fix up EA */
+ mov ea, et
+1:
+.endm
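A rough C rendering of the window check this macro performs (illustrative sketch only, not kernel code; the function name and address parameters are hypothetical):

/* Sketch: restart the user cmpxchg helper if it was interrupted between
 * cmpxchg_ldw and cmpxchg_stw. ldw_addr/stw_addr stand for the user-space
 * addresses of those two instructions; saved_ea is the saved exception EA.
 */
static void kuser_cmpxchg_fixup(unsigned long *saved_ea,
				unsigned long ldw_addr, unsigned long stw_addr)
{
	unsigned long ea = *saved_ea;	/* ea = interrupted instruction + 4 */

	/* Interrupted inside [cmpxchg_ldw, cmpxchg_stw]? Restart at the load. */
	if (ea >= ldw_addr + 4 && ea <= stw_addr + 4)
		*saved_ea = ldw_addr + 4;
}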
+
+.section .rodata
+.align 4
+exception_table:
+ .word unhandled_exception /* 0 - Reset */
+ .word unhandled_exception /* 1 - Processor-only Reset */
+ .word external_interrupt /* 2 - Interrupt */
+ .word handle_trap /* 3 - Trap Instruction */
+
+ .word instruction_trap /* 4 - Unimplemented instruction */
+ .word handle_illegal /* 5 - Illegal instruction */
+ .word handle_unaligned /* 6 - Misaligned data access */
+ .word handle_unaligned /* 7 - Misaligned destination address */
+
+ .word handle_diverror /* 8 - Division error */
+ .word protection_exception_ba /* 9 - Supervisor-only instr. address */
+ .word protection_exception_instr /* 10 - Supervisor only instruction */
+ .word protection_exception_ba /* 11 - Supervisor only data address */
+
+ .word unhandled_exception /* 12 - Double TLB miss (data) */
+ .word protection_exception_pte /* 13 - TLB permission violation (x) */
+ .word protection_exception_pte /* 14 - TLB permission violation (r) */
+ .word protection_exception_pte /* 15 - TLB permission violation (w) */
+
+ .word unhandled_exception /* 16 - MPU region violation */
+
+trap_table:
+ .word handle_system_call /* 0 */
+ .word handle_trap_1 /* 1 */
+ .word handle_trap_2 /* 2 */
+ .word handle_trap_3 /* 3 */
+ .word handle_trap_reserved /* 4 */
+ .word handle_trap_reserved /* 5 */
+ .word handle_trap_reserved /* 6 */
+ .word handle_trap_reserved /* 7 */
+ .word handle_trap_reserved /* 8 */
+ .word handle_trap_reserved /* 9 */
+ .word handle_trap_reserved /* 10 */
+ .word handle_trap_reserved /* 11 */
+ .word handle_trap_reserved /* 12 */
+ .word handle_trap_reserved /* 13 */
+ .word handle_trap_reserved /* 14 */
+ .word handle_trap_reserved /* 15 */
+ .word handle_trap_reserved /* 16 */
+ .word handle_trap_reserved /* 17 */
+ .word handle_trap_reserved /* 18 */
+ .word handle_trap_reserved /* 19 */
+ .word handle_trap_reserved /* 20 */
+ .word handle_trap_reserved /* 21 */
+ .word handle_trap_reserved /* 22 */
+ .word handle_trap_reserved /* 23 */
+ .word handle_trap_reserved /* 24 */
+ .word handle_trap_reserved /* 25 */
+ .word handle_trap_reserved /* 26 */
+ .word handle_trap_reserved /* 27 */
+ .word handle_trap_reserved /* 28 */
+ .word handle_trap_reserved /* 29 */
+#ifdef CONFIG_KGDB
+ .word handle_kgdb_breakpoint /* 30 KGDB breakpoint */
+#else
+ .word instruction_trap /* 30 */
+#endif
+ .word handle_breakpoint /* 31 */
+
+.text
+.set noat
+.set nobreak
+
+ENTRY(inthandler)
+ SAVE_ALL
+
+ kuser_cmpxchg_check
+
+ /* Clear the EH bit before we get a new exception in the kernel
+ * and after we have saved it to the exception frame. This is done
+ * whether it's a trap, TLB miss or interrupt. If we don't do this,
+ * estatus is not updated on the next exception.
+ */
+ rdctl r24, status
+ movi r9, %lo(~STATUS_EH)
+ and r24, r24, r9
+ wrctl status, r24
+
+ /* Read cause and vector and branch to the associated handler */
+ mov r4, sp
+ rdctl r5, exception
+ movia r9, exception_table
+ add r24, r9, r5
+ ldw r24, 0(r24)
+ jmp r24
+
+
+/***********************************************************************
+ * Handle traps
+ ***********************************************************************
+ */
+ENTRY(handle_trap)
+ ldwio r24, -4(ea) /* instruction that caused the exception */
+ srli r24, r24, 4
+ andi r24, r24, 0x7c
+ movia r9,trap_table
+ add r24, r24, r9
+ ldw r24, 0(r24)
+ jmp r24
+
+
+/***********************************************************************
+ * Handle system calls
+ ***********************************************************************
+ */
+ENTRY(handle_system_call)
+ /* Enable interrupts */
+ rdctl r10, status
+ ori r10, r10, STATUS_PIE
+ wrctl status, r10
+
+ /* Reload registers destroyed by common code. */
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+
+local_restart:
+ stw r2, PT_ORIG_R2(sp)
+ /* Check that the requested system call is within limits */
+ movui r1, __NR_syscalls
+ bgeu r2, r1, ret_invsyscall
+ slli r1, r2, 2
+ movhi r11, %hiadj(sys_call_table)
+ add r1, r1, r11
+ ldw r1, %lo(sys_call_table)(r1)
+
+ /* Check if we are being traced */
+ GET_THREAD_INFO r11
+ ldw r11,TI_FLAGS(r11)
+ BTBNZ r11,r11,TIF_SYSCALL_TRACE,traced_system_call
+
+ /* Execute the system call */
+ callr r1
+
+ /* If the syscall returns a negative result:
+ * set r7 to 1 to indicate an error and
+ * negate r2 to get a positive error code.
+ * If the syscall returns zero or a positive value:
+ * set r7 to 0.
+ * The sigreturn system calls skip the code below (by adding to
+ * register ra) so that the register values they restore survive.
+ */
+translate_rc_and_ret:
+ movi r1, 0
+ bge r2, zero, 3f
+ ldw r1, PT_ORIG_R2(sp)
+ addi r1, r1, 1
+ beq r1, zero, 3f
+ sub r2, zero, r2
+ movi r1, 1
+3:
+ stw r2, PT_R2(sp)
+ stw r1, PT_R7(sp)
+end_translate_rc_and_ret:
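For reference, a minimal C sketch of how a user-space wrapper would consume the (r2, r7) pair set up above; this illustrates the convention described in the comment and is not an actual libc implementation.

#include <errno.h>

/* Hypothetical libc-style helper: r7 == 1 means r2 carries a positive errno
 * value, r7 == 0 means r2 is the syscall's result. */
static long nios2_syscall_return(unsigned long r2, unsigned long r7)
{
	if (r7) {
		errno = (int)r2;
		return -1;
	}
	return (long)r2;
}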
+
+ret_from_exception:
+ ldw r1, PT_ESTATUS(sp)
+ /* if so, skip resched, signals */
+ TSTBNZ r1, r1, ESTATUS_EU, Luser_return
+
+restore_all:
+ rdctl r10, status /* disable intrs */
+ andi r10, r10, %lo(~STATUS_PIE)
+ wrctl status, r10
+ RESTORE_ALL
+ eret
+
+ /* If the syscall number was invalid return ENOSYS */
+ret_invsyscall:
+ movi r2, -ENOSYS
+ br translate_rc_and_ret
+
+ /* This implements the same as above, except it calls
+ * do_syscall_trace_enter and do_syscall_trace_exit before and after the
+ * syscall in order for utilities like strace and gdb to work.
+ */
+traced_system_call:
+ SAVE_SWITCH_STACK
+ call do_syscall_trace_enter
+ RESTORE_SWITCH_STACK
+
+ /* Create system call register arguments. The 5th and 6th
+ arguments on the stack are already in place at the beginning
+ of pt_regs. */
+ ldw r2, PT_R2(sp)
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+
+ /* Fetch the syscall function. */
+ movui r1, __NR_syscalls
+ bgeu r2, r1, traced_invsyscall
+ slli r1, r2, 2
+ movhi r11,%hiadj(sys_call_table)
+ add r1, r1, r11
+ ldw r1, %lo(sys_call_table)(r1)
+
+ callr r1
+
+ /* If the syscall returns a negative result:
+ * set r7 to 1 to indicate an error and
+ * negate r2 to get a positive error code.
+ * If the syscall returns zero or a positive value:
+ * set r7 to 0.
+ * The sigreturn system calls skip the code below (by adding to
+ * register ra) so that the register values they restore survive.
+ */
+translate_rc_and_ret2:
+ movi r1, 0
+ bge r2, zero, 4f
+ ldw r1, PT_ORIG_R2(sp)
+ addi r1, r1, 1
+ beq r1, zero, 4f
+ sub r2, zero, r2
+ movi r1, 1
+4:
+ stw r2, PT_R2(sp)
+ stw r1, PT_R7(sp)
+end_translate_rc_and_ret2:
+ SAVE_SWITCH_STACK
+ call do_syscall_trace_exit
+ RESTORE_SWITCH_STACK
+ br ret_from_exception
+
+ /* If the syscall number was invalid return ENOSYS */
+traced_invsyscall:
+ movi r2, -ENOSYS
+ br translate_rc_and_ret2
+
+Luser_return:
+ GET_THREAD_INFO r11 /* get thread_info pointer */
+ ldw r10, TI_FLAGS(r11) /* get thread_info->flags */
+ ANDI32 r11, r10, _TIF_WORK_MASK
+ beq r11, r0, restore_all /* Nothing to do */
+ BTBZ r1, r10, TIF_NEED_RESCHED, Lsignal_return
+
+ /* Reschedule work */
+ call schedule
+ br ret_from_exception
+
+Lsignal_return:
+ ANDI32 r1, r10, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
+ beq r1, r0, restore_all
+ mov r4, sp /* pt_regs */
+ SAVE_SWITCH_STACK
+ call do_notify_resume
+ beq r2, r0, no_work_pending
+ RESTORE_SWITCH_STACK
+ /* prepare restart syscall here without leaving kernel */
+ ldw r2, PT_R2(sp) /* reload syscall number in r2 */
+ ldw r4, PT_R4(sp) /* reload syscall arguments r4-r9 */
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+ ldw r8, PT_R8(sp)
+ ldw r9, PT_R9(sp)
+ br local_restart /* restart syscall */
+
+no_work_pending:
+ RESTORE_SWITCH_STACK
+ br ret_from_exception
+
+/***********************************************************************
+ * Handle external interrupts.
+ ***********************************************************************
+ */
+/*
+ * This is the generic interrupt handler (for all hardware interrupt
+ * sources). It figures out the vector number and calls the appropriate
+ * interrupt service routine directly.
+ */
+external_interrupt:
+ rdctl r12, ipending
+ rdctl r9, ienable
+ and r12, r12, r9
+ /* skip if no interrupt is pending */
+ beq r12, r0, ret_from_interrupt
+
+ /*
+ * Process an external hardware interrupt.
+ */
+
+ addi ea, ea, -4 /* re-issue the interrupted instruction */
+ stw ea, PT_EA(sp)
+2: movi r4, %lo(-1) /* Start from bit position 0,
+ highest priority */
+ /* This is the IRQ # for handler call */
+1: andi r10, r12, 1 /* Isolate bit we are interested in */
+ srli r12, r12, 1 /* shift count is costly without hardware
+ multiplier */
+ addi r4, r4, 1
+ beq r10, r0, 1b
+ mov r5, sp /* Setup pt_regs pointer for handler call */
+ call do_IRQ
+ rdctl r12, ipending /* check again if irq still pending */
+ rdctl r9, ienable /* Isolate possible interrupts */
+ and r12, r12, r9
+ bne r12, r0, 2b
+ /* br ret_from_interrupt */ /* fall through to ret_from_interrupt */
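A C sketch of the dispatch loop above may be easier to follow (illustration only; the rdctl_* helpers are stand-in names for the rdctl instructions, not real kernel functions):

struct pt_regs;
extern void do_IRQ(int hwirq, struct pt_regs *regs);
extern unsigned int rdctl_ipending(void);
extern unsigned int rdctl_ienable(void);

/* Sketch: scan ipending from bit 0 (highest priority) upwards, calling
 * do_IRQ() for each set bit, and re-read ipending until it is clear. */
static void external_interrupt_sketch(struct pt_regs *regs)
{
	unsigned int pending;

	while ((pending = rdctl_ipending() & rdctl_ienable()) != 0) {
		int irq = 0;

		while (!(pending & 1)) {	/* find lowest pending bit */
			pending >>= 1;
			irq++;
		}
		do_IRQ(irq, regs);
	}
}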
+
+ENTRY(ret_from_interrupt)
+ ldw r1, PT_ESTATUS(sp) /* check if returning to kernel */
+ TSTBNZ r1, r1, ESTATUS_EU, Luser_return
+
+#ifdef CONFIG_PREEMPTION
+ GET_THREAD_INFO r1
+ ldw r4, TI_PREEMPT_COUNT(r1)
+ bne r4, r0, restore_all
+ ldw r4, TI_FLAGS(r1) /* ? Need resched set */
+ BTBZ r10, r4, TIF_NEED_RESCHED, restore_all
+ ldw r4, PT_ESTATUS(sp) /* ? Interrupts off */
+ andi r10, r4, ESTATUS_EPIE
+ beq r10, r0, restore_all
+ call preempt_schedule_irq
+#endif
+ br restore_all
+
+/***********************************************************************
+ * A few syscall wrappers
+ ***********************************************************************
+ */
+/*
+ * int clone(unsigned long clone_flags, unsigned long newsp,
+ * int __user * parent_tidptr, int __user * child_tidptr,
+ * int tls_val)
+ */
+ENTRY(sys_clone)
+ SAVE_SWITCH_STACK
+ subi sp, sp, 4 /* make space for tls pointer */
+ stw r8, 0(sp) /* pass tls pointer (r8) via stack (5th argument) */
+ call nios2_clone
+ addi sp, sp, 4
+ RESTORE_SWITCH_STACK
+ ret
+
+ENTRY(sys_rt_sigreturn)
+ SAVE_SWITCH_STACK
+ mov r4, sp
+ call do_rt_sigreturn
+ RESTORE_SWITCH_STACK
+ addi ra, ra, (end_translate_rc_and_ret - translate_rc_and_ret)
+ ret
+
+/***********************************************************************
+ * A few other wrappers and stubs
+ ***********************************************************************
+ */
+protection_exception_pte:
+ rdctl r6, pteaddr
+ slli r6, r6, 10
+ call do_page_fault
+ br ret_from_exception
+
+protection_exception_ba:
+ rdctl r6, badaddr
+ call do_page_fault
+ br ret_from_exception
+
+protection_exception_instr:
+ call handle_supervisor_instr
+ br ret_from_exception
+
+handle_breakpoint:
+ call breakpoint_c
+ br ret_from_exception
+
+#ifdef CONFIG_NIOS2_ALIGNMENT_TRAP
+handle_unaligned:
+ SAVE_SWITCH_STACK
+ call handle_unaligned_c
+ RESTORE_SWITCH_STACK
+ br ret_from_exception
+#else
+handle_unaligned:
+ call handle_unaligned_c
+ br ret_from_exception
+#endif
+
+handle_illegal:
+ call handle_illegal_c
+ br ret_from_exception
+
+handle_diverror:
+ call handle_diverror_c
+ br ret_from_exception
+
+#ifdef CONFIG_KGDB
+handle_kgdb_breakpoint:
+ call kgdb_breakpoint_c
+ br ret_from_exception
+#endif
+
+handle_trap_1:
+ call handle_trap_1_c
+ br ret_from_exception
+
+handle_trap_2:
+ call handle_trap_2_c
+ br ret_from_exception
+
+handle_trap_3:
+handle_trap_reserved:
+ call handle_trap_3_c
+ br ret_from_exception
+
+/*
+ * Beware - when entering resume, prev (the current task) is
+ * in r4, next (the new task) is in r5, don't change these
+ * registers.
+ */
+ENTRY(resume)
+
+ rdctl r7, status /* save thread status reg */
+ stw r7, TASK_THREAD + THREAD_KPSR(r4)
+
+ andi r7, r7, %lo(~STATUS_PIE) /* disable interrupts */
+ wrctl status, r7
+
+ SAVE_SWITCH_STACK
+ stw sp, TASK_THREAD + THREAD_KSP(r4)/* save kernel stack pointer */
+ ldw sp, TASK_THREAD + THREAD_KSP(r5)/* restore new thread stack */
+ movia r24, _current_thread /* save thread */
+ GET_THREAD_INFO r1
+ stw r1, 0(r24)
+ RESTORE_SWITCH_STACK
+
+ ldw r7, TASK_THREAD + THREAD_KPSR(r5)/* restore thread status reg */
+ wrctl status, r7
+ ret
+
+ENTRY(ret_from_fork)
+ call schedule_tail
+ br ret_from_exception
+
+ENTRY(ret_from_kernel_thread)
+ call schedule_tail
+ mov r4,r17 /* arg */
+ callr r16 /* function */
+ br ret_from_exception
+
+/*
+ * Kernel user helpers.
+ *
+ * Each segment is 64-byte aligned and will be mapped into user space.
+ * New segments (if ever needed) must be added after the existing ones.
+ * This mechanism should be used only for things that are really small and
+ * justified, and not be abused freely.
+ *
+ */
+
+ /* Filling pads with undefined instructions. */
+.macro kuser_pad sym size
+ .if ((. - \sym) & 3)
+ .rept (4 - (. - \sym) & 3)
+ .byte 0
+ .endr
+ .endif
+ .rept ((\size - (. - \sym)) / 4)
+ .word 0xdeadbeef
+ .endr
+.endm
+
+ .align 6
+ .globl __kuser_helper_start
+__kuser_helper_start:
+
+__kuser_helper_version: /* @ 0x1000 */
+ .word ((__kuser_helper_end - __kuser_helper_start) >> 6)
+
+__kuser_cmpxchg: /* @ 0x1004 */
+ /*
+ * r4 pointer to exchange variable
+ * r5 old value
+ * r6 new value
+ */
+cmpxchg_ldw:
+ ldw r2, 0(r4) /* load current value */
+ sub r2, r2, r5 /* compare with old value */
+ bne r2, zero, cmpxchg_ret
+
+ /* We had a match, store the new value */
+cmpxchg_stw:
+ stw r6, 0(r4)
+cmpxchg_ret:
+ ret
+
+ kuser_pad __kuser_cmpxchg, 64
+
+ .globl __kuser_sigtramp
+__kuser_sigtramp:
+ movi r2, __NR_rt_sigreturn
+ trap
+
+ kuser_pad __kuser_sigtramp, 64
+
+ .globl __kuser_helper_end
+__kuser_helper_end:
diff --git a/arch/nios2/kernel/head.S b/arch/nios2/kernel/head.S
new file mode 100644
index 000000000..372ce4a33
--- /dev/null
+++ b/arch/nios2/kernel/head.S
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ * Copyright (C) 2001 Vic Phillips, Microtronix Datacom Ltd.
+ *
+ * Based on head.S for Altera's Excalibur development board with nios processor
+ *
+ * Based on the following from the Excalibur sdk distribution:
+ * NA_MemoryMap.s, NR_JumpToStart.s, NR_Setup.s, NR_CWPManager.s
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/processor.h>
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm/asm-offsets.h>
+#include <asm/asm-macros.h>
+
+/*
+ * ZERO_PAGE is a special page that is used for zero-initialized
+ * data and COW.
+ */
+.data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+ .space PAGE_SIZE
+
+/*
+ * This global variable is used as an extension to the nios'
+ * STATUS register to emulate a user/supervisor mode.
+ */
+ .data
+ .align 2
+ .set noat
+
+ .global _current_thread
+_current_thread:
+ .long 0
+/*
+ * Input(s): passed from u-boot
+ * r4 - Optional pointer to a board information structure.
+ * r5 - Optional pointer to the physical starting address of the init RAM
+ * disk.
+ * r6 - Optional pointer to the physical ending address of the init RAM
+ * disk.
+ * r7 - Optional pointer to the physical starting address of any kernel
+ * command-line parameters.
+ */
+
+/*
+ * First executable code - detected and jumped to by the ROM bootstrap
+ * if the code resides in flash (looks for "Nios" at offset 0x0c from
+ * the potential executable image).
+ */
+ __HEAD
+ENTRY(_start)
+ wrctl status, r0 /* Disable interrupts */
+
+ /* Initialize all cache lines within the instruction cache */
+ movia r1, NIOS2_ICACHE_SIZE
+ movui r2, NIOS2_ICACHE_LINE_SIZE
+
+icache_init:
+ initi r1
+ sub r1, r1, r2
+ bgt r1, r0, icache_init
+ br 1f
+
+ /*
+ * This is the default location for the exception handler. The code here
+ * simply jumps to our real handler.
+ */
+ENTRY(exception_handler_hook)
+ movia r24, inthandler
+ jmp r24
+
+ENTRY(fast_handler)
+ nextpc et
+helper:
+ stw r3, r3save - helper(et)
+
+ rdctl r3 , pteaddr
+ srli r3, r3, 12
+ slli r3, r3, 2
+ movia et, pgd_current
+
+ ldw et, 0(et)
+ add r3, et, r3
+ ldw et, 0(r3)
+
+ rdctl r3, pteaddr
+ andi r3, r3, 0xfff
+ add et, r3, et
+ ldw et, 0(et)
+ wrctl tlbacc, et
+ nextpc et
+helper2:
+ ldw r3, r3save - helper2(et)
+ subi ea, ea, 4
+ eret
+r3save:
+ .word 0x0
+ENTRY(fast_handler_end)
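Roughly, fast_handler performs the two-level walk sketched below in C (editorial illustration; the shift amounts are taken literally from the assembly, pgd_current mirrors the symbol used above, and the types are only indicative):

extern unsigned long *pgd_current;

/* Sketch: the value fast_handler ends up writing to tlbacc. */
static unsigned long fast_tlb_walk_sketch(unsigned long pteaddr)
{
	/* First level: bits 12 and up of pteaddr index the page directory. */
	unsigned long *pte_table = (unsigned long *)pgd_current[pteaddr >> 12];

	/* Second level: the low 12 bits are a byte offset into the PTE table. */
	return *(unsigned long *)((char *)pte_table + (pteaddr & 0xfff));
}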
+
+1:
+ /*
+ * After the instruction cache is initialized, the data cache must
+ * also be initialized.
+ */
+ movia r1, NIOS2_DCACHE_SIZE
+ movui r2, NIOS2_DCACHE_LINE_SIZE
+
+dcache_init:
+ initd 0(r1)
+ sub r1, r1, r2
+ bgt r1, r0, dcache_init
+
+ nextpc r1 /* Find out where we are */
+chkadr:
+ movia r2, chkadr
+ beq r1, r2, finish_move /* Already running at the linked address; done */
+ addi r1, r1,(_start - chkadr) /* Source */
+ movia r2, _start /* Destination */
+ movia r3, __bss_start /* End of copy */
+
+loop_move: /* r1: src, r2: dest, r3: last dest */
+ ldw r8, 0(r1) /* load a word from [r1] */
+ stw r8, 0(r2) /* store a word to dest [r2] */
+ flushd 0(r2) /* Flush cache for safety */
+ addi r1, r1, 4 /* inc the src addr */
+ addi r2, r2, 4 /* inc the dest addr */
+ blt r2, r3, loop_move
+
+ movia r1, finish_move /* VMA(_start)->l1 */
+ jmp r1 /* jmp to _start */
+
+finish_move:
+
+ /* Mask off all possible interrupts */
+ wrctl ienable, r0
+
+ /* Clear .bss */
+ movia r2, __bss_start
+ movia r1, __bss_stop
+1:
+ stb r0, 0(r2)
+ addi r2, r2, 1
+ bne r1, r2, 1b
+
+ movia r1, init_thread_union /* set stack at top of the task union */
+ addi sp, r1, THREAD_SIZE
+ movia r2, _current_thread /* Remember current thread */
+ stw r1, 0(r2)
+
+ movia r1, nios2_boot_init /* save args r4-r7 passed from u-boot */
+ callr r1
+
+ movia r1, start_kernel /* call start_kernel as a subroutine */
+ callr r1
+
+ /* If we return from start_kernel, break to the oci debugger and
+ * buggered we are.
+ */
+ break
+
+ /* End of startup code */
+.set at
diff --git a/arch/nios2/kernel/insnemu.S b/arch/nios2/kernel/insnemu.S
new file mode 100644
index 000000000..a027cc68b
--- /dev/null
+++ b/arch/nios2/kernel/insnemu.S
@@ -0,0 +1,580 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003-2013 Altera Corporation
+ * All rights reserved.
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/entry.h>
+
+.set noat
+.set nobreak
+
+/*
+ * Explicitly allow the use of r1 (the assembler temporary register)
+ * within this code. This register is normally reserved for the use of
+ * the compiler.
+ */
+
+ENTRY(instruction_trap)
+ ldw r1, PT_R1(sp) // Restore registers
+ ldw r2, PT_R2(sp)
+ ldw r3, PT_R3(sp)
+ ldw r4, PT_R4(sp)
+ ldw r5, PT_R5(sp)
+ ldw r6, PT_R6(sp)
+ ldw r7, PT_R7(sp)
+ ldw r8, PT_R8(sp)
+ ldw r9, PT_R9(sp)
+ ldw r10, PT_R10(sp)
+ ldw r11, PT_R11(sp)
+ ldw r12, PT_R12(sp)
+ ldw r13, PT_R13(sp)
+ ldw r14, PT_R14(sp)
+ ldw r15, PT_R15(sp)
+ ldw ra, PT_RA(sp)
+ ldw fp, PT_FP(sp)
+ ldw gp, PT_GP(sp)
+ ldw et, PT_ESTATUS(sp)
+ wrctl estatus, et
+ ldw ea, PT_EA(sp)
+ ldw et, PT_SP(sp) /* backup sp in et */
+
+ addi sp, sp, PT_REGS_SIZE
+
+ /* INSTRUCTION EMULATION
+ * ---------------------
+ *
+ * Nios II processors generate exceptions for unimplemented instructions.
+ * The routines below emulate these instructions. Depending on the
+ * processor core, the only instructions that might need to be emulated
+ * are div, divu, mul, muli, mulxss, mulxsu, and mulxuu.
+ *
+ * The emulations match the instructions, except for the following
+ * limitations:
+ *
+ * 1) The emulation routines do not emulate the use of the exception
+ * temporary register (et) as a source operand because the exception
+ * handler has already modified it.
+ *
+ * 2) The routines do not emulate the use of the stack pointer (sp) or
+ * the exception return address register (ea) as a destination because
+ * modifying these registers crashes the exception handler or the
+ * interrupted routine.
+ *
+ * Detailed Design
+ * ---------------
+ *
+ * The emulation routines expect the contents of integer registers r0-r31
+ * to be on the stack at addresses sp, 4(sp), 8(sp), ... 124(sp). The
+ * routines retrieve source operands from the stack and modify the
+ * destination register's value on the stack prior to the end of the
+ * exception handler. Then all registers except the destination register
+ * are restored to their previous values.
+ *
+ * The instruction that causes the exception is found at address -4(ea).
+ * The instruction's OP and OPX fields identify the operation to be
+ * performed.
+ *
+ * One instruction, muli, is an I-type instruction that is identified by
+ * an OP field of 0x24.
+ *
+ * muli AAAAA,BBBBB,IIIIIIIIIIIIIIII,-0x24-
+ * 27 22 6 0 <-- LSB of field
+ *
+ * The remaining emulated instructions are R-type and have an OP field
+ * of 0x3a. Their OPX fields identify them.
+ *
+ * R-type AAAAA,BBBBB,CCCCC,XXXXXX,NNNNN,-0x3a-
+ * 27 22 17 11 6 0 <-- LSB of field
+ *
+ *
+ * Opcode Encoding. muli is identified by its OP value. Then OPX & 0x02
+ * is used to differentiate between the division opcodes and the
+ * remaining multiplication opcodes.
+ *
+ * Instruction OP OPX OPX & 0x02
+ * ----------- ---- ---- ----------
+ * muli 0x24
+ * divu 0x3a 0x24 0
+ * div 0x3a 0x25 0
+ * mul 0x3a 0x27 != 0
+ * mulxuu 0x3a 0x07 != 0
+ * mulxsu 0x3a 0x17 != 0
+ * mulxss 0x3a 0x1f != 0
+ */
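A compact C sketch of this field layout (illustrative only); it extracts the same OP, OPX, register and IMM16 fields that the rotate-and-mask sequence further down recovers:

#include <stdint.h>

/* Sketch: decode a Nios II instruction word per the layout above. For
 * R-type instructions the IMM16 slot holds C (5 bits), OPX (6 bits) and a
 * 5-bit shift/immediate field. */
struct nios2_insn {
	uint32_t op, a, b, c, opx;
	int32_t imm16;
};

static struct nios2_insn nios2_decode(uint32_t word)
{
	struct nios2_insn i;

	i.op    = word & 0x3f;			/* bits  5..0 */
	i.imm16 = (int16_t)(word >> 6);		/* bits 21..6, sign-extended */
	i.opx   = (word >> 11) & 0x3f;		/* bits 16..11 (R-type) */
	i.c     = (word >> 17) & 0x1f;		/* bits 21..17 (R-type) */
	i.b     = (word >> 22) & 0x1f;		/* bits 26..22 */
	i.a     = (word >> 27) & 0x1f;		/* bits 31..27 */
	return i;
}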
+
+
+ /*
+ * Save everything on the stack to make it easy for the emulation
+ * routines to retrieve the source register operands.
+ */
+
+ addi sp, sp, -128
+ stw zero, 0(sp) /* Save zero on stack to avoid special case for r0. */
+ stw r1, 4(sp)
+ stw r2, 8(sp)
+ stw r3, 12(sp)
+ stw r4, 16(sp)
+ stw r5, 20(sp)
+ stw r6, 24(sp)
+ stw r7, 28(sp)
+ stw r8, 32(sp)
+ stw r9, 36(sp)
+ stw r10, 40(sp)
+ stw r11, 44(sp)
+ stw r12, 48(sp)
+ stw r13, 52(sp)
+ stw r14, 56(sp)
+ stw r15, 60(sp)
+ stw r16, 64(sp)
+ stw r17, 68(sp)
+ stw r18, 72(sp)
+ stw r19, 76(sp)
+ stw r20, 80(sp)
+ stw r21, 84(sp)
+ stw r22, 88(sp)
+ stw r23, 92(sp)
+ /* Don't bother to save et. It's already been changed. */
+ rdctl r5, estatus
+ stw r5, 100(sp)
+
+ stw gp, 104(sp)
+ stw et, 108(sp) /* et contains previous sp value. */
+ stw fp, 112(sp)
+ stw ea, 116(sp)
+ stw ra, 120(sp)
+
+
+ /*
+ * Split the instruction into its fields. We need 4*A, 4*B, and 4*C as
+ * offsets to the stack pointer for access to the stored register values.
+ */
+ ldw r2,-4(ea) /* r2 = AAAAA,BBBBB,IIIIIIIIIIIIIIII,PPPPPP */
+ roli r3, r2, 7 /* r3 = BBB,IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BB */
+ roli r4, r3, 3 /* r4 = IIIIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB */
+ roli r5, r4, 2 /* r5 = IIIIIIIIIIIIII,PPPPPP,AAAAA,BBBBB,II */
+ srai r4, r4, 16 /* r4 = (sign-extended) IMM16 */
+ roli r6, r5, 5 /* r6 = XXXX,NNNNN,PPPPPP,AAAAA,BBBBB,CCCCC,XX */
+ andi r2, r2, 0x3f /* r2 = 00000000000000000000000000,PPPPPP */
+ andi r3, r3, 0x7c /* r3 = 0000000000000000000000000,AAAAA,00 */
+ andi r5, r5, 0x7c /* r5 = 0000000000000000000000000,BBBBB,00 */
+ andi r6, r6, 0x7c /* r6 = 0000000000000000000000000,CCCCC,00 */
+
+ /* Now
+ * r2 = OP
+ * r3 = 4*A
+ * r4 = IMM16 (sign extended)
+ * r5 = 4*B
+ * r6 = 4*C
+ */
+
+ /*
+ * Get the operands.
+ *
+ * It is necessary to check for muli because it uses an I-type
+ * instruction format, while the other instructions have an R-type
+ * format.
+ *
+ * Prepare for either multiplication or division loop.
+ * They both loop 32 times.
+ */
+ movi r14, 32
+
+ add r3, r3, sp /* r3 = address of A-operand. */
+ ldw r3, 0(r3) /* r3 = A-operand. */
+ movi r7, 0x24 /* muli opcode (I-type instruction format) */
+ beq r2, r7, mul_immed /* muli doesn't use the B register as a source */
+
+ add r5, r5, sp /* r5 = address of B-operand. */
+ ldw r5, 0(r5) /* r5 = B-operand. */
+ /* r4 = SSSSSSSSSSSSSSSS,-----IMM16------ */
+ /* IMM16 not needed, align OPX portion */
+ /* r4 = SSSSSSSSSSSSSSSS,CCCCC,-OPX--,00000 */
+ srli r4, r4, 5 /* r4 = 00000,SSSSSSSSSSSSSSSS,CCCCC,-OPX-- */
+ andi r4, r4, 0x3f /* r4 = 00000000000000000000000000,-OPX-- */
+
+ /* Now
+ * r2 = OP
+ * r3 = src1
+ * r5 = src2
+ * r4 = OPX (no longer can be muli)
+ * r6 = 4*C
+ */
+
+
+ /*
+ * Multiply or Divide?
+ */
+ andi r7, r4, 0x02 /* For R-type multiply instructions,
+ OPX & 0x02 != 0 */
+ bne r7, zero, multiply
+
+
+ /* DIVISION
+ *
+ * Divide an unsigned dividend by an unsigned divisor using
+ * a shift-and-subtract algorithm. The example below shows
+ * 43 div 7 = 6 for 8-bit integers. This classic algorithm uses a
+ * single register to store both the dividend and the quotient,
+ * allowing both values to be shifted with a single instruction.
+ *
+ * remainder dividend:quotient
+ * --------- -----------------
+ * initialize 00000000 00101011:
+ * shift 00000000 0101011:_
+ * remainder >= divisor? no 00000000 0101011:0
+ * shift 00000000 101011:0_
+ * remainder >= divisor? no 00000000 101011:00
+ * shift 00000001 01011:00_
+ * remainder >= divisor? no 00000001 01011:000
+ * shift 00000010 1011:000_
+ * remainder >= divisor? no 00000010 1011:0000
+ * shift 00000101 011:0000_
+ * remainder >= divisor? no 00000101 011:00000
+ * shift 00001010 11:00000_
+ * remainder >= divisor? yes 00001010 11:000001
+ * remainder -= divisor - 00000111
+ * ----------
+ * 00000011 11:000001
+ * shift 00000111 1:000001_
+ * remainder >= divisor? yes 00000111 1:0000011
+ * remainder -= divisor - 00000111
+ * ----------
+ * 00000000 1:0000011
+ * shift 00000001 :0000011_
+ * remainder >= divisor? no 00000001 :00000110
+ *
+ * The quotient is 00000110.
+ */
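The same algorithm in C, as an illustrative sketch of what divide_loop below implements for the unsigned case (like the assembly, it does not guard against a zero divisor):

#include <stdint.h>

/* Sketch: unsigned shift-and-subtract division; dividend and quotient
 * share one word, exactly as in the 43 div 7 example above. */
static uint32_t divu_sketch(uint32_t dividend, uint32_t divisor)
{
	uint32_t remainder = 0;
	int count;

	for (count = 32; count > 0; --count) {
		/* (remainder:dividend:quotient) <<= 1 */
		remainder = (remainder << 1) | (dividend >> 31);
		dividend <<= 1;

		if (remainder >= divisor) {
			dividend |= 1;		/* set LSB of quotient */
			remainder -= divisor;
		}
	}
	return dividend;			/* now holds the quotient */
}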
+
+divide:
+ /*
+ * Prepare for division by assuming the result
+ * is unsigned, and storing its "sign" as 0.
+ */
+ movi r17, 0
+
+
+ /* Which division opcode? */
+ xori r7, r4, 0x25 /* OPX of div */
+ bne r7, zero, unsigned_division
+
+
+ /*
+ * OPX is div. Determine and store the sign of the quotient.
+ * Then take the absolute value of both operands.
+ */
+ xor r17, r3, r5 /* MSB contains sign of quotient */
+ bge r3,zero,dividend_is_nonnegative
+ sub r3, zero, r3 /* -r3 */
+dividend_is_nonnegative:
+ bge r5, zero, divisor_is_nonnegative
+ sub r5, zero, r5 /* -r5 */
+divisor_is_nonnegative:
+
+
+unsigned_division:
+ /* Initialize the unsigned-division loop. */
+ movi r13, 0 /* remainder = 0 */
+
+ /* Now
+ * r3 = dividend : quotient
+ * r4 = 0x25 for div, 0x24 for divu
+ * r5 = divisor
+ * r13 = remainder
+ * r14 = loop counter (already initialized to 32)
+ * r17 = MSB contains sign of quotient
+ */
+
+
+ /*
+ * for (count = 32; count > 0; --count)
+ * {
+ */
+divide_loop:
+
+ /*
+ * Division:
+ *
+ * (remainder:dividend:quotient) <<= 1;
+ */
+ slli r13, r13, 1
+ cmplt r7, r3, zero /* r7 = MSB of r3 */
+ or r13, r13, r7
+ slli r3, r3, 1
+
+
+ /*
+ * if (remainder >= divisor)
+ * {
+ * set LSB of quotient
+ * remainder -= divisor;
+ * }
+ */
+ bltu r13, r5, div_skip
+ ori r3, r3, 1
+ sub r13, r13, r5
+div_skip:
+
+ /*
+ * }
+ */
+ subi r14, r14, 1
+ bne r14, zero, divide_loop
+
+
+ /* Now
+ * r3 = quotient
+ * r4 = 0x25 for div, 0x24 for divu
+ * r6 = 4*C
+ * r17 = MSB contains sign of quotient
+ */
+
+
+ /*
+ * Conditionally negate signed quotient. If quotient is unsigned,
+ * the sign already is initialized to 0.
+ */
+ bge r17, zero, quotient_is_nonnegative
+ sub r3, zero, r3 /* -r3 */
+ quotient_is_nonnegative:
+
+
+ /*
+ * Final quotient is in r3.
+ */
+ add r6, r6, sp
+ stw r3, 0(r6) /* write quotient to stack */
+ br restore_registers
+
+
+
+
+ /* MULTIPLICATION
+ *
+ * A "product" is the number that one gets by summing a "multiplicand"
+ * several times. The "multiplier" specifies the number of copies of the
+ * multiplicand that are summed.
+ *
+ * Actual multiplication algorithms don't use repeated addition, however.
+ * Shift-and-add algorithms get the same answer as repeated addition, and
+ * they are faster. To compute the lower half of a product (pppp below)
+ * one shifts the product left before adding in each of the partial
+ * products (a * mmmm) through (d * mmmm).
+ *
+ * To compute the upper half of a product (PPPP below), one adds in the
+ * partial products (d * mmmm) through (a * mmmm), each time following
+ * the add by a right shift of the product.
+ *
+ * mmmm
+ * * abcd
+ * ------
+ * #### = d * mmmm
+ * #### = c * mmmm
+ * #### = b * mmmm
+ * #### = a * mmmm
+ * --------
+ * PPPPpppp
+ *
+ * The example above shows 4 partial products. Computing actual Nios II
+ * products requires 32 partials.
+ *
+ * It is possible to compute the result of mulxsu from the result of
+ * mulxuu because the only difference between the results of these two
+ * opcodes is the value of the partial product associated with the sign
+ * bit of rA.
+ *
+ * mulxsu = mulxuu - (rA < 0) ? rB : 0;
+ *
+ * It is possible to compute the result of mulxss from the result of
+ * mulxsu because the only difference between the results of these two
+ * opcodes is the value of the partial product associated with the sign
+ * bit of rB.
+ *
+ * mulxss = mulxsu - (rB < 0) ? rA : 0;
+ *
+ */
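As an illustrative cross-check (not kernel code), the mulx identities above can be written in C with a 64-bit multiply; the emulated core has no such shortcut, which is why the code below uses the 32-step shift-and-add loop instead:

#include <stdint.h>

/* Sketch: low and high product halves plus the mulxsu/mulxss corrections. */
static void mulx_sketch(uint32_t rA, uint32_t rB,
			uint32_t *mul, uint32_t *mulxuu,
			uint32_t *mulxsu, uint32_t *mulxss)
{
	uint64_t p = (uint64_t)rA * rB;

	*mul    = (uint32_t)p;		/* low half, same for all variants */
	*mulxuu = (uint32_t)(p >> 32);	/* unsigned x unsigned, high half */
	*mulxsu = *mulxuu - (((int32_t)rA < 0) ? rB : 0);
	*mulxss = *mulxsu - (((int32_t)rB < 0) ? rA : 0);
}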
+
+mul_immed:
+ /* Opcode is muli. Change it into mul for remainder of algorithm. */
+ mov r6, r5 /* Field B is dest register, not field C. */
+ mov r5, r4 /* Field IMM16 is src2, not field B. */
+ movi r4, 0x27 /* OPX of mul is 0x27 */
+
+multiply:
+ /* Initialize the multiplication loop. */
+ movi r9, 0 /* mul_product = 0 */
+ movi r10, 0 /* mulxuu_product = 0 */
+ mov r11, r5 /* save original multiplier for mulxsu and mulxss */
+ mov r12, r5 /* mulxuu_multiplier (will be shifted) */
+ movi r16, 1 /* used to create "rori B,A,1" from "ror B,A,r16" */
+
+ /* Now
+ * r3 = multiplicand
+ * r5 = mul_multiplier
+ * r6 = 4 * dest_register (used later as offset to sp)
+ * r7 = temp
+ * r9 = mul_product
+ * r10 = mulxuu_product
+ * r11 = original multiplier
+ * r12 = mulxuu_multiplier
+ * r14 = loop counter (already initialized)
+ * r16 = 1
+ */
+
+
+ /*
+ * for (count = 32; count > 0; --count)
+ * {
+ */
+multiply_loop:
+
+ /*
+ * mul_product <<= 1;
+ * lsb = multiplier & 1;
+ */
+ slli r9, r9, 1
+ andi r7, r12, 1
+
+ /*
+ * if (lsb == 1)
+ * {
+ * mulxuu_product += multiplicand;
+ * }
+ */
+ beq r7, zero, mulx_skip
+ add r10, r10, r3
+ cmpltu r7, r10, r3 /* Save the carry from the MSB of mulxuu_product. */
+ ror r7, r7, r16 /* r7 = 0x80000000 on carry, or else 0x00000000 */
+mulx_skip:
+
+ /*
+ * if (MSB of mul_multiplier == 1)
+ * {
+ * mul_product += multiplicand;
+ * }
+ */
+ bge r5, zero, mul_skip
+ add r9, r9, r3
+mul_skip:
+
+ /*
+ * mulxuu_product >>= 1; logical shift
+ * mul_multiplier <<= 1; done with MSB
+ * mulx_multiplier >>= 1; done with LSB
+ */
+ srli r10, r10, 1
+ or r10, r10, r7 /* OR in the saved carry bit. */
+ slli r5, r5, 1
+ srli r12, r12, 1
+
+
+ /*
+ * }
+ */
+ subi r14, r14, 1
+ bne r14, zero, multiply_loop
+
+
+ /*
+ * Multiply emulation loop done.
+ */
+
+ /* Now
+ * r3 = multiplicand
+ * r4 = OPX
+ * r6 = 4 * dest_register (used later as offset to sp)
+ * r7 = temp
+ * r9 = mul_product
+ * r10 = mulxuu_product
+ * r11 = original multiplier
+ */
+
+
+ /* Calculate address for result from 4 * dest_register */
+ add r6, r6, sp
+
+
+ /*
+ * Select/compute the result based on OPX.
+ */
+
+
+ /* OPX == mul? Then store. */
+ xori r7, r4, 0x27
+ beq r7, zero, store_product
+
+ /* It's one of the mulx.. opcodes. Move over the result. */
+ mov r9, r10
+
+ /* OPX == mulxuu? Then store. */
+ xori r7, r4, 0x07
+ beq r7, zero, store_product
+
+ /* Compute mulxsu
+ *
+ * mulxsu = mulxuu - (rA < 0) ? rB : 0;
+ */
+ bge r3, zero, mulxsu_skip
+ sub r9, r9, r11
+mulxsu_skip:
+
+ /* OPX == mulxsu? Then store. */
+ xori r7, r4, 0x17
+ beq r7, zero, store_product
+
+ /* Compute mulxss
+ *
+ * mulxss = mulxsu - (rB < 0) ? rA : 0;
+ */
+ bge r11,zero,mulxss_skip
+ sub r9, r9, r3
+mulxss_skip:
+ /* At this point, assume that OPX is mulxss, so store*/
+
+
+store_product:
+ stw r9, 0(r6)
+
+
+restore_registers:
+ /* No need to restore r0. */
+ ldw r5, 100(sp)
+ wrctl estatus, r5
+
+ ldw r1, 4(sp)
+ ldw r2, 8(sp)
+ ldw r3, 12(sp)
+ ldw r4, 16(sp)
+ ldw r5, 20(sp)
+ ldw r6, 24(sp)
+ ldw r7, 28(sp)
+ ldw r8, 32(sp)
+ ldw r9, 36(sp)
+ ldw r10, 40(sp)
+ ldw r11, 44(sp)
+ ldw r12, 48(sp)
+ ldw r13, 52(sp)
+ ldw r14, 56(sp)
+ ldw r15, 60(sp)
+ ldw r16, 64(sp)
+ ldw r17, 68(sp)
+ ldw r18, 72(sp)
+ ldw r19, 76(sp)
+ ldw r20, 80(sp)
+ ldw r21, 84(sp)
+ ldw r22, 88(sp)
+ ldw r23, 92(sp)
+ /* Does not need to restore et */
+ ldw gp, 104(sp)
+
+ ldw fp, 112(sp)
+ ldw ea, 116(sp)
+ ldw ra, 120(sp)
+ ldw sp, 108(sp) /* last restore sp */
+ eret
+
+.set at
+.set break
diff --git a/arch/nios2/kernel/irq.c b/arch/nios2/kernel/irq.c
new file mode 100644
index 000000000..5f3555ce4
--- /dev/null
+++ b/arch/nios2/kernel/irq.c
@@ -0,0 +1,80 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2008 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * based on irq.c from m68k which is:
+ *
+ * Copyright (C) 2007 Greg Ungerer <gerg@snapgear.com>
+ */
+
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/of.h>
+
+static u32 ienable;
+
+asmlinkage void do_IRQ(int hwirq, struct pt_regs *regs)
+{
+ struct pt_regs *oldregs = set_irq_regs(regs);
+ int irq;
+
+ irq_enter();
+ irq = irq_find_mapping(NULL, hwirq);
+ generic_handle_irq(irq);
+ irq_exit();
+
+ set_irq_regs(oldregs);
+}
+
+static void chip_unmask(struct irq_data *d)
+{
+ ienable |= (1 << d->hwirq);
+ WRCTL(CTL_IENABLE, ienable);
+}
+
+static void chip_mask(struct irq_data *d)
+{
+ ienable &= ~(1 << d->hwirq);
+ WRCTL(CTL_IENABLE, ienable);
+}
+
+static struct irq_chip m_irq_chip = {
+ .name = "NIOS2-INTC",
+ .irq_unmask = chip_unmask,
+ .irq_mask = chip_mask,
+};
+
+static int irq_map(struct irq_domain *h, unsigned int virq,
+ irq_hw_number_t hw_irq_num)
+{
+ irq_set_chip_and_handler(virq, &m_irq_chip, handle_level_irq);
+
+ return 0;
+}
+
+static const struct irq_domain_ops irq_ops = {
+ .map = irq_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+void __init init_IRQ(void)
+{
+ struct irq_domain *domain;
+ struct device_node *node;
+
+ node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.0");
+ if (!node)
+ node = of_find_compatible_node(NULL, NULL, "altr,nios2-1.1");
+
+ BUG_ON(!node);
+
+ domain = irq_domain_add_linear(node, NIOS2_CPU_NR_IRQS, &irq_ops, NULL);
+ BUG_ON(!domain);
+
+ irq_set_default_host(domain);
+ of_node_put(node);
+ /* Load the initial ienable value */
+ ienable = RDCTL(CTL_IENABLE);
+}
diff --git a/arch/nios2/kernel/kgdb.c b/arch/nios2/kernel/kgdb.c
new file mode 100644
index 000000000..d0963fcb1
--- /dev/null
+++ b/arch/nios2/kernel/kgdb.c
@@ -0,0 +1,158 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Nios2 KGDB support
+ *
+ * Copyright (C) 2015 Altera Corporation
+ * Copyright (C) 2011 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * Based on the code posted by Kazuyasu on the Altera Forum at:
+ * http://www.alteraforum.com/forum/showpost.php?p=77003&postcount=20
+ */
+#include <linux/ptrace.h>
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/io.h>
+
+static int wait_for_remote_debugger;
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
+{
+ { "zero", GDB_SIZEOF_REG, -1 },
+ { "at", GDB_SIZEOF_REG, offsetof(struct pt_regs, r1) },
+ { "r2", GDB_SIZEOF_REG, offsetof(struct pt_regs, r2) },
+ { "r3", GDB_SIZEOF_REG, offsetof(struct pt_regs, r3) },
+ { "r4", GDB_SIZEOF_REG, offsetof(struct pt_regs, r4) },
+ { "r5", GDB_SIZEOF_REG, offsetof(struct pt_regs, r5) },
+ { "r6", GDB_SIZEOF_REG, offsetof(struct pt_regs, r6) },
+ { "r7", GDB_SIZEOF_REG, offsetof(struct pt_regs, r7) },
+ { "r8", GDB_SIZEOF_REG, offsetof(struct pt_regs, r8) },
+ { "r9", GDB_SIZEOF_REG, offsetof(struct pt_regs, r9) },
+ { "r10", GDB_SIZEOF_REG, offsetof(struct pt_regs, r10) },
+ { "r11", GDB_SIZEOF_REG, offsetof(struct pt_regs, r11) },
+ { "r12", GDB_SIZEOF_REG, offsetof(struct pt_regs, r12) },
+ { "r13", GDB_SIZEOF_REG, offsetof(struct pt_regs, r13) },
+ { "r14", GDB_SIZEOF_REG, offsetof(struct pt_regs, r14) },
+ { "r15", GDB_SIZEOF_REG, offsetof(struct pt_regs, r15) },
+ { "r16", GDB_SIZEOF_REG, -1 },
+ { "r17", GDB_SIZEOF_REG, -1 },
+ { "r18", GDB_SIZEOF_REG, -1 },
+ { "r19", GDB_SIZEOF_REG, -1 },
+ { "r20", GDB_SIZEOF_REG, -1 },
+ { "r21", GDB_SIZEOF_REG, -1 },
+ { "r22", GDB_SIZEOF_REG, -1 },
+ { "r23", GDB_SIZEOF_REG, -1 },
+ { "et", GDB_SIZEOF_REG, -1 },
+ { "bt", GDB_SIZEOF_REG, -1 },
+ { "gp", GDB_SIZEOF_REG, offsetof(struct pt_regs, gp) },
+ { "sp", GDB_SIZEOF_REG, offsetof(struct pt_regs, sp) },
+ { "fp", GDB_SIZEOF_REG, offsetof(struct pt_regs, fp) },
+ { "ea", GDB_SIZEOF_REG, -1 },
+ { "ba", GDB_SIZEOF_REG, -1 },
+ { "ra", GDB_SIZEOF_REG, offsetof(struct pt_regs, ra) },
+ { "pc", GDB_SIZEOF_REG, offsetof(struct pt_regs, ea) },
+ { "status", GDB_SIZEOF_REG, -1 },
+ { "estatus", GDB_SIZEOF_REG, offsetof(struct pt_regs, estatus) },
+ { "bstatus", GDB_SIZEOF_REG, -1 },
+ { "ienable", GDB_SIZEOF_REG, -1 },
+ { "ipending", GDB_SIZEOF_REG, -1},
+ { "cpuid", GDB_SIZEOF_REG, -1 },
+ { "ctl6", GDB_SIZEOF_REG, -1 },
+ { "exception", GDB_SIZEOF_REG, -1 },
+ { "pteaddr", GDB_SIZEOF_REG, -1 },
+ { "tlbacc", GDB_SIZEOF_REG, -1 },
+ { "tlbmisc", GDB_SIZEOF_REG, -1 },
+ { "eccinj", GDB_SIZEOF_REG, -1 },
+ { "badaddr", GDB_SIZEOF_REG, -1 },
+ { "config", GDB_SIZEOF_REG, -1 },
+ { "mpubase", GDB_SIZEOF_REG, -1 },
+ { "mpuacc", GDB_SIZEOF_REG, -1 },
+};
+
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+
+ return 0;
+}
+
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+ gdb_regs[GDB_SP] = p->thread.kregs->sp;
+ gdb_regs[GDB_PC] = p->thread.kregs->ea;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->ea = pc;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ char *ptr;
+ unsigned long addr;
+
+ switch (remcom_in_buffer[0]) {
+ case 's':
+ case 'c':
+ /* handle the optional parameters */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->ea = addr;
+
+ return 0;
+ }
+
+ return -1; /* this means that we do not want to exit from the handler */
+}
+
+asmlinkage void kgdb_breakpoint_c(struct pt_regs *regs)
+{
+ /*
+ * The breakpoint entry code has moved the PC on by 4 bytes, so we must
+ * move it back. This could be done on the host, but we do it here.
+ */
+ if (!wait_for_remote_debugger)
+ regs->ea -= 4;
+ else /* pass the first trap 30 code */
+ wait_for_remote_debugger = 0;
+
+ kgdb_handle_exception(30, SIGTRAP, 0, regs);
+}
+
+int kgdb_arch_init(void)
+{
+ wait_for_remote_debugger = 1;
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+ /* Nothing to do */
+}
+
+const struct kgdb_arch arch_kgdb_ops = {
+ /* Breakpoint instruction: trap 30 */
+ .gdb_bpt_instr = { 0xba, 0x6f, 0x3b, 0x00 },
+};
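The four bytes in gdb_bpt_instr are the little-endian encoding of trap 30. The sketch below is a hypothetical helper, not kernel code; it packs an R-type word using the field positions documented in insnemu.S and reproduces the same byte pattern.

#include <stdio.h>
#include <stdint.h>

/* Pack a Nios II R-type word (A, B, C, OPX, N, OP); field positions as in
 * the insnemu.S comment: 27, 22, 17, 11, 6, 0. */
static uint32_t rtype(uint32_t a, uint32_t b, uint32_t c,
		      uint32_t opx, uint32_t n, uint32_t op)
{
	return (a << 27) | (b << 22) | (c << 17) | (opx << 11) | (n << 6) | op;
}

int main(void)
{
	/* Field values recovered from the gdb_bpt_instr byte pattern above. */
	uint32_t trap30 = rtype(0, 0, 0x1d, 0x2d, 30, 0x3a);

	printf("%08x\n", (unsigned)trap30);	/* 003b6fba -> ba 6f 3b 00 */
	return 0;
}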
diff --git a/arch/nios2/kernel/misaligned.c b/arch/nios2/kernel/misaligned.c
new file mode 100644
index 000000000..23e0544e1
--- /dev/null
+++ b/arch/nios2/kernel/misaligned.c
@@ -0,0 +1,238 @@
+/*
+ * linux/arch/nios2/kernel/misaligned.c
+ *
+ * basic emulation for mis-aligned accesses on the NIOS II cpu
+ * modelled after the version for arm in arm/alignment.c
+ *
+ * Brad Parker <brad@heeltoe.com>
+ * Copyright (C) 2010 Ambient Corporation
+ * Copyright (c) 2010 Altera Corporation, San Jose, California, USA.
+ * Copyright (c) 2010 Arrow Electronics, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of
+ * this archive for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/string.h>
+#include <linux/proc_fs.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/uaccess.h>
+#include <linux/seq_file.h>
+
+#include <asm/traps.h>
+#include <asm/unaligned.h>
+
+/* instructions we emulate */
+#define INST_LDHU 0x0b
+#define INST_STH 0x0d
+#define INST_LDH 0x0f
+#define INST_STW 0x15
+#define INST_LDW 0x17
+
+static unsigned int ma_usermode;
+#define UM_WARN 0x01
+#define UM_FIXUP 0x02
+#define UM_SIGNAL 0x04
+#define KM_WARN 0x08
+
+/* see arch/nios2/include/asm/ptrace.h */
+static u8 sys_stack_frame_reg_offset[] = {
+ /* struct pt_regs */
+ 8, 9, 10, 11, 12, 13, 14, 15, 1, 2, 3, 4, 5, 6, 7, 0,
+ /* struct switch_stack */
+ 16, 17, 18, 19, 20, 21, 22, 23, 0, 0, 0, 0, 0, 0, 0, 0
+};
+
+static int reg_offsets[32];
+
+static inline u32 get_reg_val(struct pt_regs *fp, int reg)
+{
+ u8 *p = ((u8 *)fp) + reg_offsets[reg];
+ return *(u32 *)p;
+}
+
+static inline void put_reg_val(struct pt_regs *fp, int reg, u32 val)
+{
+ u8 *p = ((u8 *)fp) + reg_offsets[reg];
+ *(u32 *)p = val;
+}
+
+/*
+ * (mis)alignment handler
+ */
+asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
+{
+ u32 isn, addr, val;
+ int in_kernel;
+ u8 a, b, d0, d1, d2, d3;
+ s16 imm16;
+ unsigned int fault;
+
+ /* back up one instruction */
+ fp->ea -= 4;
+
+ if (fixup_exception(fp)) {
+ return;
+ }
+
+ in_kernel = !user_mode(fp);
+
+ isn = *(unsigned long *)(fp->ea);
+
+ fault = 0;
+
+ /* do fixup if in kernel or mode turned on */
+ if (in_kernel || (ma_usermode & UM_FIXUP)) {
+ /* decompose instruction */
+ a = (isn >> 27) & 0x1f;
+ b = (isn >> 22) & 0x1f;
+ imm16 = (isn >> 6) & 0xffff;
+ addr = get_reg_val(fp, a) + imm16;
+
+ /* do fixup to saved registers */
+ switch (isn & 0x3f) {
+ case INST_LDHU:
+ fault |= __get_user(d0, (u8 *)(addr+0));
+ fault |= __get_user(d1, (u8 *)(addr+1));
+ val = (d1 << 8) | d0;
+ put_reg_val(fp, b, val);
+ break;
+ case INST_STH:
+ val = get_reg_val(fp, b);
+ d1 = val >> 8;
+ d0 = val >> 0;
+ if (in_kernel) {
+ *(u8 *)(addr+0) = d0;
+ *(u8 *)(addr+1) = d1;
+ } else {
+ fault |= __put_user(d0, (u8 *)(addr+0));
+ fault |= __put_user(d1, (u8 *)(addr+1));
+ }
+ break;
+ case INST_LDH:
+ fault |= __get_user(d0, (u8 *)(addr+0));
+ fault |= __get_user(d1, (u8 *)(addr+1));
+ val = (short)((d1 << 8) | d0);
+ put_reg_val(fp, b, val);
+ break;
+ case INST_STW:
+ val = get_reg_val(fp, b);
+ d3 = val >> 24;
+ d2 = val >> 16;
+ d1 = val >> 8;
+ d0 = val >> 0;
+ if (in_kernel) {
+ *(u8 *)(addr+0) = d0;
+ *(u8 *)(addr+1) = d1;
+ *(u8 *)(addr+2) = d2;
+ *(u8 *)(addr+3) = d3;
+ } else {
+ fault |= __put_user(d0, (u8 *)(addr+0));
+ fault |= __put_user(d1, (u8 *)(addr+1));
+ fault |= __put_user(d2, (u8 *)(addr+2));
+ fault |= __put_user(d3, (u8 *)(addr+3));
+ }
+ break;
+ case INST_LDW:
+ fault |= __get_user(d0, (u8 *)(addr+0));
+ fault |= __get_user(d1, (u8 *)(addr+1));
+ fault |= __get_user(d2, (u8 *)(addr+2));
+ fault |= __get_user(d3, (u8 *)(addr+3));
+ val = (d3 << 24) | (d2 << 16) | (d1 << 8) | d0;
+ put_reg_val(fp, b, val);
+ break;
+ }
+ }
+
+ addr = RDCTL(CTL_BADADDR);
+ cause >>= 2;
+
+ if (fault) {
+ if (in_kernel) {
+ pr_err("fault during kernel misaligned fixup @ %#lx; addr 0x%08x; isn=0x%08x\n",
+ fp->ea, (unsigned int)addr,
+ (unsigned int)isn);
+ } else {
+ pr_err("fault during user misaligned fixup @ %#lx; isn=%08x addr=0x%08x sp=0x%08lx pid=%d\n",
+ fp->ea,
+ (unsigned int)isn, addr, fp->sp,
+ current->pid);
+
+ _exception(SIGSEGV, fp, SEGV_MAPERR, fp->ea);
+ return;
+ }
+ }
+
+ /*
+ * kernel mode -
+ * note exception and skip bad instruction (return)
+ */
+ if (in_kernel) {
+ fp->ea += 4;
+
+ if (ma_usermode & KM_WARN) {
+ pr_err("kernel unaligned access @ %#lx; BADADDR 0x%08x; cause=%d, isn=0x%08x\n",
+ fp->ea,
+ (unsigned int)addr, cause,
+ (unsigned int)isn);
+ /* show_regs(fp); */
+ }
+
+ return;
+ }
+
+ /*
+ * user mode -
+ * possibly warn,
+ * possibly send SIGBUS signal to process
+ */
+ if (ma_usermode & UM_WARN) {
+ pr_err("user unaligned access @ %#lx; isn=0x%08lx ea=0x%08lx ra=0x%08lx sp=0x%08lx\n",
+ (unsigned long)addr, (unsigned long)isn,
+ fp->ea, fp->ra, fp->sp);
+ }
+
+ if (ma_usermode & UM_SIGNAL)
+ _exception(SIGBUS, fp, BUS_ADRALN, fp->ea);
+ else
+ fp->ea += 4; /* else advance */
+}
+
+static void __init misaligned_calc_reg_offsets(void)
+{
+ int i, r, offset;
+
+ /* pre-calc offsets of registers on sys call stack frame */
+ offset = 0;
+
+ /* struct pt_regs */
+ for (i = 0; i < 16; i++) {
+ r = sys_stack_frame_reg_offset[i];
+ reg_offsets[r] = offset;
+ offset += 4;
+ }
+
+ /* struct switch_stack */
+ offset = -sizeof(struct switch_stack);
+ for (i = 16; i < 32; i++) {
+ r = sys_stack_frame_reg_offset[i];
+ reg_offsets[r] = offset;
+ offset += 4;
+ }
+}
+
+
+static int __init misaligned_init(void)
+{
+ /* default mode - silent fix */
+ ma_usermode = UM_FIXUP | KM_WARN;
+
+ misaligned_calc_reg_offsets();
+
+ return 0;
+}
+
+fs_initcall(misaligned_init);
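The fixup path above decodes the faulting load/store directly from its raw encoding: the Nios II I-type fields are A (bits 27-31), B (bits 22-26), IMM16 (bits 6-21) and OP (bits 0-5), and the wide access is then redone one byte at a time. Below is a minimal user-space sketch of the same decode and byte-wise halfword assembly (hypothetical helper names, illustrative only):

#include <stdint.h>

/* Field extraction as done by the handler above. */
struct itype {
	uint8_t a, b, op;
	int16_t imm16;
};

static struct itype decode_itype(uint32_t isn)
{
	struct itype d;

	d.a     = (isn >> 27) & 0x1f;	/* base register */
	d.b     = (isn >> 22) & 0x1f;	/* data register */
	d.imm16 = (isn >> 6) & 0xffff;	/* signed displacement */
	d.op    = isn & 0x3f;		/* INST_LDH, INST_STH, ... */
	return d;
}

/* Re-assemble a little-endian halfword from two byte loads, as INST_LDHU does. */
static uint16_t load_half_bytewise(const uint8_t *p)
{
	return (uint16_t)((p[1] << 8) | p[0]);
}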
diff --git a/arch/nios2/kernel/module.c b/arch/nios2/kernel/module.c
new file mode 100644
index 000000000..76e0a42d6
--- /dev/null
+++ b/arch/nios2/kernel/module.c
@@ -0,0 +1,137 @@
+/*
+ * Kernel module support for Nios II.
+ *
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ * Written by Wentao Xu <xuwentao@microtronix.com>
+ * Copyright (C) 2001, 2003 Rusty Russell
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+
+#include <asm/cacheflush.h>
+
+/*
+ * Modules should NOT be allocated with kmalloc for (obvious) reasons.
+ * But we do it for now to avoid relocation issues: CALL26/PCREL26 cannot reach
+ * from 0x80000000 (the vmalloc area) to 0xc0000000 (the kernel), whereas
+ * kmalloc returns addresses in the 0xc0000000 region.
+ */
+void *module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return kmalloc(size, GFP_KERNEL);
+}
+
+/* Free memory returned from module_alloc */
+void module_memfree(void *module_region)
+{
+ kfree(module_region);
+}
+
+int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *mod)
+{
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+ /* This is where to make the change */
+ uint32_t word;
+ uint32_t *loc
+ = ((void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rela[i].r_offset);
+ /* This is the symbol it is referring to. Note that all
+ undefined symbols have been resolved. */
+ Elf32_Sym *sym
+ = ((Elf32_Sym *)sechdrs[symindex].sh_addr
+ + ELF32_R_SYM(rela[i].r_info));
+ uint32_t v = sym->st_value + rela[i].r_addend;
+
+ pr_debug("reltype %d 0x%x name:<%s>\n",
+ ELF32_R_TYPE(rela[i].r_info),
+ rela[i].r_offset, strtab + sym->st_name);
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+ case R_NIOS2_NONE:
+ break;
+ case R_NIOS2_BFD_RELOC_32:
+ *loc += v;
+ break;
+ case R_NIOS2_PCREL16:
+ v -= (uint32_t)loc + 4;
+ if ((int32_t)v > 0x7fff ||
+ (int32_t)v < -(int32_t)0x8000) {
+ pr_err("module %s: relocation overflow\n",
+ mod->name);
+ return -ENOEXEC;
+ }
+ word = *loc;
+ *loc = ((((word >> 22) << 16) | (v & 0xffff)) << 6) |
+ (word & 0x3f);
+ break;
+ case R_NIOS2_CALL26:
+ if (v & 3) {
+ pr_err("module %s: dangerous relocation\n",
+ mod->name);
+ return -ENOEXEC;
+ }
+ if ((v >> 28) != ((uint32_t)loc >> 28)) {
+ pr_err("module %s: relocation overflow\n",
+ mod->name);
+ return -ENOEXEC;
+ }
+ *loc = (*loc & 0x3f) | ((v >> 2) << 6);
+ break;
+ case R_NIOS2_HI16:
+ word = *loc;
+ *loc = ((((word >> 22) << 16) |
+ ((v >> 16) & 0xffff)) << 6) | (word & 0x3f);
+ break;
+ case R_NIOS2_LO16:
+ word = *loc;
+ *loc = ((((word >> 22) << 16) | (v & 0xffff)) << 6) |
+ (word & 0x3f);
+ break;
+ case R_NIOS2_HIADJ16:
+ {
+ Elf32_Addr word2;
+
+ word = *loc;
+ word2 = ((v >> 16) + ((v >> 15) & 1)) & 0xffff;
+ *loc = ((((word >> 22) << 16) | word2) << 6) |
+ (word & 0x3f);
+ }
+ break;
+
+ default:
+ pr_err("module %s: Unknown reloc: %u\n",
+ mod->name, ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
+
+int module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ flush_cache_all();
+ return 0;
+}
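The R_NIOS2_HIADJ16 case above is the subtle one: because the matching low half (R_NIOS2_LO16) is sign-extended when the processor adds it, the high half must be pre-adjusted by bit 15 of the value so that the pair reconstructs the original 32-bit address. A small self-contained check of that identity (illustrative only, not kernel code):

#include <assert.h>
#include <stdint.h>

/* High half adjusted for the sign extension of the low half (%hiadj). */
static uint16_t hiadj16(uint32_t v)
{
	return ((v >> 16) + ((v >> 15) & 1)) & 0xffff;
}

int main(void)
{
	uint32_t v = 0xdead9000;	/* low half has bit 15 set */
	uint32_t rebuilt = ((uint32_t)hiadj16(v) << 16) + (int16_t)(v & 0xffff);

	/* (0xdeae << 16) + (int16_t)0x9000 = 0xdeae0000 - 0x7000 = 0xdead9000 */
	assert(rebuilt == v);
	return 0;
}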
diff --git a/arch/nios2/kernel/nios2_ksyms.c b/arch/nios2/kernel/nios2_ksyms.c
new file mode 100644
index 000000000..54f7b23df
--- /dev/null
+++ b/arch/nios2/kernel/nios2_ksyms.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+
+/* string functions */
+
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memmove);
+
+/* memory management */
+
+EXPORT_SYMBOL(empty_zero_page);
+EXPORT_SYMBOL(flush_icache_range);
+
+/*
+ * libgcc functions - functions that are used internally by the
+ * compiler... (the prototypes are not correct, but that
+ * doesn't really matter since they're not versioned).
+ */
+#define DECLARE_EXPORT(name) extern void name(void); EXPORT_SYMBOL(name)
+
+DECLARE_EXPORT(__gcc_bcmp);
+DECLARE_EXPORT(__divsi3);
+DECLARE_EXPORT(__moddi3);
+DECLARE_EXPORT(__modsi3);
+DECLARE_EXPORT(__udivmoddi4);
+DECLARE_EXPORT(__udivsi3);
+DECLARE_EXPORT(__umoddi3);
+DECLARE_EXPORT(__umodsi3);
+DECLARE_EXPORT(__muldi3);
+DECLARE_EXPORT(__ucmpdi2);
+DECLARE_EXPORT(__lshrdi3);
+DECLARE_EXPORT(__ashldi3);
+DECLARE_EXPORT(__ashrdi3);
diff --git a/arch/nios2/kernel/process.c b/arch/nios2/kernel/process.c
new file mode 100644
index 000000000..c5f916ca6
--- /dev/null
+++ b/arch/nios2/kernel/process.c
@@ -0,0 +1,270 @@
+/*
+ * Architecture-dependent parts of process handling.
+ *
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2009 Wind River Systems Inc
+ * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/mm_types.h>
+#include <linux/tick.h>
+#include <linux/uaccess.h>
+
+#include <asm/unistd.h>
+#include <asm/traps.h>
+#include <asm/cpuinfo.h>
+
+asmlinkage void ret_from_fork(void);
+asmlinkage void ret_from_kernel_thread(void);
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+void arch_cpu_idle(void)
+{
+ raw_local_irq_enable();
+}
+
+/*
+ * The development boards have no way to pull a board reset. Just jump to the
+ * cpu reset address and let the boot loader or the code in head.S take care of
+ * resetting peripherals.
+ */
+void machine_restart(char *__unused)
+{
+ pr_notice("Machine restart (%08x)...\n", cpuinfo.reset_addr);
+ local_irq_disable();
+ __asm__ __volatile__ (
+ "jmp %0\n\t"
+ :
+ : "r" (cpuinfo.reset_addr)
+ : "r4");
+}
+
+void machine_halt(void)
+{
+ pr_notice("Machine halt...\n");
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+/*
+ * There is no way to power off the development boards, so just spin for now.
+ * If we ever have a way of powering off a board, e.g. via a GPIO, we should
+ * add that here.
+ */
+void machine_power_off(void)
+{
+ pr_notice("Machine power off...\n");
+ local_irq_disable();
+ for (;;)
+ ;
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ pr_notice("\n");
+ show_regs_print_info(KERN_DEFAULT);
+
+ pr_notice("r1: %08lx r2: %08lx r3: %08lx r4: %08lx\n",
+ regs->r1, regs->r2, regs->r3, regs->r4);
+
+ pr_notice("r5: %08lx r6: %08lx r7: %08lx r8: %08lx\n",
+ regs->r5, regs->r6, regs->r7, regs->r8);
+
+ pr_notice("r9: %08lx r10: %08lx r11: %08lx r12: %08lx\n",
+ regs->r9, regs->r10, regs->r11, regs->r12);
+
+ pr_notice("r13: %08lx r14: %08lx r15: %08lx\n",
+ regs->r13, regs->r14, regs->r15);
+
+ pr_notice("ra: %08lx fp: %08lx sp: %08lx gp: %08lx\n",
+ regs->ra, regs->fp, regs->sp, regs->gp);
+
+ pr_notice("ea: %08lx estatus: %08lx\n",
+ regs->ea, regs->estatus);
+}
+
+void flush_thread(void)
+{
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
+ struct task_struct *p, unsigned long tls)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+ struct pt_regs *regs;
+ struct switch_stack *stack;
+ struct switch_stack *childstack =
+ ((struct switch_stack *)childregs) - 1;
+
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ memset(childstack, 0,
+ sizeof(struct switch_stack) + sizeof(struct pt_regs));
+
+ childstack->r16 = usp; /* fn */
+ childstack->r17 = arg;
+ childstack->ra = (unsigned long) ret_from_kernel_thread;
+ childregs->estatus = STATUS_PIE;
+ childregs->sp = (unsigned long) childstack;
+
+ p->thread.ksp = (unsigned long) childstack;
+ p->thread.kregs = childregs;
+ return 0;
+ }
+
+ regs = current_pt_regs();
+ *childregs = *regs;
+ childregs->r2 = 0; /* Set the return value for the child. */
+ childregs->r7 = 0;
+
+ stack = ((struct switch_stack *) regs) - 1;
+ *childstack = *stack;
+ childstack->ra = (unsigned long)ret_from_fork;
+ p->thread.kregs = childregs;
+ p->thread.ksp = (unsigned long) childstack;
+
+ if (usp)
+ childregs->sp = usp;
+
+ /* Initialize tls register. */
+ if (clone_flags & CLONE_SETTLS)
+ childstack->r23 = tls;
+
+ return 0;
+}
+
+/*
+ * Generic dumping code. Used for panic and debug.
+ */
+void dump(struct pt_regs *fp)
+{
+ unsigned long *sp;
+ unsigned char *tp;
+ int i;
+
+ pr_emerg("\nCURRENT PROCESS:\n\n");
+ pr_emerg("COMM=%s PID=%d\n", current->comm, current->pid);
+
+ if (current->mm) {
+ pr_emerg("TEXT=%08x-%08x DATA=%08x-%08x BSS=%08x-%08x\n",
+ (int) current->mm->start_code,
+ (int) current->mm->end_code,
+ (int) current->mm->start_data,
+ (int) current->mm->end_data,
+ (int) current->mm->end_data,
+ (int) current->mm->brk);
+ pr_emerg("USER-STACK=%08x KERNEL-STACK=%08x\n\n",
+ (int) current->mm->start_stack,
+ (int)(((unsigned long) current) + THREAD_SIZE));
+ }
+
+ pr_emerg("PC: %08lx\n", fp->ea);
+ pr_emerg("SR: %08lx SP: %08lx\n",
+ (long) fp->estatus, (long) fp);
+
+ pr_emerg("r1: %08lx r2: %08lx r3: %08lx\n",
+ fp->r1, fp->r2, fp->r3);
+
+ pr_emerg("r4: %08lx r5: %08lx r6: %08lx r7: %08lx\n",
+ fp->r4, fp->r5, fp->r6, fp->r7);
+ pr_emerg("r8: %08lx r9: %08lx r10: %08lx r11: %08lx\n",
+ fp->r8, fp->r9, fp->r10, fp->r11);
+ pr_emerg("r12: %08lx r13: %08lx r14: %08lx r15: %08lx\n",
+ fp->r12, fp->r13, fp->r14, fp->r15);
+ pr_emerg("or2: %08lx ra: %08lx fp: %08lx sp: %08lx\n",
+ fp->orig_r2, fp->ra, fp->fp, fp->sp);
+ pr_emerg("\nUSP: %08x TRAPFRAME: %08x\n",
+ (unsigned int) fp->sp, (unsigned int) fp);
+
+ pr_emerg("\nCODE:");
+ tp = ((unsigned char *) fp->ea) - 0x20;
+ for (sp = (unsigned long *) tp, i = 0; (i < 0x40); i += 4) {
+ if ((i % 0x10) == 0)
+ pr_emerg("\n%08x: ", (int) (tp + i));
+ pr_emerg("%08x ", (int) *sp++);
+ }
+ pr_emerg("\n");
+
+ pr_emerg("\nKERNEL STACK:");
+ tp = ((unsigned char *) fp) - 0x40;
+ for (sp = (unsigned long *) tp, i = 0; (i < 0xc0); i += 4) {
+ if ((i % 0x10) == 0)
+ pr_emerg("\n%08x: ", (int) (tp + i));
+ pr_emerg("%08x ", (int) *sp++);
+ }
+ pr_emerg("\n");
+ pr_emerg("\n");
+
+ pr_emerg("\nUSER STACK:");
+ tp = (unsigned char *) (fp->sp - 0x10);
+ for (sp = (unsigned long *) tp, i = 0; (i < 0x80); i += 4) {
+ if ((i % 0x10) == 0)
+ pr_emerg("\n%08x: ", (int) (tp + i));
+ pr_emerg("%08x ", (int) *sp++);
+ }
+ pr_emerg("\n\n");
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+ unsigned long fp, pc;
+ unsigned long stack_page;
+ int count = 0;
+
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+
+ stack_page = (unsigned long)p;
+ fp = ((struct switch_stack *)p->thread.ksp)->fp; /* ;dgt2 */
+ do {
+ if (fp < stack_page+sizeof(struct task_struct) ||
+ fp >= 8184+stack_page) /* ;dgt2;tmp */
+ return 0;
+ pc = ((unsigned long *)fp)[1];
+ if (!in_sched_functions(pc))
+ return pc;
+ fp = *(unsigned long *) fp;
+ } while (count++ < 16); /* ;dgt2;tmp */
+ return 0;
+}
+
+/*
+ * Do necessary setup to start up a newly executed thread.
+ * Will startup in user mode (status_extension = 0).
+ */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long sp)
+{
+ memset((void *) regs, 0, sizeof(struct pt_regs));
+ regs->estatus = ESTATUS_EPIE | ESTATUS_EU;
+ regs->ea = pc;
+ regs->sp = sp;
+}
+
+asmlinkage int nios2_clone(unsigned long clone_flags, unsigned long newsp,
+ int __user *parent_tidptr, int __user *child_tidptr,
+ unsigned long tls)
+{
+ struct kernel_clone_args args = {
+ .flags = (lower_32_bits(clone_flags) & ~CSIGNAL),
+ .pidfd = parent_tidptr,
+ .child_tid = child_tidptr,
+ .parent_tid = parent_tidptr,
+ .exit_signal = (lower_32_bits(clone_flags) & CSIGNAL),
+ .stack = newsp,
+ .tls = tls,
+ };
+
+ return kernel_clone(&args);
+}
diff --git a/arch/nios2/kernel/prom.c b/arch/nios2/kernel/prom.c
new file mode 100644
index 000000000..8d98af5c7
--- /dev/null
+++ b/arch/nios2/kernel/prom.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Device tree support
+ *
+ * Copyright (C) 2013, 2015 Altera Corporation
+ * Copyright (C) 2010 Thomas Chou <thomas@wytron.com.tw>
+ *
+ * Based on MIPS support for CONFIG_OF device tree support
+ *
+ * Copyright (C) 2010 Cisco Systems Inc. <dediao@cisco.com>
+ */
+
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/memblock.h>
+#include <linux/of.h>
+#include <linux/of_fdt.h>
+#include <linux/io.h>
+
+#include <asm/sections.h>
+
+void __init early_init_devtree(void *params)
+{
+ __be32 *dtb = (u32 *)__dtb_start;
+#if defined(CONFIG_NIOS2_DTB_AT_PHYS_ADDR)
+ if (be32_to_cpup((__be32 *)CONFIG_NIOS2_DTB_PHYS_ADDR) ==
+ OF_DT_HEADER) {
+ params = (void *)CONFIG_NIOS2_DTB_PHYS_ADDR;
+ early_init_dt_scan(params);
+ return;
+ }
+#endif
+ if (be32_to_cpu((__be32) *dtb) == OF_DT_HEADER)
+ params = (void *)__dtb_start;
+
+ early_init_dt_scan(params);
+}
diff --git a/arch/nios2/kernel/ptrace.c b/arch/nios2/kernel/ptrace.c
new file mode 100644
index 000000000..a6ea9e1b4
--- /dev/null
+++ b/arch/nios2/kernel/ptrace.c
@@ -0,0 +1,146 @@
+/*
+ * Copyright (C) 2014 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/elf.h>
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tracehook.h>
+#include <linux/uaccess.h>
+#include <linux/user.h>
+
+static int genregs_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ const struct pt_regs *regs = task_pt_regs(target);
+ const struct switch_stack *sw = (struct switch_stack *)regs - 1;
+
+ membuf_zero(&to, 4); // R0
+ membuf_write(&to, &regs->r1, 7 * 4); // R1..R7
+ membuf_write(&to, &regs->r8, 8 * 4); // R8..R15
+ membuf_write(&to, sw, 8 * 4); // R16..R23
+ membuf_zero(&to, 2 * 4); /* et and bt */
+ membuf_store(&to, regs->gp);
+ membuf_store(&to, regs->sp);
+ membuf_store(&to, regs->fp);
+ membuf_store(&to, regs->ea);
+ membuf_zero(&to, 4); // PTR_BA
+ membuf_store(&to, regs->ra);
+ membuf_store(&to, regs->ea); /* use ea for PC */
+ return membuf_zero(&to, (NUM_PTRACE_REG - PTR_PC) * 4);
+}
+
+/*
+ * Set the thread state from a regset passed in via ptrace
+ */
+static int genregs_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ struct pt_regs *regs = task_pt_regs(target);
+ const struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ int ret = 0;
+
+#define REG_IGNORE_RANGE(START, END) \
+ if (!ret) \
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf, \
+ START * 4, (END * 4) + 4);
+
+#define REG_IN_ONE(PTR, LOC) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), LOC * 4, (LOC * 4) + 4);
+
+#define REG_IN_RANGE(PTR, START, END) \
+ if (!ret) \
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, \
+ (void *)(PTR), START * 4, (END * 4) + 4);
+
+ REG_IGNORE_RANGE(PTR_R0, PTR_R0);
+ REG_IN_RANGE(&regs->r1, PTR_R1, PTR_R7);
+ REG_IN_RANGE(&regs->r8, PTR_R8, PTR_R15);
+ REG_IN_RANGE(sw, PTR_R16, PTR_R23);
+ REG_IGNORE_RANGE(PTR_R24, PTR_R25); /* et and bt */
+ REG_IN_ONE(&regs->gp, PTR_GP);
+ REG_IN_ONE(&regs->sp, PTR_SP);
+ REG_IN_ONE(&regs->fp, PTR_FP);
+ REG_IN_ONE(&regs->ea, PTR_EA);
+ REG_IGNORE_RANGE(PTR_BA, PTR_BA);
+ REG_IN_ONE(&regs->ra, PTR_RA);
+ REG_IN_ONE(&regs->ea, PTR_PC); /* use ea for PC */
+ if (!ret)
+ ret = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
+ PTR_STATUS * 4, -1);
+
+ return ret;
+}
+
+/*
+ * Define the register sets available on Nios2 under Linux
+ */
+enum nios2_regset {
+ REGSET_GENERAL,
+};
+
+static const struct user_regset nios2_regsets[] = {
+ [REGSET_GENERAL] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = NUM_PTRACE_REG,
+ .size = sizeof(unsigned long),
+ .align = sizeof(unsigned long),
+ .regset_get = genregs_get,
+ .set = genregs_set,
+ }
+};
+
+static const struct user_regset_view nios2_user_view = {
+ .name = "nios2",
+ .e_machine = ELF_ARCH,
+ .ei_osabi = ELF_OSABI,
+ .regsets = nios2_regsets,
+ .n = ARRAY_SIZE(nios2_regsets)
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &nios2_user_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+
+}
+
+long arch_ptrace(struct task_struct *child, long request, unsigned long addr,
+ unsigned long data)
+{
+ return ptrace_request(child, request, addr, data);
+}
+
+asmlinkage int do_syscall_trace_enter(void)
+{
+ int ret = 0;
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ ret = tracehook_report_syscall_entry(task_pt_regs(current));
+
+ return ret;
+}
+
+asmlinkage void do_syscall_trace_exit(void)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(task_pt_regs(current), 0);
+}
diff --git a/arch/nios2/kernel/setup.c b/arch/nios2/kernel/setup.c
new file mode 100644
index 000000000..3c6e3c813
--- /dev/null
+++ b/arch/nios2/kernel/setup.c
@@ -0,0 +1,199 @@
+/*
+ * Nios2-specific parts of system setup
+ *
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ * Copyright (C) 2001 Vic Phillips <vic@microtronix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sched/task.h>
+#include <linux/console.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/of_fdt.h>
+#include <linux/screen_info.h>
+
+#include <asm/mmu_context.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/cpuinfo.h>
+
+unsigned long memory_start;
+EXPORT_SYMBOL(memory_start);
+
+unsigned long memory_end;
+EXPORT_SYMBOL(memory_end);
+
+unsigned long memory_size;
+
+static struct pt_regs fake_regs = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
+ 0, 0, 0, 0, 0, 0,
+ 0};
+
+#ifdef CONFIG_VT
+struct screen_info screen_info;
+#endif
+
+/* Copy a short hook instruction sequence to the exception address */
+static inline void copy_exception_handler(unsigned int addr)
+{
+ unsigned int start = (unsigned int) exception_handler_hook;
+ volatile unsigned int tmp = 0;
+
+ if (start == addr) {
+ /* The CPU exception address already points to the handler. */
+ return;
+ }
+
+ __asm__ __volatile__ (
+ "ldw %2,0(%0)\n"
+ "stw %2,0(%1)\n"
+ "ldw %2,4(%0)\n"
+ "stw %2,4(%1)\n"
+ "ldw %2,8(%0)\n"
+ "stw %2,8(%1)\n"
+ "flushd 0(%1)\n"
+ "flushd 4(%1)\n"
+ "flushd 8(%1)\n"
+ "flushi %1\n"
+ "addi %1,%1,4\n"
+ "flushi %1\n"
+ "addi %1,%1,4\n"
+ "flushi %1\n"
+ "flushp\n"
+ : /* no output registers */
+ : "r" (start), "r" (addr), "r" (tmp)
+ : "memory"
+ );
+}
+
+/* Copy the fast TLB miss handler */
+static inline void copy_fast_tlb_miss_handler(unsigned int addr)
+{
+ unsigned int start = (unsigned int) fast_handler;
+ unsigned int end = (unsigned int) fast_handler_end;
+ volatile unsigned int tmp = 0;
+
+ __asm__ __volatile__ (
+ "1:\n"
+ " ldw %3,0(%0)\n"
+ " stw %3,0(%1)\n"
+ " flushd 0(%1)\n"
+ " flushi %1\n"
+ " flushp\n"
+ " addi %0,%0,4\n"
+ " addi %1,%1,4\n"
+ " bne %0,%2,1b\n"
+ : /* no output registers */
+ : "r" (start), "r" (addr), "r" (end), "r" (tmp)
+ : "memory"
+ );
+}
+
+/*
+ * save args passed from u-boot, called from head.S
+ *
+ * @r4: NIOS magic
+ * @r5: initrd start
+ * @r6: initrd end or fdt
+ * @r7: kernel command line
+ */
+asmlinkage void __init nios2_boot_init(unsigned r4, unsigned r5, unsigned r6,
+ unsigned r7)
+{
+ unsigned dtb_passed = 0;
+ char cmdline_passed[COMMAND_LINE_SIZE] __maybe_unused = { 0, };
+
+#if defined(CONFIG_NIOS2_PASS_CMDLINE)
+ if (r4 == 0x534f494e) { /* r4 is magic NIOS */
+#if defined(CONFIG_BLK_DEV_INITRD)
+ if (r5) { /* initramfs */
+ initrd_start = r5;
+ initrd_end = r6;
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+ dtb_passed = r6;
+
+ if (r7)
+ strlcpy(cmdline_passed, (char *)r7, COMMAND_LINE_SIZE);
+ }
+#endif
+
+ early_init_devtree((void *)dtb_passed);
+
+#ifndef CONFIG_CMDLINE_FORCE
+ if (cmdline_passed[0])
+ strlcpy(boot_command_line, cmdline_passed, COMMAND_LINE_SIZE);
+#ifdef CONFIG_NIOS2_CMDLINE_IGNORE_DTB
+ else
+ strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+#endif
+#endif
+
+ parse_early_param();
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ int dram_start;
+
+ console_verbose();
+
+ dram_start = memblock_start_of_DRAM();
+ memory_size = memblock_phys_mem_size();
+ memory_start = PAGE_ALIGN((unsigned long)__pa(_end));
+ memory_end = (unsigned long) CONFIG_NIOS2_MEM_BASE + memory_size;
+
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+ init_task.thread.kregs = &fake_regs;
+
+ /* Keep a copy of command line */
+ *cmdline_p = boot_command_line;
+
+ min_low_pfn = PFN_UP(memory_start);
+ max_low_pfn = PFN_DOWN(memory_end);
+ max_mapnr = max_low_pfn;
+
+ memblock_reserve(dram_start, memory_start - dram_start);
+#ifdef CONFIG_BLK_DEV_INITRD
+ if (initrd_start) {
+ memblock_reserve(virt_to_phys((void *)initrd_start),
+ initrd_end - initrd_start);
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ early_init_fdt_reserve_self();
+ early_init_fdt_scan_reserved_mem();
+
+ unflatten_and_copy_device_tree();
+
+ setup_cpuinfo();
+
+ copy_exception_handler(cpuinfo.exception_addr);
+
+ mmu_init();
+
+ copy_fast_tlb_miss_handler(cpuinfo.fast_tlb_miss_exc_addr);
+
+ /*
+ * Initialize MMU context handling here because data from cpuinfo is
+ * needed for this.
+ */
+ mmu_context_init();
+
+ /*
+ * get kmalloc into gear
+ */
+ paging_init();
+}
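setup_arch() above derives the page-frame bounds with PFN_UP()/PFN_DOWN(): the first usable frame is rounded up from memory_start and the last one is rounded down from memory_end. A minimal sketch of that arithmetic, assuming 4 KiB pages (illustrative only; the addresses are made up):

#include <stdio.h>

#define PAGE_SHIFT	12		/* assumption: 4 KiB pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

/* Same rounding as the kernel's PFN_UP()/PFN_DOWN() macros. */
static unsigned long pfn_up(unsigned long addr)
{
	return (addr + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

static unsigned long pfn_down(unsigned long addr)
{
	return addr >> PAGE_SHIFT;
}

int main(void)
{
	unsigned long memory_start = 0x00413000;	/* e.g. PAGE_ALIGN(__pa(_end)) */
	unsigned long memory_end   = 0x04000000;	/* e.g. base + 64 MiB */

	printf("min_low_pfn=0x%lx max_low_pfn=0x%lx\n",
	       pfn_up(memory_start), pfn_down(memory_end));	/* 0x413 / 0x4000 */
	return 0;
}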
diff --git a/arch/nios2/kernel/signal.c b/arch/nios2/kernel/signal.c
new file mode 100644
index 000000000..68d626c4f
--- /dev/null
+++ b/arch/nios2/kernel/signal.c
@@ -0,0 +1,328 @@
+/*
+ * Copyright (C) 2013-2014 Altera Corporation
+ * Copyright (C) 2011-2012 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file COPYING in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <linux/personality.h>
+#include <linux/tracehook.h>
+
+#include <asm/ucontext.h>
+#include <asm/cacheflush.h>
+
+#define _BLOCKABLE (~(sigmask(SIGKILL) | sigmask(SIGSTOP)))
+
+/*
+ * Do a signal return; undo the signal stack.
+ *
+ * Keep the return code on the stack quadword aligned!
+ * That makes the cache flush below easier.
+ */
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+};
+
+static inline int rt_restore_ucontext(struct pt_regs *regs,
+ struct switch_stack *sw,
+ struct ucontext __user *uc, int *pr2)
+{
+ int temp;
+ unsigned long __user *gregs = uc->uc_mcontext.gregs;
+ int err;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err = __get_user(temp, &uc->uc_mcontext.version);
+ if (temp != MCONTEXT_VERSION)
+ goto badframe;
+ /* restore passed registers */
+ err |= __get_user(regs->r1, &gregs[0]);
+ err |= __get_user(regs->r2, &gregs[1]);
+ err |= __get_user(regs->r3, &gregs[2]);
+ err |= __get_user(regs->r4, &gregs[3]);
+ err |= __get_user(regs->r5, &gregs[4]);
+ err |= __get_user(regs->r6, &gregs[5]);
+ err |= __get_user(regs->r7, &gregs[6]);
+ err |= __get_user(regs->r8, &gregs[7]);
+ err |= __get_user(regs->r9, &gregs[8]);
+ err |= __get_user(regs->r10, &gregs[9]);
+ err |= __get_user(regs->r11, &gregs[10]);
+ err |= __get_user(regs->r12, &gregs[11]);
+ err |= __get_user(regs->r13, &gregs[12]);
+ err |= __get_user(regs->r14, &gregs[13]);
+ err |= __get_user(regs->r15, &gregs[14]);
+ err |= __get_user(sw->r16, &gregs[15]);
+ err |= __get_user(sw->r17, &gregs[16]);
+ err |= __get_user(sw->r18, &gregs[17]);
+ err |= __get_user(sw->r19, &gregs[18]);
+ err |= __get_user(sw->r20, &gregs[19]);
+ err |= __get_user(sw->r21, &gregs[20]);
+ err |= __get_user(sw->r22, &gregs[21]);
+ err |= __get_user(sw->r23, &gregs[22]);
+ /* gregs[23] is handled below */
+ /* Verify: should fp and gp be settable from user space? */
+ err |= __get_user(sw->fp, &gregs[24]);
+ err |= __get_user(sw->gp, &gregs[25]);
+
+ /* Not really necessary, there are no user-settable bits here. */
+ err |= __get_user(temp, &gregs[26]);
+ err |= __get_user(regs->ea, &gregs[27]);
+
+ err |= __get_user(regs->ra, &gregs[23]);
+ err |= __get_user(regs->sp, &gregs[28]);
+
+ regs->orig_r2 = -1; /* disable syscall checks */
+
+ err |= restore_altstack(&uc->uc_stack);
+ if (err)
+ goto badframe;
+
+ *pr2 = regs->r2;
+ return err;
+
+badframe:
+ return 1;
+}
+
+asmlinkage int do_rt_sigreturn(struct switch_stack *sw)
+{
+ struct pt_regs *regs = (struct pt_regs *)(sw + 1);
+ /* Verify, can we follow the stack back */
+ struct rt_sigframe __user *frame;
+ sigset_t set;
+ int rval;
+
+ frame = (struct rt_sigframe __user *) regs->sp;
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (rt_restore_ucontext(regs, sw, &frame->uc, &rval))
+ goto badframe;
+
+ return rval;
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
+{
+ struct switch_stack *sw = (struct switch_stack *)regs - 1;
+ unsigned long __user *gregs = uc->uc_mcontext.gregs;
+ int err = 0;
+
+ err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
+ err |= __put_user(regs->r1, &gregs[0]);
+ err |= __put_user(regs->r2, &gregs[1]);
+ err |= __put_user(regs->r3, &gregs[2]);
+ err |= __put_user(regs->r4, &gregs[3]);
+ err |= __put_user(regs->r5, &gregs[4]);
+ err |= __put_user(regs->r6, &gregs[5]);
+ err |= __put_user(regs->r7, &gregs[6]);
+ err |= __put_user(regs->r8, &gregs[7]);
+ err |= __put_user(regs->r9, &gregs[8]);
+ err |= __put_user(regs->r10, &gregs[9]);
+ err |= __put_user(regs->r11, &gregs[10]);
+ err |= __put_user(regs->r12, &gregs[11]);
+ err |= __put_user(regs->r13, &gregs[12]);
+ err |= __put_user(regs->r14, &gregs[13]);
+ err |= __put_user(regs->r15, &gregs[14]);
+ err |= __put_user(sw->r16, &gregs[15]);
+ err |= __put_user(sw->r17, &gregs[16]);
+ err |= __put_user(sw->r18, &gregs[17]);
+ err |= __put_user(sw->r19, &gregs[18]);
+ err |= __put_user(sw->r20, &gregs[19]);
+ err |= __put_user(sw->r21, &gregs[20]);
+ err |= __put_user(sw->r22, &gregs[21]);
+ err |= __put_user(sw->r23, &gregs[22]);
+ err |= __put_user(regs->ra, &gregs[23]);
+ err |= __put_user(sw->fp, &gregs[24]);
+ err |= __put_user(sw->gp, &gregs[25]);
+ err |= __put_user(regs->ea, &gregs[27]);
+ err |= __put_user(regs->sp, &gregs[28]);
+ return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs,
+ size_t frame_size)
+{
+ unsigned long usp;
+
+ /* Default to using normal stack. */
+ usp = regs->sp;
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ usp = sigsp(usp, ksig);
+
+ /* Verify: should this be 32-bit or 64-bit aligned? (8-byte for now) */
+ return (void __user *)((usp - frame_size) & -8UL);
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(0, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= rt_setup_ucontext(&frame->uc, regs);
+ err |= copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+ if (err)
+ goto give_sigsegv;
+
+ /* Set up to return from userspace; jump to the fixed-address sigreturn
+ * trampoline on the kuser page. */
+ regs->ra = (unsigned long) (0x1044);
+
+ /* Set up registers for signal handler */
+ regs->sp = (unsigned long) frame;
+ regs->r4 = (unsigned long) ksig->sig;
+ regs->r5 = (unsigned long) &frame->info;
+ regs->r6 = (unsigned long) &frame->uc;
+ regs->ea = (unsigned long) ksig->ka.sa.sa_handler;
+ return 0;
+
+give_sigsegv:
+ force_sigsegv(ksig->sig);
+ return -EFAULT;
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ int ret;
+ sigset_t *oldset = sigmask_to_save();
+
+ /* set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static int do_signal(struct pt_regs *regs)
+{
+ unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
+ int restart = 0;
+ struct ksignal ksig;
+
+ current->thread.kregs = regs;
+
+ /*
+ * If we were from a system call, check for system call restarting...
+ */
+ if (regs->orig_r2 >= 0 && regs->r1) {
+ continue_addr = regs->ea;
+ restart_addr = continue_addr - 4;
+ retval = regs->r2;
+
+ /*
+ * Prepare for system call restart. We do this here so that a
+ * debugger will see the already changed PC.
+ */
+ switch (retval) {
+ case ERESTART_RESTARTBLOCK:
+ restart = -2;
+ fallthrough;
+ case ERESTARTNOHAND:
+ case ERESTARTSYS:
+ case ERESTARTNOINTR:
+ restart++;
+ regs->r2 = regs->orig_r2;
+ regs->r7 = regs->orig_r7;
+ regs->ea = restart_addr;
+ break;
+ }
+ regs->orig_r2 = -1;
+ }
+
+ if (get_signal(&ksig)) {
+ /* handler */
+ if (unlikely(restart && regs->ea == restart_addr)) {
+ if (retval == ERESTARTNOHAND ||
+ retval == ERESTART_RESTARTBLOCK ||
+ (retval == ERESTARTSYS
+ && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
+ regs->r2 = EINTR;
+ regs->r7 = 1;
+ regs->ea = continue_addr;
+ }
+ }
+ handle_signal(&ksig, regs);
+ return 0;
+ }
+
+ /*
+ * No handler present
+ */
+ if (unlikely(restart) && regs->ea == restart_addr) {
+ regs->ea = continue_addr;
+ regs->r2 = __NR_restart_syscall;
+ }
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask back.
+ */
+ restore_saved_sigmask();
+
+ return restart;
+}
+
+asmlinkage int do_notify_resume(struct pt_regs *regs)
+{
+ /*
+ * We want the common case to go fast, which is why we may in certain
+ * cases get here from kernel mode. Just return without doing anything
+ * if so.
+ */
+ if (!user_mode(regs))
+ return 0;
+
+ if (test_thread_flag(TIF_SIGPENDING) ||
+ test_thread_flag(TIF_NOTIFY_SIGNAL)) {
+ int restart = do_signal(regs);
+
+ if (unlikely(restart)) {
+ /*
+ * Restart without handlers.
+ * Deal with it without leaving
+ * the kernel space.
+ */
+ return restart;
+ }
+ } else if (test_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+
+ return 0;
+}
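In do_signal() above, orig_r2 >= 0 marks a return from a system call, and restart_addr = ea - 4 points back at the trap instruction so the call can be re-issued. Whether it is re-issued once a handler runs depends on the saved error code and the handler's SA_RESTART flag; the sketch below condenses that decision (the ERESTART* constants mirror the kernel-internal values; illustrative only, not part of the kernel):

#include <signal.h>
#include <stdbool.h>

/* Kernel-internal restart codes (values from include/linux/errno.h). */
#define ERESTARTSYS		512
#define ERESTARTNOINTR		513
#define ERESTARTNOHAND		514
#define ERESTART_RESTARTBLOCK	516

/*
 * Returns true if the interrupted syscall is re-executed (ea rewound by 4),
 * false if it completes with EINTR instead, mirroring do_signal() above.
 */
static bool syscall_restarts(int retval, int sa_flags)
{
	switch (retval) {
	case ERESTARTNOINTR:			/* always restarted */
		return true;
	case ERESTARTSYS:			/* restarted only with SA_RESTART */
		return (sa_flags & SA_RESTART) != 0;
	case ERESTARTNOHAND:			/* never restarted once a handler runs */
	case ERESTART_RESTARTBLOCK:
	default:
		return false;
	}
}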
diff --git a/arch/nios2/kernel/sys_nios2.c b/arch/nios2/kernel/sys_nios2.c
new file mode 100644
index 000000000..b1ca85699
--- /dev/null
+++ b/arch/nios2/kernel/sys_nios2.c
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2013 Altera Corporation
+ * Copyright (C) 2011-2012 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/slab.h>
+#include <linux/syscalls.h>
+
+#include <asm/cacheflush.h>
+#include <asm/traps.h>
+
+/* sys_cacheflush -- flush the processor cache. */
+asmlinkage int sys_cacheflush(unsigned long addr, unsigned long len,
+ unsigned int op)
+{
+ struct vm_area_struct *vma;
+ struct mm_struct *mm = current->mm;
+
+ if (len == 0)
+ return 0;
+
+ /* We only support op 0 for now; return an error if op is non-zero. */
+ if (op)
+ return -EINVAL;
+
+ /* Check for overflow */
+ if (addr + len < addr)
+ return -EFAULT;
+
+ if (mmap_read_lock_killable(mm))
+ return -EINTR;
+
+ /*
+ * Verify that the specified address region actually belongs
+ * to this process.
+ */
+ vma = find_vma(mm, addr);
+ if (vma == NULL || addr < vma->vm_start || addr + len > vma->vm_end) {
+ mmap_read_unlock(mm);
+ return -EFAULT;
+ }
+
+ flush_cache_range(vma, addr, addr + len);
+
+ mmap_read_unlock(mm);
+ return 0;
+}
+
+asmlinkage int sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
diff --git a/arch/nios2/kernel/syscall_table.c b/arch/nios2/kernel/syscall_table.c
new file mode 100644
index 000000000..c2875a6dd
--- /dev/null
+++ b/arch/nios2/kernel/syscall_table.c
@@ -0,0 +1,18 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright Altera Corporation (C) 2013. All rights reserved
+ */
+
+#include <linux/syscalls.h>
+#include <linux/signal.h>
+#include <linux/unistd.h>
+
+#include <asm/syscalls.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls-1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
diff --git a/arch/nios2/kernel/time.c b/arch/nios2/kernel/time.c
new file mode 100644
index 000000000..54467d008
--- /dev/null
+++ b/arch/nios2/kernel/time.c
@@ -0,0 +1,359 @@
+/*
+ * Copyright (C) 2013-2014 Altera Corporation
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/interrupt.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+
+#define ALTR_TIMER_COMPATIBLE "altr,timer-1.0"
+
+#define ALTERA_TIMER_STATUS_REG 0
+#define ALTERA_TIMER_CONTROL_REG 4
+#define ALTERA_TIMER_PERIODL_REG 8
+#define ALTERA_TIMER_PERIODH_REG 12
+#define ALTERA_TIMER_SNAPL_REG 16
+#define ALTERA_TIMER_SNAPH_REG 20
+
+#define ALTERA_TIMER_CONTROL_ITO_MSK (0x1)
+#define ALTERA_TIMER_CONTROL_CONT_MSK (0x2)
+#define ALTERA_TIMER_CONTROL_START_MSK (0x4)
+#define ALTERA_TIMER_CONTROL_STOP_MSK (0x8)
+
+struct nios2_timer {
+ void __iomem *base;
+ unsigned long freq;
+};
+
+struct nios2_clockevent_dev {
+ struct nios2_timer timer;
+ struct clock_event_device ced;
+};
+
+struct nios2_clocksource {
+ struct nios2_timer timer;
+ struct clocksource cs;
+};
+
+static inline struct nios2_clockevent_dev *
+ to_nios2_clkevent(struct clock_event_device *evt)
+{
+ return container_of(evt, struct nios2_clockevent_dev, ced);
+}
+
+static inline struct nios2_clocksource *
+ to_nios2_clksource(struct clocksource *cs)
+{
+ return container_of(cs, struct nios2_clocksource, cs);
+}
+
+static u16 timer_readw(struct nios2_timer *timer, u32 offs)
+{
+ return readw(timer->base + offs);
+}
+
+static void timer_writew(struct nios2_timer *timer, u16 val, u32 offs)
+{
+ writew(val, timer->base + offs);
+}
+
+static inline unsigned long read_timersnapshot(struct nios2_timer *timer)
+{
+ unsigned long count;
+
+ timer_writew(timer, 0, ALTERA_TIMER_SNAPL_REG);
+ count = timer_readw(timer, ALTERA_TIMER_SNAPH_REG) << 16 |
+ timer_readw(timer, ALTERA_TIMER_SNAPL_REG);
+
+ return count;
+}
+
+static u64 nios2_timer_read(struct clocksource *cs)
+{
+ struct nios2_clocksource *nios2_cs = to_nios2_clksource(cs);
+ unsigned long flags;
+ u32 count;
+
+ local_irq_save(flags);
+ count = read_timersnapshot(&nios2_cs->timer);
+ local_irq_restore(flags);
+
+ /* Counter is counting down */
+ return ~count;
+}
+
+static struct nios2_clocksource nios2_cs = {
+ .cs = {
+ .name = "nios2-clksrc",
+ .rating = 250,
+ .read = nios2_timer_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+ },
+};
+
+cycles_t get_cycles(void)
+{
+ /* Only read timer if it has been initialized */
+ if (nios2_cs.timer.base)
+ return nios2_timer_read(&nios2_cs.cs);
+ return 0;
+}
+EXPORT_SYMBOL(get_cycles);
+
+static void nios2_timer_start(struct nios2_timer *timer)
+{
+ u16 ctrl;
+
+ ctrl = timer_readw(timer, ALTERA_TIMER_CONTROL_REG);
+ ctrl |= ALTERA_TIMER_CONTROL_START_MSK;
+ timer_writew(timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+}
+
+static void nios2_timer_stop(struct nios2_timer *timer)
+{
+ u16 ctrl;
+
+ ctrl = timer_readw(timer, ALTERA_TIMER_CONTROL_REG);
+ ctrl |= ALTERA_TIMER_CONTROL_STOP_MSK;
+ timer_writew(timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+}
+
+static void nios2_timer_config(struct nios2_timer *timer, unsigned long period,
+ bool periodic)
+{
+ u16 ctrl;
+
+ /* The timer's actual period is one cycle greater than the value
+ * stored in the period register. */
+ period--;
+
+ ctrl = timer_readw(timer, ALTERA_TIMER_CONTROL_REG);
+ /* stop counter */
+ timer_writew(timer, ctrl | ALTERA_TIMER_CONTROL_STOP_MSK,
+ ALTERA_TIMER_CONTROL_REG);
+
+ /* write new count */
+ timer_writew(timer, period, ALTERA_TIMER_PERIODL_REG);
+ timer_writew(timer, period >> 16, ALTERA_TIMER_PERIODH_REG);
+
+ ctrl |= ALTERA_TIMER_CONTROL_START_MSK | ALTERA_TIMER_CONTROL_ITO_MSK;
+ if (periodic)
+ ctrl |= ALTERA_TIMER_CONTROL_CONT_MSK;
+ else
+ ctrl &= ~ALTERA_TIMER_CONTROL_CONT_MSK;
+ timer_writew(timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+}
+
+static int nios2_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *evt)
+{
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+
+ nios2_timer_config(&nios2_ced->timer, delta, false);
+
+ return 0;
+}
+
+static int nios2_timer_shutdown(struct clock_event_device *evt)
+{
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+ struct nios2_timer *timer = &nios2_ced->timer;
+
+ nios2_timer_stop(timer);
+ return 0;
+}
+
+static int nios2_timer_set_periodic(struct clock_event_device *evt)
+{
+ unsigned long period;
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+ struct nios2_timer *timer = &nios2_ced->timer;
+
+ period = DIV_ROUND_UP(timer->freq, HZ);
+ nios2_timer_config(timer, period, true);
+ return 0;
+}
+
+static int nios2_timer_resume(struct clock_event_device *evt)
+{
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+ struct nios2_timer *timer = &nios2_ced->timer;
+
+ nios2_timer_start(timer);
+ return 0;
+}
+
+irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = (struct clock_event_device *) dev_id;
+ struct nios2_clockevent_dev *nios2_ced = to_nios2_clkevent(evt);
+
+ /* Clear the interrupt condition */
+ timer_writew(&nios2_ced->timer, 0, ALTERA_TIMER_STATUS_REG);
+ evt->event_handler(evt);
+
+ return IRQ_HANDLED;
+}
+
+static int __init nios2_timer_get_base_and_freq(struct device_node *np,
+ void __iomem **base, u32 *freq)
+{
+ *base = of_iomap(np, 0);
+ if (!*base) {
+ pr_crit("Unable to map reg for %pOFn\n", np);
+ return -ENXIO;
+ }
+
+ if (of_property_read_u32(np, "clock-frequency", freq)) {
+ pr_crit("Unable to get %pOFn clock frequency\n", np);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static struct nios2_clockevent_dev nios2_ce = {
+ .ced = {
+ .name = "nios2-clkevent",
+ .features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+ .rating = 250,
+ .shift = 32,
+ .set_next_event = nios2_timer_set_next_event,
+ .set_state_shutdown = nios2_timer_shutdown,
+ .set_state_periodic = nios2_timer_set_periodic,
+ .set_state_oneshot = nios2_timer_shutdown,
+ .tick_resume = nios2_timer_resume,
+ },
+};
+
+static __init int nios2_clockevent_init(struct device_node *timer)
+{
+ void __iomem *iobase;
+ u32 freq;
+ int irq, ret;
+
+ ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ if (ret)
+ return ret;
+
+ irq = irq_of_parse_and_map(timer, 0);
+ if (!irq) {
+ pr_crit("Unable to parse timer irq\n");
+ return -EINVAL;
+ }
+
+ nios2_ce.timer.base = iobase;
+ nios2_ce.timer.freq = freq;
+
+ nios2_ce.ced.cpumask = cpumask_of(0);
+ nios2_ce.ced.irq = irq;
+
+ nios2_timer_stop(&nios2_ce.timer);
+ /* clear pending interrupt */
+ timer_writew(&nios2_ce.timer, 0, ALTERA_TIMER_STATUS_REG);
+
+ ret = request_irq(irq, timer_interrupt, IRQF_TIMER, timer->name,
+ &nios2_ce.ced);
+ if (ret) {
+ pr_crit("Unable to setup timer irq\n");
+ return ret;
+ }
+
+ clockevents_config_and_register(&nios2_ce.ced, freq, 1, ULONG_MAX);
+
+ return 0;
+}
+
+static __init int nios2_clocksource_init(struct device_node *timer)
+{
+ unsigned int ctrl;
+ void __iomem *iobase;
+ u32 freq;
+ int ret;
+
+ ret = nios2_timer_get_base_and_freq(timer, &iobase, &freq);
+ if (ret)
+ return ret;
+
+ nios2_cs.timer.base = iobase;
+ nios2_cs.timer.freq = freq;
+
+ ret = clocksource_register_hz(&nios2_cs.cs, freq);
+ if (ret)
+ return ret;
+
+ timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODL_REG);
+ timer_writew(&nios2_cs.timer, USHRT_MAX, ALTERA_TIMER_PERIODH_REG);
+
+ /* interrupt disable + continuous + start */
+ ctrl = ALTERA_TIMER_CONTROL_CONT_MSK | ALTERA_TIMER_CONTROL_START_MSK;
+ timer_writew(&nios2_cs.timer, ctrl, ALTERA_TIMER_CONTROL_REG);
+
+ /* Calibrate the delay loop directly */
+ lpj_fine = freq / HZ;
+
+ return 0;
+}
+
+/*
+ * The first timer instance is used as the clockevent. If there are two or
+ * more instances, the second one is used as the clocksource and all
+ * others are unused.
+ */
+static int __init nios2_time_init(struct device_node *timer)
+{
+ static int num_called;
+ int ret;
+
+ switch (num_called) {
+ case 0:
+ ret = nios2_clockevent_init(timer);
+ break;
+ case 1:
+ ret = nios2_clocksource_init(timer);
+ break;
+ default:
+ ret = 0;
+ break;
+ }
+
+ num_called++;
+
+ return ret;
+}
+
+void read_persistent_clock64(struct timespec64 *ts)
+{
+ ts->tv_sec = mktime64(2007, 1, 1, 0, 0, 0);
+ ts->tv_nsec = 0;
+}
+
+void __init time_init(void)
+{
+ struct device_node *np;
+ int count = 0;
+
+ for_each_compatible_node(np, NULL, ALTR_TIMER_COMPATIBLE)
+ count++;
+
+ if (count < 2)
+ panic("only %d timer(s) found, the system needs 2 timers\n", count);
+
+ timer_probe();
+}
+
+TIMER_OF_DECLARE(nios2_timer, ALTR_TIMER_COMPATIBLE, nios2_time_init);
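The Altera timer used above only exposes 16-bit registers: writing any value to SNAPL latches the full 32-bit count into the SNAPL/SNAPH pair, which read_timersnapshot() then combines, and nios2_timer_read() inverts the result because the counter counts down. A minimal sketch of that access pattern against a hypothetical already-mapped timer (illustrative only):

#include <stdint.h>

#define ALTERA_TIMER_SNAPL_REG	16	/* byte offsets as in the driver above */
#define ALTERA_TIMER_SNAPH_REG	20

/* 'base' is assumed to point at a memory-mapped Altera timer instance. */
static uint32_t read_snapshot(volatile uint16_t *base)
{
	/* Any write to SNAPL latches the current 32-bit count. */
	base[ALTERA_TIMER_SNAPL_REG / 2] = 0;

	return ((uint32_t)base[ALTERA_TIMER_SNAPH_REG / 2] << 16) |
	       base[ALTERA_TIMER_SNAPL_REG / 2];
}

/* The clocksource must count up, so invert the down-counting snapshot. */
static uint32_t clocksource_cycles(volatile uint16_t *base)
{
	return ~read_snapshot(base);
}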
diff --git a/arch/nios2/kernel/traps.c b/arch/nios2/kernel/traps.c
new file mode 100644
index 000000000..862081780
--- /dev/null
+++ b/arch/nios2/kernel/traps.c
@@ -0,0 +1,201 @@
+/*
+ * Hardware exception handling
+ *
+ * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
+ * Copyright (C) 2004 Microtronix Datacom Ltd.
+ * Copyright (C) 2001 Vic Phillips
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/ptrace.h>
+
+#include <asm/traps.h>
+#include <asm/sections.h>
+#include <linux/uaccess.h>
+
+static DEFINE_SPINLOCK(die_lock);
+
+static void _send_sig(int signo, int code, unsigned long addr)
+{
+ force_sig_fault(signo, code, (void __user *) addr);
+}
+
+void die(const char *str, struct pt_regs *regs, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ pr_warn("Oops: %s, sig: %ld\n", str, err);
+ show_regs(regs);
+ spin_unlock_irq(&die_lock);
+ /*
+ * make_task_dead() should take care of panic'ing from an interrupt
+ * context so we don't handle it here
+ */
+ make_task_dead(err);
+}
+
+void _exception(int signo, struct pt_regs *regs, int code, unsigned long addr)
+{
+ if (!user_mode(regs))
+ die("Exception in kernel mode", regs, signo);
+
+ _send_sig(signo, code, addr);
+}
+
+/*
+ * show_stack() is an external API which we do not use ourselves.
+ */
+
+int kstack_depth_to_print = 48;
+
+void show_stack(struct task_struct *task, unsigned long *stack,
+ const char *loglvl)
+{
+ unsigned long *endstack, addr;
+ int i;
+
+ if (!stack) {
+ if (task)
+ stack = (unsigned long *)task->thread.ksp;
+ else
+ stack = (unsigned long *)&stack;
+ }
+
+ addr = (unsigned long) stack;
+ endstack = (unsigned long *) PAGE_ALIGN(addr);
+
+ printk("%sStack from %08lx:", loglvl, (unsigned long)stack);
+ for (i = 0; i < kstack_depth_to_print; i++) {
+ if (stack + 1 > endstack)
+ break;
+ if (i % 8 == 0)
+ printk("%s\n ", loglvl);
+ printk("%s %08lx", loglvl, *stack++);
+ }
+
+ printk("%s\nCall Trace:", loglvl);
+ i = 0;
+ while (stack + 1 <= endstack) {
+ addr = *stack++;
+ /*
+ * If the address is either in the text segment of the
+ * kernel, or in the region which contains vmalloc'ed
+ * memory, it *may* be the address of a calling
+ * routine; if so, print it so that someone tracing
+ * down the cause of the crash will be able to figure
+ * out the call path that was taken.
+ */
+ if (((addr >= (unsigned long) _stext) &&
+ (addr <= (unsigned long) _etext))) {
+ if (i % 4 == 0)
+ pr_emerg("\n ");
+ printk("%s [<%08lx>]", loglvl, addr);
+ i++;
+ }
+ }
+ printk("%s\n", loglvl);
+}
+
+void __init trap_init(void)
+{
+ /* Nothing to do here */
+}
+
+/* Breakpoint handler */
+asmlinkage void breakpoint_c(struct pt_regs *fp)
+{
+ /*
+ * The breakpoint entry code has moved the PC on by 4 bytes, so we must
+ * move it back. This could be done on the host, but we do it here
+ * because the JTAG gdbserver's monitor.S does it too.
+ */
+ fp->ea -= 4;
+ _exception(SIGTRAP, fp, TRAP_BRKPT, fp->ea);
+}
+
+#ifndef CONFIG_NIOS2_ALIGNMENT_TRAP
+/* Alignment exception handler */
+asmlinkage void handle_unaligned_c(struct pt_regs *fp, int cause)
+{
+ unsigned long addr = RDCTL(CTL_BADADDR);
+
+ cause >>= 2;
+ fp->ea -= 4;
+
+ if (fixup_exception(fp))
+ return;
+
+ if (!user_mode(fp)) {
+ pr_alert("Unaligned access from kernel mode, this might be a hardware\n");
+ pr_alert("problem, dump registers and restart the instruction\n");
+ pr_alert(" BADADDR 0x%08lx\n", addr);
+ pr_alert(" cause %d\n", cause);
+ pr_alert(" op-code 0x%08lx\n", *(unsigned long *)(fp->ea));
+ show_regs(fp);
+ return;
+ }
+
+ _exception(SIGBUS, fp, BUS_ADRALN, addr);
+}
+#endif /* CONFIG_NIOS2_ALIGNMENT_TRAP */
+
+/* Illegal instruction handler */
+asmlinkage void handle_illegal_c(struct pt_regs *fp)
+{
+ fp->ea -= 4;
+ _exception(SIGILL, fp, ILL_ILLOPC, fp->ea);
+}
+
+/* Supervisor instruction handler */
+asmlinkage void handle_supervisor_instr(struct pt_regs *fp)
+{
+ fp->ea -= 4;
+ _exception(SIGILL, fp, ILL_PRVOPC, fp->ea);
+}
+
+/* Division error handler */
+asmlinkage void handle_diverror_c(struct pt_regs *fp)
+{
+ fp->ea -= 4;
+ _exception(SIGFPE, fp, FPE_INTDIV, fp->ea);
+}
+
+/* Unhandled exception handler */
+asmlinkage void unhandled_exception(struct pt_regs *regs, int cause)
+{
+ unsigned long addr = RDCTL(CTL_BADADDR);
+
+ cause /= 4;
+
+ pr_emerg("Unhandled exception #%d in %s mode (badaddr=0x%08lx)\n",
+ cause, user_mode(regs) ? "user" : "kernel", addr);
+
+ regs->ea -= 4;
+ show_regs(regs);
+
+ pr_emerg("opcode: 0x%08lx\n", *(unsigned long *)(regs->ea));
+}
+
+asmlinkage void handle_trap_1_c(struct pt_regs *fp)
+{
+ _send_sig(SIGUSR1, 0, fp->ea);
+}
+
+asmlinkage void handle_trap_2_c(struct pt_regs *fp)
+{
+ _send_sig(SIGUSR2, 0, fp->ea);
+}
+
+asmlinkage void handle_trap_3_c(struct pt_regs *fp)
+{
+ _send_sig(SIGILL, ILL_ILLTRP, fp->ea);
+}
diff --git a/arch/nios2/kernel/vmlinux.lds.S b/arch/nios2/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..126e11474
--- /dev/null
+++ b/arch/nios2/kernel/vmlinux.lds.S
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2009 Thomas Chou <thomas@wytron.com.tw>
+ */
+#include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+
+OUTPUT_FORMAT("elf32-littlenios2", "elf32-littlenios2", "elf32-littlenios2")
+
+OUTPUT_ARCH(nios)
+ENTRY(_start) /* Defined in head.S */
+
+jiffies = jiffies_64;
+
+SECTIONS
+{
+ . = CONFIG_NIOS2_MEM_BASE | CONFIG_NIOS2_KERNEL_REGION_BASE;
+
+ _text = .;
+ _stext = .;
+ HEAD_TEXT_SECTION
+ .text : {
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ KPROBES_TEXT
+ } =0
+ _etext = .;
+
+ .got : {
+ *(.got.plt)
+ *(.igot.plt)
+ *(.got)
+ *(.igot)
+ }
+
+ EXCEPTION_TABLE(L1_CACHE_BYTES)
+
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ INIT_DATA_SECTION(PAGE_SIZE)
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ __init_end = .;
+
+ _sdata = .;
+ RO_DATA(PAGE_SIZE)
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ _edata = .;
+
+ BSS_SECTION(0, 0, 0)
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ DISCARDS
+}