Diffstat (limited to 'arch/microblaze/kernel')
-rw-r--r--  arch/microblaze/kernel/.gitignore             |    2
-rw-r--r--  arch/microblaze/kernel/Makefile               |   30
-rw-r--r--  arch/microblaze/kernel/asm-offsets.c          |  128
-rw-r--r--  arch/microblaze/kernel/cpu/Makefile           |   13
-rw-r--r--  arch/microblaze/kernel/cpu/cache.c            |  656
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c |  110
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo-static.c   |  145
-rw-r--r--  arch/microblaze/kernel/cpu/cpuinfo.c          |  142
-rw-r--r--  arch/microblaze/kernel/cpu/mb.c               |  158
-rw-r--r--  arch/microblaze/kernel/cpu/pvr.c              |   81
-rw-r--r--  arch/microblaze/kernel/dma.c                  |   43
-rw-r--r--  arch/microblaze/kernel/entry-nommu.S          |  622
-rw-r--r--  arch/microblaze/kernel/entry.S                | 1011
-rw-r--r--  arch/microblaze/kernel/exceptions.c           |  149
-rw-r--r--  arch/microblaze/kernel/ftrace.c               |  222
-rw-r--r--  arch/microblaze/kernel/head.S                 |  386
-rw-r--r--  arch/microblaze/kernel/hw_exception_handler.S | 1222
-rw-r--r--  arch/microblaze/kernel/irq.c                  |   53
-rw-r--r--  arch/microblaze/kernel/kgdb.c                 |  152
-rw-r--r--  arch/microblaze/kernel/mcount.S               |  165
-rw-r--r--  arch/microblaze/kernel/microblaze_ksyms.c     |   51
-rw-r--r--  arch/microblaze/kernel/misc.S                 |   66
-rw-r--r--  arch/microblaze/kernel/module.c               |  122
-rw-r--r--  arch/microblaze/kernel/process.c              |  153
-rw-r--r--  arch/microblaze/kernel/prom.c                 |   30
-rw-r--r--  arch/microblaze/kernel/ptrace.c               |  170
-rw-r--r--  arch/microblaze/kernel/reset.c                |   43
-rw-r--r--  arch/microblaze/kernel/setup.c                |  201
-rw-r--r--  arch/microblaze/kernel/signal.c               |  322
-rw-r--r--  arch/microblaze/kernel/stacktrace.c           |   31
-rw-r--r--  arch/microblaze/kernel/sys_microblaze.c       |   55
-rw-r--r--  arch/microblaze/kernel/syscall_table.S        |    6
-rw-r--r--  arch/microblaze/kernel/syscalls/Makefile      |   38
-rw-r--r--  arch/microblaze/kernel/syscalls/syscall.tbl   |  448
-rw-r--r--  arch/microblaze/kernel/syscalls/syscallhdr.sh |   36
-rw-r--r--  arch/microblaze/kernel/syscalls/syscalltbl.sh |   32
-rw-r--r--  arch/microblaze/kernel/timer.c                |  326
-rw-r--r--  arch/microblaze/kernel/traps.c                |   78
-rw-r--r--  arch/microblaze/kernel/unwind.c               |  328
-rw-r--r--  arch/microblaze/kernel/vmlinux.lds.S          |  143
40 files changed, 8169 insertions, 0 deletions
diff --git a/arch/microblaze/kernel/.gitignore b/arch/microblaze/kernel/.gitignore
new file mode 100644
index 000000000..bbb90f92d
--- /dev/null
+++ b/arch/microblaze/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vmlinux.lds
diff --git a/arch/microblaze/kernel/Makefile b/arch/microblaze/kernel/Makefile
new file mode 100644
index 000000000..dd7163743
--- /dev/null
+++ b/arch/microblaze/kernel/Makefile
@@ -0,0 +1,30 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Makefile
+#
+
+ifdef CONFIG_FUNCTION_TRACER
+# Do not trace early boot code and low level code
+CFLAGS_REMOVE_timer.o = -pg
+CFLAGS_REMOVE_intc.o = -pg
+CFLAGS_REMOVE_early_printk.o = -pg
+CFLAGS_REMOVE_ftrace.o = -pg
+CFLAGS_REMOVE_process.o = -pg
+endif
+
+extra-y := head.o vmlinux.lds
+
+obj-y += dma.o exceptions.o \
+ hw_exception_handler.o irq.o \
+ process.o prom.o ptrace.o \
+ reset.o setup.o signal.o sys_microblaze.o timer.o traps.o unwind.o
+
+obj-y += cpu/
+
+obj-$(CONFIG_MODULES) += microblaze_ksyms.o module.o
+obj-$(CONFIG_MMU) += misc.o
+obj-$(CONFIG_STACKTRACE) += stacktrace.o
+obj-$(CONFIG_FUNCTION_TRACER) += ftrace.o mcount.o
+obj-$(CONFIG_KGDB) += kgdb.o
+
+obj-y += entry$(MMU).o
diff --git a/arch/microblaze/kernel/asm-offsets.c b/arch/microblaze/kernel/asm-offsets.c
new file mode 100644
index 000000000..c1b459c97
--- /dev/null
+++ b/arch/microblaze/kernel/asm-offsets.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/ptrace.h>
+#include <linux/hardirq.h>
+#include <linux/thread_info.h>
+#include <linux/kbuild.h>
+#include <asm/cpuinfo.h>
+
+int main(int argc, char *argv[])
+{
+ /* struct pt_regs */
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ DEFINE(PT_MSR, offsetof(struct pt_regs, msr));
+ DEFINE(PT_EAR, offsetof(struct pt_regs, ear));
+ DEFINE(PT_ESR, offsetof(struct pt_regs, esr));
+ DEFINE(PT_FSR, offsetof(struct pt_regs, fsr));
+ DEFINE(PT_PC, offsetof(struct pt_regs, pc));
+ DEFINE(PT_R0, offsetof(struct pt_regs, r0));
+ DEFINE(PT_R1, offsetof(struct pt_regs, r1));
+ DEFINE(PT_R2, offsetof(struct pt_regs, r2));
+ DEFINE(PT_R3, offsetof(struct pt_regs, r3));
+ DEFINE(PT_R4, offsetof(struct pt_regs, r4));
+ DEFINE(PT_R5, offsetof(struct pt_regs, r5));
+ DEFINE(PT_R6, offsetof(struct pt_regs, r6));
+ DEFINE(PT_R7, offsetof(struct pt_regs, r7));
+ DEFINE(PT_R8, offsetof(struct pt_regs, r8));
+ DEFINE(PT_R9, offsetof(struct pt_regs, r9));
+ DEFINE(PT_R10, offsetof(struct pt_regs, r10));
+ DEFINE(PT_R11, offsetof(struct pt_regs, r11));
+ DEFINE(PT_R12, offsetof(struct pt_regs, r12));
+ DEFINE(PT_R13, offsetof(struct pt_regs, r13));
+ DEFINE(PT_R14, offsetof(struct pt_regs, r14));
+ DEFINE(PT_R15, offsetof(struct pt_regs, r15));
+ DEFINE(PT_R16, offsetof(struct pt_regs, r16));
+ DEFINE(PT_R17, offsetof(struct pt_regs, r17));
+ DEFINE(PT_R18, offsetof(struct pt_regs, r18));
+ DEFINE(PT_R19, offsetof(struct pt_regs, r19));
+ DEFINE(PT_R20, offsetof(struct pt_regs, r20));
+ DEFINE(PT_R21, offsetof(struct pt_regs, r21));
+ DEFINE(PT_R22, offsetof(struct pt_regs, r22));
+ DEFINE(PT_R23, offsetof(struct pt_regs, r23));
+ DEFINE(PT_R24, offsetof(struct pt_regs, r24));
+ DEFINE(PT_R25, offsetof(struct pt_regs, r25));
+ DEFINE(PT_R26, offsetof(struct pt_regs, r26));
+ DEFINE(PT_R27, offsetof(struct pt_regs, r27));
+ DEFINE(PT_R28, offsetof(struct pt_regs, r28));
+ DEFINE(PT_R29, offsetof(struct pt_regs, r29));
+ DEFINE(PT_R30, offsetof(struct pt_regs, r30));
+ DEFINE(PT_R31, offsetof(struct pt_regs, r31));
+ DEFINE(PT_MODE, offsetof(struct pt_regs, pt_mode));
+ BLANK();
+
+ /* Magic offsets for PTRACE PEEK/POKE etc */
+ DEFINE(PT_TEXT_ADDR, sizeof(struct pt_regs) + 1);
+ DEFINE(PT_TEXT_LEN, sizeof(struct pt_regs) + 2);
+ DEFINE(PT_DATA_ADDR, sizeof(struct pt_regs) + 3);
+ BLANK();
+
+ /* struct task_struct */
+ DEFINE(TS_THREAD_INFO, offsetof(struct task_struct, stack));
+#ifdef CONFIG_MMU
+ DEFINE(TASK_STATE, offsetof(struct task_struct, state));
+ DEFINE(TASK_FLAGS, offsetof(struct task_struct, flags));
+ DEFINE(TASK_PTRACE, offsetof(struct task_struct, ptrace));
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_MM, offsetof(struct task_struct, mm));
+ DEFINE(TASK_ACTIVE_MM, offsetof(struct task_struct, active_mm));
+ DEFINE(TASK_PID, offsetof(struct task_struct, pid));
+ DEFINE(TASK_THREAD, offsetof(struct task_struct, thread));
+ DEFINE(THREAD_KSP, offsetof(struct thread_struct, ksp));
+ BLANK();
+
+ DEFINE(PGDIR, offsetof(struct thread_struct, pgdir));
+ BLANK();
+#endif
+
+ /* struct thread_info */
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_ADDR_LIMIT, offsetof(struct thread_info, addr_limit));
+ DEFINE(TI_CPU_CONTEXT, offsetof(struct thread_info, cpu_context));
+ DEFINE(TI_PREEMPT_COUNT, offsetof(struct thread_info, preempt_count));
+ BLANK();
+
+ /* struct cpu_context */
+ DEFINE(CC_R1, offsetof(struct cpu_context, r1)); /* r1 */
+ DEFINE(CC_R2, offsetof(struct cpu_context, r2));
+ /* dedicated registers */
+ DEFINE(CC_R13, offsetof(struct cpu_context, r13));
+ DEFINE(CC_R14, offsetof(struct cpu_context, r14));
+ DEFINE(CC_R15, offsetof(struct cpu_context, r15));
+ DEFINE(CC_R16, offsetof(struct cpu_context, r16));
+ DEFINE(CC_R17, offsetof(struct cpu_context, r17));
+ DEFINE(CC_R18, offsetof(struct cpu_context, r18));
+ /* non-volatile registers */
+ DEFINE(CC_R19, offsetof(struct cpu_context, r19));
+ DEFINE(CC_R20, offsetof(struct cpu_context, r20));
+ DEFINE(CC_R21, offsetof(struct cpu_context, r21));
+ DEFINE(CC_R22, offsetof(struct cpu_context, r22));
+ DEFINE(CC_R23, offsetof(struct cpu_context, r23));
+ DEFINE(CC_R24, offsetof(struct cpu_context, r24));
+ DEFINE(CC_R25, offsetof(struct cpu_context, r25));
+ DEFINE(CC_R26, offsetof(struct cpu_context, r26));
+ DEFINE(CC_R27, offsetof(struct cpu_context, r27));
+ DEFINE(CC_R28, offsetof(struct cpu_context, r28));
+ DEFINE(CC_R29, offsetof(struct cpu_context, r29));
+ DEFINE(CC_R30, offsetof(struct cpu_context, r30));
+ /* special purpose registers */
+ DEFINE(CC_MSR, offsetof(struct cpu_context, msr));
+ DEFINE(CC_EAR, offsetof(struct cpu_context, ear));
+ DEFINE(CC_ESR, offsetof(struct cpu_context, esr));
+ DEFINE(CC_FSR, offsetof(struct cpu_context, fsr));
+ BLANK();
+
+ return 0;
+}
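
asm-offsets.c is compiled but never linked into the kernel: kbuild turns it into assembly and scrapes the DEFINE() markers out of that output to generate include/generated/asm-offsets.h, which the assembly files below pull in via <asm/asm-offsets.h>. A minimal sketch of the mechanism, assuming the DEFINE() helper from <linux/kbuild.h> (illustration only, not a verbatim copy of the kernel's macro):

    /* Sketch of the kbuild asm-offsets trick: the "i" constraint makes
     * the compiler emit the constant into the generated assembly, and a
     * kbuild sed script rewrites each "->SYM value" marker into a
     * #define in include/generated/asm-offsets.h.
     */
    #define DEFINE(sym, val) \
            asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

    /* So DEFINE(PT_MSR, offsetof(struct pt_regs, msr)) ends up as a plain
     * #define of the byte offset, which lets entry.S write, for example:
     *
     *	swi	r11, r1, PT_MSR
     */
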
diff --git a/arch/microblaze/kernel/cpu/Makefile b/arch/microblaze/kernel/cpu/Makefile
new file mode 100644
index 000000000..059afc75a
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/Makefile
@@ -0,0 +1,13 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# Build the appropriate CPU version support
+#
+
+ifdef CONFIG_FUNCTION_TRACER
+CFLAGS_REMOVE_cache.o = -pg
+endif
+
+ccflags-y := -DCPU_MAJOR=$(CPU_MAJOR) -DCPU_MINOR=$(CPU_MINOR) \
+ -DCPU_REV=$(CPU_REV)
+
+obj-y += cache.o cpuinfo.o cpuinfo-pvr-full.o cpuinfo-static.o mb.o pvr.o
diff --git a/arch/microblaze/kernel/cpu/cache.c b/arch/microblaze/kernel/cpu/cache.c
new file mode 100644
index 000000000..dcba53803
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cache.c
@@ -0,0 +1,656 @@
+/*
+ * Cache control for MicroBlaze cache memories
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007-2009 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/cache.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+static inline void __enable_icache_msr(void)
+{
+ __asm__ __volatile__ (" msrset r0, %0;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __disable_icache_msr(void)
+{
+ __asm__ __volatile__ (" msrclr r0, %0;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory");
+}
+
+static inline void __enable_dcache_msr(void)
+{
+ __asm__ __volatile__ (" msrset r0, %0;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory");
+}
+
+static inline void __disable_dcache_msr(void)
+{
+ __asm__ __volatile__ (" msrclr r0, %0;" \
+ "nop; " \
+ : : "i" (MSR_DCE) : "memory");
+}
+
+static inline void __enable_icache_nomsr(void)
+{
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "ori r12, r12, %0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory", "r12");
+}
+
+static inline void __disable_icache_nomsr(void)
+{
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "andi r12, r12, ~%0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_ICE) : "memory", "r12");
+}
+
+static inline void __enable_dcache_nomsr(void)
+{
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "ori r12, r12, %0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory", "r12");
+}
+
+static inline void __disable_dcache_nomsr(void)
+{
+ __asm__ __volatile__ (" mfs r12, rmsr;" \
+ "nop;" \
+ "andi r12, r12, ~%0;" \
+ "mts rmsr, r12;" \
+ "nop;" \
+ : : "i" (MSR_DCE) : "memory", "r12");
+}
+
+
+/* Helper macro for computing the limits of cache range loops.
+ *
+ * The end address can be unaligned, which is fine for the C implementation;
+ * the ASM implementation aligns it in the ASM macros.
+ */
+#define CACHE_LOOP_LIMITS(start, end, cache_line_length, cache_size) \
+do { \
+ int align = ~(cache_line_length - 1); \
+ if (start < UINT_MAX - cache_size) \
+ end = min(start + cache_size, end); \
+ start &= align; \
+} while (0)
+
+/*
+ * Helper macro to loop over the specified cache_size/line_length and
+ * execute 'op' on that cacheline
+ */
+#define CACHE_ALL_LOOP(cache_size, line_length, op) \
+do { \
+ unsigned int len = cache_size - line_length; \
+ int step = -line_length; \
+ WARN_ON(step >= 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %0, r0;" \
+ "bgtid %0, 1b;" \
+ "addk %0, %0, %1;" \
+ : : "r" (len), "r" (step) \
+ : "memory"); \
+} while (0)
+
+/* Used for wdc.flush/clear, which can take rB as an offset; that is not
+ * possible for plain wdc or wic.
+ *
+ * The start address is cache-line aligned.
+ * The end address may be unaligned: if it is already aligned, subtract one
+ * cache-line length, because the next cache line must not be
+ * flushed/invalidated; if it is unaligned, align it down, because the whole
+ * line is flushed/invalidated anyway.
+ */
+#define CACHE_RANGE_LOOP_2(start, end, line_length, op) \
+do { \
+ int step = -line_length; \
+ int align = ~(line_length - 1); \
+ int count; \
+ end = ((end & align) == end) ? end - line_length : end & align; \
+ count = end - start; \
+ WARN_ON(count < 0); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %0, %1;" \
+ "bgtid %1, 1b;" \
+ "addk %1, %1, %2;" \
+ : : "r" (start), "r" (count), \
+ "r" (step) : "memory"); \
+} while (0)
+
+/* Only the first parameter of OP is used - for wic, wdc */
+#define CACHE_RANGE_LOOP_1(start, end, line_length, op) \
+do { \
+ unsigned int volatile temp = 0; \
+ unsigned int align = ~(line_length - 1); \
+ end = ((end & align) == end) ? end - line_length : end & align; \
+ WARN_ON(end < start); \
+ \
+ __asm__ __volatile__ (" 1: " #op " %1, r0;" \
+ "cmpu %0, %1, %2;" \
+ "bgtid %0, 1b;" \
+ "addk %1, %1, %3;" \
+ : : "r" (temp), "r" (start), "r" (end), \
+ "r" (line_length) : "memory"); \
+} while (0)
+
+#define ASM_LOOP
+
+static void __flush_icache_range_msr_irq(unsigned long start, unsigned long end)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+ local_irq_save(flags);
+ __disable_icache_msr();
+
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
+ __enable_icache_msr();
+ local_irq_restore(flags);
+}
+
+static void __flush_icache_range_nomsr_irq(unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+
+ local_irq_save(flags);
+ __disable_icache_nomsr();
+
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
+
+ __enable_icache_nomsr();
+ local_irq_restore(flags);
+}
+
+static void __flush_icache_range_noirq(unsigned long start,
+ unsigned long end)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.icache_line_length, cpuinfo.icache_size);
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.icache_line_length, wic);
+#else
+ for (i = start; i < end; i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+static void __flush_icache_all_msr_irq(void)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_icache_msr();
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
+ __enable_icache_msr();
+ local_irq_restore(flags);
+}
+
+static void __flush_icache_all_nomsr_irq(void)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_icache_nomsr();
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
+ __enable_icache_nomsr();
+ local_irq_restore(flags);
+}
+
+static void __flush_icache_all_noirq(void)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.icache_size, cpuinfo.icache_line_length, wic);
+#else
+ for (i = 0; i < cpuinfo.icache_size;
+ i += cpuinfo.icache_line_length)
+ __asm__ __volatile__ ("wic %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_all_msr_irq(void)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_dcache_msr();
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+ __enable_dcache_msr();
+ local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_all_nomsr_irq(void)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+
+ local_irq_save(flags);
+ __disable_dcache_nomsr();
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+ __enable_dcache_nomsr();
+ local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_all_noirq_wt(void)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+/*
+ * FIXME This is meant to be a blind invalidation, as expected,
+ * but it can't be done that way on noMMU in microblaze_cache_init below.
+ *
+ * MS: a noMMU kernel won't boot if plain wdc is used.
+ * The likely reason is that data the kernel still needs gets discarded.
+ */
+static void __invalidate_dcache_all_wb(void)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_wb(unsigned long start,
+ unsigned long end)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.clear);
+#else
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.clear %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_nomsr_wt(unsigned long start,
+ unsigned long end)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+static void __invalidate_dcache_range_msr_irq_wt(unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ local_irq_save(flags);
+ __disable_dcache_msr();
+
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+
+ __enable_dcache_msr();
+ local_irq_restore(flags);
+}
+
+static void __invalidate_dcache_range_nomsr_irq(unsigned long start,
+ unsigned long end)
+{
+ unsigned long flags;
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+
+ local_irq_save(flags);
+ __disable_dcache_nomsr();
+
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_1(start, end, cpuinfo.dcache_line_length, wdc);
+#else
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc %0, r0;" \
+ : : "r" (i));
+#endif
+
+ __enable_dcache_nomsr();
+ local_irq_restore(flags);
+}
+
+static void __flush_dcache_all_wb(void)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s\n", __func__);
+#ifdef ASM_LOOP
+ CACHE_ALL_LOOP(cpuinfo.dcache_size, cpuinfo.dcache_line_length,
+ wdc.flush);
+#else
+ for (i = 0; i < cpuinfo.dcache_size;
+ i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+static void __flush_dcache_range_wb(unsigned long start, unsigned long end)
+{
+#ifndef ASM_LOOP
+ int i;
+#endif
+ pr_debug("%s: start 0x%x, end 0x%x\n", __func__,
+ (unsigned int)start, (unsigned int) end);
+
+ CACHE_LOOP_LIMITS(start, end,
+ cpuinfo.dcache_line_length, cpuinfo.dcache_size);
+#ifdef ASM_LOOP
+ CACHE_RANGE_LOOP_2(start, end, cpuinfo.dcache_line_length, wdc.flush);
+#else
+ for (i = start; i < end; i += cpuinfo.dcache_line_length)
+ __asm__ __volatile__ ("wdc.flush %0, r0;" \
+ : : "r" (i));
+#endif
+}
+
+/* struct for wb caches and for wt caches */
+struct scache *mbc;
+
+/* new wb cache model */
+static const struct scache wb_msr = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __flush_dcache_all_wb,
+ .dflr = __flush_dcache_range_wb,
+ .din = __invalidate_dcache_all_wb,
+ .dinr = __invalidate_dcache_range_wb,
+};
+
+/* The only difference is in the ie, id, de and dd functions */
+static const struct scache wb_nomsr = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __flush_dcache_all_wb,
+ .dflr = __flush_dcache_range_wb,
+ .din = __invalidate_dcache_all_wb,
+ .dinr = __invalidate_dcache_range_wb,
+};
+
+/* Old WT cache model that disables IRQs and turns the cache off */
+static const struct scache wt_msr = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_msr_irq,
+ .iflr = __flush_icache_range_msr_irq,
+ .iin = __flush_icache_all_msr_irq,
+ .iinr = __flush_icache_range_msr_irq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __invalidate_dcache_all_msr_irq,
+ .dflr = __invalidate_dcache_range_msr_irq_wt,
+ .din = __invalidate_dcache_all_msr_irq,
+ .dinr = __invalidate_dcache_range_msr_irq_wt,
+};
+
+static const struct scache wt_nomsr = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_nomsr_irq,
+ .iflr = __flush_icache_range_nomsr_irq,
+ .iin = __flush_icache_all_nomsr_irq,
+ .iinr = __flush_icache_range_nomsr_irq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __invalidate_dcache_all_nomsr_irq,
+ .dflr = __invalidate_dcache_range_nomsr_irq,
+ .din = __invalidate_dcache_all_nomsr_irq,
+ .dinr = __invalidate_dcache_range_nomsr_irq,
+};
+
+/* New wt cache model for newer Microblaze versions */
+static const struct scache wt_msr_noirq = {
+ .ie = __enable_icache_msr,
+ .id = __disable_icache_msr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_msr,
+ .dd = __disable_dcache_msr,
+ .dfl = __invalidate_dcache_all_noirq_wt,
+ .dflr = __invalidate_dcache_range_nomsr_wt,
+ .din = __invalidate_dcache_all_noirq_wt,
+ .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+static const struct scache wt_nomsr_noirq = {
+ .ie = __enable_icache_nomsr,
+ .id = __disable_icache_nomsr,
+ .ifl = __flush_icache_all_noirq,
+ .iflr = __flush_icache_range_noirq,
+ .iin = __flush_icache_all_noirq,
+ .iinr = __flush_icache_range_noirq,
+ .de = __enable_dcache_nomsr,
+ .dd = __disable_dcache_nomsr,
+ .dfl = __invalidate_dcache_all_noirq_wt,
+ .dflr = __invalidate_dcache_range_nomsr_wt,
+ .din = __invalidate_dcache_all_noirq_wt,
+ .dinr = __invalidate_dcache_range_nomsr_wt,
+};
+
+/* CPU version codes for 7.20.a and 7.20.d - see arch/microblaze/kernel/cpu/cpuinfo.c */
+#define CPUVER_7_20_A 0x0c
+#define CPUVER_7_20_D 0x0f
+
+void microblaze_cache_init(void)
+{
+ if (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) {
+ if (cpuinfo.dcache_wb) {
+ pr_info("wb_msr\n");
+ mbc = (struct scache *)&wb_msr;
+ if (cpuinfo.ver_code <= CPUVER_7_20_D) {
+ /* MS: problem with signal handling - hw bug */
+ pr_info("WB won't work properly\n");
+ }
+ } else {
+ if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ pr_info("wt_msr_noirq\n");
+ mbc = (struct scache *)&wt_msr_noirq;
+ } else {
+ pr_info("wt_msr\n");
+ mbc = (struct scache *)&wt_msr;
+ }
+ }
+ } else {
+ if (cpuinfo.dcache_wb) {
+ pr_info("wb_nomsr\n");
+ mbc = (struct scache *)&wb_nomsr;
+ if (cpuinfo.ver_code <= CPUVER_7_20_D) {
+ /* MS: problem with signal handling - hw bug */
+ pr_info("WB won't work properly\n");
+ }
+ } else {
+ if (cpuinfo.ver_code >= CPUVER_7_20_A) {
+ pr_info("wt_nomsr_noirq\n");
+ mbc = (struct scache *)&wt_nomsr_noirq;
+ } else {
+ pr_info("wt_nomsr\n");
+ mbc = (struct scache *)&wt_nomsr;
+ }
+ }
+ }
+	/*
+	 * FIXME Invalidation is done in U-BOOT
+	 * WT cache: data is already written to main memory
+	 * WB cache: discarding data on noMMU kept the kernel from booting
+	 */
+ /* invalidate_dcache(); */
+ enable_dcache();
+
+ invalidate_icache();
+ enable_icache();
+}
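
The range macros above lean on CACHE_LOOP_LIMITS to bound the walk: the end is clamped to at most one cache-size beyond the start (there is no point touching more lines than the cache holds), and the start is aligned down to a line boundary. A standalone C sketch of that clamping, assuming a 32-byte line and a 32 KB cache (the addresses are made up for illustration):

    #include <limits.h>
    #include <stdio.h>

    int main(void)
    {
            unsigned int start = 0x1010, end = 0xffff0000;  /* arbitrary */
            unsigned int line = 32, size = 32 * 1024;
            unsigned int align = ~(line - 1);

            /* mirror CACHE_LOOP_LIMITS: clamp end, then align start down */
            if (start < UINT_MAX - size && start + size < end)
                    end = start + size;
            start &= align;

            /* prints: loop from 0x1000 to 0x9010 */
            printf("loop from 0x%x to 0x%x\n", start, end);
            return 0;
    }
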
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
new file mode 100644
index 000000000..c7ee51b09
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cpuinfo-pvr-full.c
@@ -0,0 +1,110 @@
+/*
+ * Support for MicroBlaze PVR (processor version register)
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/pvr.h>
+#include <asm/cpuinfo.h>
+
+/*
+ * Helper macro to map between fields in our struct cpuinfo, and
+ * the PVR macros in pvr.h.
+ */
+
+#define CI(c, p) { ci->c = PVR_##p(pvr); }
+
+#define err_printk(x) \
+ pr_err("ERROR: Microblaze " x "-different for PVR and DTS\n");
+
+void set_cpuinfo_pvr_full(struct cpuinfo *ci, struct device_node *cpu)
+{
+ struct pvr_s pvr;
+ u32 temp; /* for saving temp value */
+ get_pvr(&pvr);
+
+ CI(ver_code, VERSION);
+ if (!ci->ver_code) {
+ pr_err("ERROR: MB has broken PVR regs -> use DTS setting\n");
+ return;
+ }
+
+ temp = PVR_USE_BARREL(pvr) | PVR_USE_MSR_INSTR(pvr) |
+ PVR_USE_PCMP_INSTR(pvr) | PVR_USE_DIV(pvr);
+ if (ci->use_instr != temp)
+ err_printk("BARREL, MSR, PCMP or DIV");
+ ci->use_instr = temp;
+
+ temp = PVR_USE_HW_MUL(pvr) | PVR_USE_MUL64(pvr);
+ if (ci->use_mult != temp)
+ err_printk("HW_MUL");
+ ci->use_mult = temp;
+
+ temp = PVR_USE_FPU(pvr) | PVR_USE_FPU2(pvr);
+ if (ci->use_fpu != temp)
+ err_printk("HW_FPU");
+ ci->use_fpu = temp;
+
+ ci->use_exc = PVR_OPCODE_0x0_ILLEGAL(pvr) |
+ PVR_UNALIGNED_EXCEPTION(pvr) |
+ PVR_ILL_OPCODE_EXCEPTION(pvr) |
+ PVR_IOPB_BUS_EXCEPTION(pvr) |
+ PVR_DOPB_BUS_EXCEPTION(pvr) |
+ PVR_DIV_ZERO_EXCEPTION(pvr) |
+ PVR_FPU_EXCEPTION(pvr) |
+ PVR_FSL_EXCEPTION(pvr);
+
+ CI(pvr_user1, USER1);
+ CI(pvr_user2, USER2);
+
+ CI(mmu, USE_MMU);
+ CI(mmu_privins, MMU_PRIVINS);
+ CI(endian, ENDIAN);
+
+ CI(use_icache, USE_ICACHE);
+ CI(icache_tagbits, ICACHE_ADDR_TAG_BITS);
+ CI(icache_write, ICACHE_ALLOW_WR);
+ ci->icache_line_length = PVR_ICACHE_LINE_LEN(pvr) << 2;
+ CI(icache_size, ICACHE_BYTE_SIZE);
+ CI(icache_base, ICACHE_BASEADDR);
+ CI(icache_high, ICACHE_HIGHADDR);
+
+ CI(use_dcache, USE_DCACHE);
+ CI(dcache_tagbits, DCACHE_ADDR_TAG_BITS);
+ CI(dcache_write, DCACHE_ALLOW_WR);
+ ci->dcache_line_length = PVR_DCACHE_LINE_LEN(pvr) << 2;
+ CI(dcache_size, DCACHE_BYTE_SIZE);
+ CI(dcache_base, DCACHE_BASEADDR);
+ CI(dcache_high, DCACHE_HIGHADDR);
+
+ temp = PVR_DCACHE_USE_WRITEBACK(pvr);
+ if (ci->dcache_wb != temp)
+ err_printk("DCACHE WB");
+ ci->dcache_wb = temp;
+
+ CI(use_dopb, D_OPB);
+ CI(use_iopb, I_OPB);
+ CI(use_dlmb, D_LMB);
+ CI(use_ilmb, I_LMB);
+ CI(num_fsl, FSL_LINKS);
+
+ CI(irq_edge, INTERRUPT_IS_EDGE);
+ CI(irq_positive, EDGE_IS_POSITIVE);
+
+ CI(area_optimised, AREA_OPTIMISED);
+
+ CI(hw_debug, DEBUG_ENABLED);
+ CI(num_pc_brk, NUMBER_OF_PC_BRK);
+ CI(num_rd_brk, NUMBER_OF_RD_ADDR_BRK);
+ CI(num_wr_brk, NUMBER_OF_WR_ADDR_BRK);
+
+ CI(fpga_family_code, TARGET_FAMILY);
+}
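
For reference, the CI() helper above is plain token pasting; expanding one invocation shows the whole trick (PVR_VERSION() is one of the field extractors in <asm/pvr.h>):

    CI(ver_code, VERSION);
    /* expands to: */
    { ci->ver_code = PVR_VERSION(pvr); }
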
diff --git a/arch/microblaze/kernel/cpu/cpuinfo-static.c b/arch/microblaze/kernel/cpu/cpuinfo-static.c
new file mode 100644
index 000000000..85dbda4a0
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cpuinfo-static.c
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+static const char family_string[] = CONFIG_XILINX_MICROBLAZE0_FAMILY;
+static const char cpu_ver_string[] = CONFIG_XILINX_MICROBLAZE0_HW_VER;
+
+#define err_printk(x) \
+ early_printk("ERROR: Microblaze " x "-different for kernel and DTS\n");
+
+void __init set_cpuinfo_static(struct cpuinfo *ci, struct device_node *cpu)
+{
+ u32 i = 0;
+
+ ci->use_instr =
+ (fcpu(cpu, "xlnx,use-barrel") ? PVR0_USE_BARREL_MASK : 0) |
+ (fcpu(cpu, "xlnx,use-msr-instr") ? PVR2_USE_MSR_INSTR : 0) |
+ (fcpu(cpu, "xlnx,use-pcmp-instr") ? PVR2_USE_PCMP_INSTR : 0) |
+ (fcpu(cpu, "xlnx,use-div") ? PVR0_USE_DIV_MASK : 0);
+ if (CONFIG_XILINX_MICROBLAZE0_USE_BARREL)
+ i |= PVR0_USE_BARREL_MASK;
+ if (CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR)
+ i |= PVR2_USE_MSR_INSTR;
+ if (CONFIG_XILINX_MICROBLAZE0_USE_PCMP_INSTR)
+ i |= PVR2_USE_PCMP_INSTR;
+ if (CONFIG_XILINX_MICROBLAZE0_USE_DIV)
+ i |= PVR0_USE_DIV_MASK;
+ if (ci->use_instr != i)
+ err_printk("BARREL, MSR, PCMP or DIV");
+
+ ci->use_mult = fcpu(cpu, "xlnx,use-hw-mul");
+ if (ci->use_mult != CONFIG_XILINX_MICROBLAZE0_USE_HW_MUL)
+ err_printk("HW_MUL");
+ ci->use_mult =
+ (ci->use_mult > 1 ?
+ (PVR2_USE_MUL64_MASK | PVR0_USE_HW_MUL_MASK) :
+ (ci->use_mult == 1 ? PVR0_USE_HW_MUL_MASK : 0));
+
+ ci->use_fpu = fcpu(cpu, "xlnx,use-fpu");
+ if (ci->use_fpu != CONFIG_XILINX_MICROBLAZE0_USE_FPU)
+ err_printk("HW_FPU");
+ ci->use_fpu = (ci->use_fpu > 1 ?
+ (PVR2_USE_FPU2_MASK | PVR0_USE_FPU_MASK) :
+ (ci->use_fpu == 1 ? PVR0_USE_FPU_MASK : 0));
+
+ ci->use_exc =
+ (fcpu(cpu, "xlnx,unaligned-exceptions") ?
+ PVR2_UNALIGNED_EXC_MASK : 0) |
+ (fcpu(cpu, "xlnx,ill-opcode-exception") ?
+ PVR2_ILL_OPCODE_EXC_MASK : 0) |
+ (fcpu(cpu, "xlnx,iopb-bus-exception") ?
+ PVR2_IOPB_BUS_EXC_MASK : 0) |
+ (fcpu(cpu, "xlnx,dopb-bus-exception") ?
+ PVR2_DOPB_BUS_EXC_MASK : 0) |
+ (fcpu(cpu, "xlnx,div-zero-exception") ?
+ PVR2_DIV_ZERO_EXC_MASK : 0) |
+ (fcpu(cpu, "xlnx,fpu-exception") ? PVR2_FPU_EXC_MASK : 0) |
+ (fcpu(cpu, "xlnx,fsl-exception") ? PVR2_USE_EXTEND_FSL : 0);
+
+ ci->use_icache = fcpu(cpu, "xlnx,use-icache");
+ ci->icache_tagbits = fcpu(cpu, "xlnx,addr-tag-bits");
+ ci->icache_write = fcpu(cpu, "xlnx,allow-icache-wr");
+ ci->icache_line_length = fcpu(cpu, "xlnx,icache-line-len") << 2;
+ if (!ci->icache_line_length) {
+ if (fcpu(cpu, "xlnx,icache-use-fsl"))
+ ci->icache_line_length = 4 << 2;
+ else
+ ci->icache_line_length = 1 << 2;
+ }
+ ci->icache_size = fcpu(cpu, "i-cache-size");
+ ci->icache_base = fcpu(cpu, "i-cache-baseaddr");
+ ci->icache_high = fcpu(cpu, "i-cache-highaddr");
+
+ ci->use_dcache = fcpu(cpu, "xlnx,use-dcache");
+ ci->dcache_tagbits = fcpu(cpu, "xlnx,dcache-addr-tag");
+ ci->dcache_write = fcpu(cpu, "xlnx,allow-dcache-wr");
+ ci->dcache_line_length = fcpu(cpu, "xlnx,dcache-line-len") << 2;
+ if (!ci->dcache_line_length) {
+ if (fcpu(cpu, "xlnx,dcache-use-fsl"))
+ ci->dcache_line_length = 4 << 2;
+ else
+ ci->dcache_line_length = 1 << 2;
+ }
+ ci->dcache_size = fcpu(cpu, "d-cache-size");
+ ci->dcache_base = fcpu(cpu, "d-cache-baseaddr");
+ ci->dcache_high = fcpu(cpu, "d-cache-highaddr");
+ ci->dcache_wb = fcpu(cpu, "xlnx,dcache-use-writeback");
+
+ ci->use_dopb = fcpu(cpu, "xlnx,d-opb");
+ ci->use_iopb = fcpu(cpu, "xlnx,i-opb");
+ ci->use_dlmb = fcpu(cpu, "xlnx,d-lmb");
+ ci->use_ilmb = fcpu(cpu, "xlnx,i-lmb");
+
+ ci->num_fsl = fcpu(cpu, "xlnx,fsl-links");
+ ci->irq_edge = fcpu(cpu, "xlnx,interrupt-is-edge");
+ ci->irq_positive = fcpu(cpu, "xlnx,edge-is-positive");
+ ci->area_optimised = 0;
+
+ ci->hw_debug = fcpu(cpu, "xlnx,debug-enabled");
+ ci->num_pc_brk = fcpu(cpu, "xlnx,number-of-pc-brk");
+ ci->num_rd_brk = fcpu(cpu, "xlnx,number-of-rd-addr-brk");
+ ci->num_wr_brk = fcpu(cpu, "xlnx,number-of-wr-addr-brk");
+
+ ci->pvr_user1 = fcpu(cpu, "xlnx,pvr-user1");
+ ci->pvr_user2 = fcpu(cpu, "xlnx,pvr-user2");
+
+ ci->mmu = fcpu(cpu, "xlnx,use-mmu");
+ ci->mmu_privins = fcpu(cpu, "xlnx,mmu-privileged-instr");
+ ci->endian = fcpu(cpu, "xlnx,endianness");
+
+ ci->ver_code = 0;
+ ci->fpga_family_code = 0;
+
+ /* Do various fixups based on CPU version and FPGA family strings */
+
+	/* Resolve the CPU version code */
+ for (i = 0; cpu_ver_lookup[i].s != NULL; i++) {
+ if (strcmp(cpu_ver_lookup[i].s, cpu_ver_string) == 0)
+ ci->ver_code = cpu_ver_lookup[i].k;
+ }
+
+	/* Resolve the FPGA family code */
+ for (i = 0; family_string_lookup[i].s != NULL; i++) {
+ if (strcmp(family_string_lookup[i].s, family_string) == 0)
+ ci->fpga_family_code = family_string_lookup[i].k;
+ }
+
+ /* FIXME - mb3 and spartan2 do not exist in PVR */
+	/* This is mb3 on a non-Spartan2 */
+ if (ci->ver_code == 0x20 && ci->fpga_family_code != 0xf0)
+ /* Hardware Multiplier in use */
+ ci->use_mult = 1;
+}
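
Nearly every line above goes through fcpu(), which reads a single u32 property from the CPU's device-tree node. A minimal sketch of such a helper, assuming the usual of_property_read_u32() pattern (the real helper lives in arch/microblaze/include/asm/cpuinfo.h):

    #include <linux/of.h>

    /* Sketch: return a u32 DT property of the cpu node, or 0 if absent. */
    static inline unsigned int fcpu(struct device_node *cpu, char *n)
    {
            u32 val = 0;

            of_property_read_u32(cpu, n, &val);

            return val;
    }
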
diff --git a/arch/microblaze/kernel/cpu/cpuinfo.c b/arch/microblaze/kernel/cpu/cpuinfo.c
new file mode 100644
index 000000000..cd9b44507
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/cpuinfo.c
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/init.h>
+#include <asm/cpuinfo.h>
+#include <asm/pvr.h>
+
+const struct cpu_ver_key cpu_ver_lookup[] = {
+	/* These key values are as per the MBV field in PVR0 */
+ {"5.00.a", 0x01},
+ {"5.00.b", 0x02},
+ {"5.00.c", 0x03},
+ {"6.00.a", 0x04},
+ {"6.00.b", 0x06},
+ {"7.00.a", 0x05},
+ {"7.00.b", 0x07},
+ {"7.10.a", 0x08},
+ {"7.10.b", 0x09},
+ {"7.10.c", 0x0a},
+ {"7.10.d", 0x0b},
+ {"7.20.a", 0x0c},
+ {"7.20.b", 0x0d},
+ {"7.20.c", 0x0e},
+ {"7.20.d", 0x0f},
+ {"7.30.a", 0x10},
+ {"7.30.b", 0x11},
+ {"8.00.a", 0x12},
+ {"8.00.b", 0x13},
+ {"8.10.a", 0x14},
+ {"8.20.a", 0x15},
+ {"8.20.b", 0x16},
+ {"8.30.a", 0x17},
+ {"8.40.a", 0x18},
+ {"8.40.b", 0x19},
+ {"8.50.a", 0x1a},
+ {"8.50.b", 0x1c},
+ {"8.50.c", 0x1e},
+ {"9.0", 0x1b},
+ {"9.1", 0x1d},
+ {"9.2", 0x1f},
+ {"9.3", 0x20},
+ {"9.4", 0x21},
+ {"9.5", 0x22},
+ {"9.6", 0x23},
+ {"10.0", 0x24},
+ {"11.0", 0x25},
+ {NULL, 0},
+};
+
+/*
+ * FIXME Not sure if the actual key is defined by Xilinx in the PVR
+ */
+const struct family_string_key family_string_lookup[] = {
+ {"virtex2", 0x4},
+ {"virtex2pro", 0x5},
+ {"spartan3", 0x6},
+ {"virtex4", 0x7},
+ {"virtex5", 0x8},
+ {"spartan3e", 0x9},
+ {"spartan3a", 0xa},
+ {"spartan3an", 0xb},
+ {"spartan3adsp", 0xc},
+ {"spartan6", 0xd},
+ {"virtex6", 0xe},
+ {"virtex7", 0xf},
+ /* FIXME There is no key code defined for spartan2 */
+ {"spartan2", 0xf0},
+ {"kintex7", 0x10},
+ {"artix7", 0x11},
+ {"zynq7000", 0x12},
+ {"UltraScale Virtex", 0x13},
+ {"UltraScale Kintex", 0x14},
+ {"UltraScale+ Zynq", 0x15},
+ {"UltraScale+ Virtex", 0x16},
+ {"UltraScale+ Kintex", 0x17},
+ {"Spartan7", 0x18},
+ {NULL, 0},
+};
+
+struct cpuinfo cpuinfo;
+static struct device_node *cpu;
+
+void __init setup_cpuinfo(void)
+{
+ cpu = of_get_cpu_node(0, NULL);
+ if (!cpu)
+ pr_err("You don't have cpu or are missing cpu reg property!!!\n");
+
+ pr_info("%s: initialising\n", __func__);
+
+ switch (cpu_has_pvr()) {
+ case 0:
+ pr_warn("%s: No PVR support. Using static CPU info from FDT\n",
+ __func__);
+ set_cpuinfo_static(&cpuinfo, cpu);
+ break;
+/* FIXME Weird behavior has been seen with MB 7.00.a/b and 7.10.a -
+ * please do not use FULL PVR with MMU */
+ case 1:
+ pr_info("%s: Using full CPU PVR support\n",
+ __func__);
+ set_cpuinfo_static(&cpuinfo, cpu);
+ set_cpuinfo_pvr_full(&cpuinfo, cpu);
+ break;
+ default:
+ pr_warn("%s: Unsupported PVR setting\n", __func__);
+ set_cpuinfo_static(&cpuinfo, cpu);
+ }
+
+ if (cpuinfo.mmu_privins)
+ pr_warn("%s: Stream instructions enabled"
+ " - USERSPACE CAN LOCK THIS KERNEL!\n", __func__);
+
+ of_node_put(cpu);
+}
+
+void __init setup_cpuinfo_clk(void)
+{
+ struct clk *clk;
+
+ clk = of_clk_get(cpu, 0);
+ if (IS_ERR(clk)) {
+ pr_err("ERROR: CPU CCF input clock not found\n");
+ /* take timebase-frequency from DTS */
+ cpuinfo.cpu_clock_freq = fcpu(cpu, "timebase-frequency");
+ } else {
+ cpuinfo.cpu_clock_freq = clk_get_rate(clk);
+ }
+
+ if (!cpuinfo.cpu_clock_freq) {
+ pr_err("ERROR: CPU clock frequency not setup\n");
+ BUG();
+ }
+}
diff --git a/arch/microblaze/kernel/cpu/mb.c b/arch/microblaze/kernel/cpu/mb.c
new file mode 100644
index 000000000..9581d194d
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/mb.c
@@ -0,0 +1,158 @@
+/*
+ * CPU-version specific code
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2006-2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+
+#include <linux/bug.h>
+#include <asm/cpuinfo.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <asm/page.h>
+#include <linux/param.h>
+#include <asm/pvr.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+
+static int show_cpuinfo(struct seq_file *m, void *v)
+{
+ char *fpga_family = "Unknown";
+ char *cpu_ver = "Unknown";
+ int i;
+
+	/* Look up the FPGA family string */
+ for (i = 0; family_string_lookup[i].s != NULL; i++) {
+ if (cpuinfo.fpga_family_code == family_string_lookup[i].k) {
+ fpga_family = (char *)family_string_lookup[i].s;
+ break;
+ }
+ }
+
+	/* Look up the HW version string */
+ for (i = 0; cpu_ver_lookup[i].s != NULL; i++) {
+ if (cpuinfo.ver_code == cpu_ver_lookup[i].k) {
+ cpu_ver = (char *)cpu_ver_lookup[i].s;
+ break;
+ }
+ }
+
+ seq_printf(m,
+ "CPU-Family: MicroBlaze\n"
+ "FPGA-Arch: %s\n"
+ "CPU-Ver: %s, %s endian\n"
+ "CPU-MHz: %d.%02d\n"
+ "BogoMips: %lu.%02lu\n",
+ fpga_family,
+ cpu_ver,
+ cpuinfo.endian ? "little" : "big",
+ cpuinfo.cpu_clock_freq / 1000000,
+ cpuinfo.cpu_clock_freq % 1000000,
+ loops_per_jiffy / (500000 / HZ),
+ (loops_per_jiffy / (5000 / HZ)) % 100);
+
+ seq_printf(m,
+ "HW:\n Shift:\t\t%s\n"
+ " MSR:\t\t%s\n"
+ " PCMP:\t\t%s\n"
+ " DIV:\t\t%s\n",
+ (cpuinfo.use_instr & PVR0_USE_BARREL_MASK) ? "yes" : "no",
+ (cpuinfo.use_instr & PVR2_USE_MSR_INSTR) ? "yes" : "no",
+ (cpuinfo.use_instr & PVR2_USE_PCMP_INSTR) ? "yes" : "no",
+ (cpuinfo.use_instr & PVR0_USE_DIV_MASK) ? "yes" : "no");
+
+ seq_printf(m, " MMU:\t\t%x\n", cpuinfo.mmu);
+
+ seq_printf(m,
+ " MUL:\t\t%s\n"
+ " FPU:\t\t%s\n",
+ (cpuinfo.use_mult & PVR2_USE_MUL64_MASK) ? "v2" :
+ (cpuinfo.use_mult & PVR0_USE_HW_MUL_MASK) ? "v1" : "no",
+ (cpuinfo.use_fpu & PVR2_USE_FPU2_MASK) ? "v2" :
+ (cpuinfo.use_fpu & PVR0_USE_FPU_MASK) ? "v1" : "no");
+
+ seq_printf(m,
+ " Exc:\t\t%s%s%s%s%s%s%s%s\n",
+ (cpuinfo.use_exc & PVR2_OPCODE_0x0_ILL_MASK) ? "op0x0 " : "",
+ (cpuinfo.use_exc & PVR2_UNALIGNED_EXC_MASK) ? "unal " : "",
+ (cpuinfo.use_exc & PVR2_ILL_OPCODE_EXC_MASK) ? "ill " : "",
+ (cpuinfo.use_exc & PVR2_IOPB_BUS_EXC_MASK) ? "iopb " : "",
+ (cpuinfo.use_exc & PVR2_DOPB_BUS_EXC_MASK) ? "dopb " : "",
+ (cpuinfo.use_exc & PVR2_DIV_ZERO_EXC_MASK) ? "zero " : "",
+ (cpuinfo.use_exc & PVR2_FPU_EXC_MASK) ? "fpu " : "",
+ (cpuinfo.use_exc & PVR2_USE_FSL_EXC) ? "fsl " : "");
+
+ seq_printf(m,
+ "Stream-insns:\t%sprivileged\n",
+ cpuinfo.mmu_privins ? "un" : "");
+
+ if (cpuinfo.use_icache)
+ seq_printf(m,
+ "Icache:\t\t%ukB\tline length:\t%dB\n",
+ cpuinfo.icache_size >> 10,
+ cpuinfo.icache_line_length);
+ else
+ seq_puts(m, "Icache:\t\tno\n");
+
+ if (cpuinfo.use_dcache) {
+ seq_printf(m,
+ "Dcache:\t\t%ukB\tline length:\t%dB\n",
+ cpuinfo.dcache_size >> 10,
+ cpuinfo.dcache_line_length);
+ seq_puts(m, "Dcache-Policy:\t");
+ if (cpuinfo.dcache_wb)
+ seq_puts(m, "write-back\n");
+ else
+ seq_puts(m, "write-through\n");
+ } else {
+ seq_puts(m, "Dcache:\t\tno\n");
+ }
+
+ seq_printf(m,
+ "HW-Debug:\t%s\n",
+ cpuinfo.hw_debug ? "yes" : "no");
+
+ seq_printf(m,
+ "PVR-USR1:\t%02x\n"
+ "PVR-USR2:\t%08x\n",
+ cpuinfo.pvr_user1,
+ cpuinfo.pvr_user2);
+
+ seq_printf(m, "Page size:\t%lu\n", PAGE_SIZE);
+
+ return 0;
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ int i = *pos;
+
+ return i < NR_CPUS ? (void *) (i + 1) : NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ ++*pos;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = show_cpuinfo,
+};
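
The cpuinfo_op table is the glue to /proc/cpuinfo: the architecture supplies only the iterator, and the generic proc layer drives it. Roughly what fs/proc/cpuinfo.c does with it (a sketch, not the exact current code):

    static int cpuinfo_open(struct inode *inode, struct file *file)
    {
            /* binds this arch's iterator to /proc/cpuinfo */
            return seq_open(file, &cpuinfo_op);
    }
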
diff --git a/arch/microblaze/kernel/cpu/pvr.c b/arch/microblaze/kernel/cpu/pvr.c
new file mode 100644
index 000000000..f139052a3
--- /dev/null
+++ b/arch/microblaze/kernel/cpu/pvr.c
@@ -0,0 +1,81 @@
+/*
+ * Support for MicroBlaze PVR (processor version register)
+ *
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <asm/exceptions.h>
+#include <asm/pvr.h>
+#include <linux/irqflags.h>
+
+/*
+ * Until we get an assembler that knows about the pvr registers,
+ * this horrible cruft will have to do.
+ * That hardcoded opcode is mfs r3, rpvrNN
+ */
+
+#define get_single_pvr(pvrid, val) \
+{ \
+ register unsigned tmp __asm__("r3"); \
+ tmp = 0x0; /* Prevent warning about unused */ \
+ __asm__ __volatile__ ( \
+ "mfs %0, rpvr" #pvrid ";" \
+ : "=r" (tmp) : : "memory"); \
+ val = tmp; \
+}
+
+/*
+ * Does the CPU support the PVR register?
+ * return value:
+ * 0: no PVR
+ * 1: full PVR
+ * 2: partial PVR
+ *
+ * This must work on all CPU versions, including those before the
+ * PVR was even an option.
+ */
+
+int cpu_has_pvr(void)
+{
+ unsigned long flags;
+ unsigned pvr0;
+
+ local_save_flags(flags);
+
+ /* PVR bit in MSR tells us if there is any support */
+ if (!(flags & PVR_MSR_BIT))
+ return 0;
+
+ get_single_pvr(0, pvr0);
+ pr_debug("%s: pvr0 is 0x%08x\n", __func__, pvr0);
+
+ if (pvr0 & PVR0_PVR_FULL_MASK)
+ return 1;
+
+ /* for partial PVR use static cpuinfo */
+ return 2;
+}
+
+void get_pvr(struct pvr_s *p)
+{
+ get_single_pvr(0, p->pvr[0]);
+ get_single_pvr(1, p->pvr[1]);
+ get_single_pvr(2, p->pvr[2]);
+ get_single_pvr(3, p->pvr[3]);
+ get_single_pvr(4, p->pvr[4]);
+ get_single_pvr(5, p->pvr[5]);
+ get_single_pvr(6, p->pvr[6]);
+ get_single_pvr(7, p->pvr[7]);
+ get_single_pvr(8, p->pvr[8]);
+ get_single_pvr(9, p->pvr[9]);
+ get_single_pvr(10, p->pvr[10]);
+ get_single_pvr(11, p->pvr[11]);
+}
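
Since get_single_pvr() pastes its first argument straight into the mnemonic, expanding it for pvrid = 0 makes the "horrible cruft" concrete; this is what the macro above produces (r3 is pinned by the register variable):

    {
            register unsigned tmp __asm__("r3");
            tmp = 0x0;	/* prevent warning about unused */
            __asm__ __volatile__ (
                    "mfs %0, rpvr0;"	/* read PVR0 into r3 */
                    : "=r" (tmp) : : "memory");
            p->pvr[0] = tmp;
    }
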
diff --git a/arch/microblaze/kernel/dma.c b/arch/microblaze/kernel/dma.c
new file mode 100644
index 000000000..04d091ade
--- /dev/null
+++ b/arch/microblaze/kernel/dma.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2009-2010 PetaLogix
+ * Copyright (C) 2006 Benjamin Herrenschmidt, IBM Corporation
+ *
+ * Provide default implementations of the DMA mapping callbacks for
+ * directly mapped busses.
+ */
+
+#include <linux/device.h>
+#include <linux/dma-map-ops.h>
+#include <linux/gfp.h>
+#include <linux/export.h>
+#include <linux/bug.h>
+#include <asm/cacheflush.h>
+
+static void __dma_sync(phys_addr_t paddr, size_t size,
+ enum dma_data_direction direction)
+{
+ switch (direction) {
+ case DMA_TO_DEVICE:
+ case DMA_BIDIRECTIONAL:
+ flush_dcache_range(paddr, paddr + size);
+ break;
+ case DMA_FROM_DEVICE:
+ invalidate_dcache_range(paddr, paddr + size);
+ break;
+ default:
+ BUG();
+ }
+}
+
+void arch_sync_dma_for_device(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ __dma_sync(paddr, size, dir);
+}
+
+void arch_sync_dma_for_cpu(phys_addr_t paddr, size_t size,
+ enum dma_data_direction dir)
+{
+ __dma_sync(paddr, size, dir);
+}
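
The direction mapping follows from cache semantics on a non-coherent bus: before the device reads (DMA_TO_DEVICE), dirty lines must be flushed out to memory; before the CPU reads what the device wrote (DMA_FROM_DEVICE), stale lines must be invalidated. A hedged driver-side sketch of how these hooks get exercised through the generic DMA API (example_tx and its parameters are made up for illustration):

    #include <linux/dma-mapping.h>

    static int example_tx(struct device *dev, void *buf, size_t len)
    {
            dma_addr_t handle;

            /* on this arch the map ends up in arch_sync_dma_for_device(),
             * i.e. flush_dcache_range() for DMA_TO_DEVICE */
            handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
            if (dma_mapping_error(dev, handle))
                    return -ENOMEM;

            /* ... start the transfer and wait for its completion ... */

            dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
            return 0;
    }
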
diff --git a/arch/microblaze/kernel/entry-nommu.S b/arch/microblaze/kernel/entry-nommu.S
new file mode 100644
index 000000000..7e394fc2c
--- /dev/null
+++ b/arch/microblaze/kernel/entry-nommu.S
@@ -0,0 +1,622 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <linux/errno.h>
+#include <asm/entry.h>
+#include <asm/asm-offsets.h>
+#include <asm/registers.h>
+#include <asm/unistd.h>
+#include <asm/percpu.h>
+#include <asm/signal.h>
+
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ .macro disable_irq
+ msrclr r0, MSR_IE
+ .endm
+
+ .macro enable_irq
+ msrset r0, MSR_IE
+ .endm
+
+ .macro clear_bip
+ msrclr r0, MSR_BIP
+ .endm
+#else
+ .macro disable_irq
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_IE
+ mts rmsr, r11
+ .endm
+
+ .macro enable_irq
+ mfs r11, rmsr
+ ori r11, r11, MSR_IE
+ mts rmsr, r11
+ .endm
+
+ .macro clear_bip
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_BIP
+ mts rmsr, r11
+ .endm
+#endif
+
+ENTRY(_interrupt)
+ swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
+ swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
+ lwi r11, r0, PER_CPU(KM) /* load mode indicator */
+ beqid r11, 1f
+ nop
+ brid 2f /* jump over */
+ addik r1, r1, (-PT_SIZE) /* room for pt_regs (delay slot) */
+1: /* switch to kernel stack */
+ lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
+ lwi r1, r1, TS_THREAD_INFO /* get the thread info */
+ /* calculate kernel stack pointer */
+ addik r1, r1, THREAD_SIZE - PT_SIZE
+2:
+ swi r11, r1, PT_MODE /* store the mode */
+ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
+ swi r2, r1, PT_R2
+ swi r3, r1, PT_R3
+ swi r4, r1, PT_R4
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ swi r12, r1, PT_R12
+ swi r13, r1, PT_R13
+ swi r14, r1, PT_R14
+ swi r14, r1, PT_PC
+ swi r15, r1, PT_R15
+ swi r16, r1, PT_R16
+ swi r17, r1, PT_R17
+ swi r18, r1, PT_R18
+ swi r19, r1, PT_R19
+ swi r20, r1, PT_R20
+ swi r21, r1, PT_R21
+ swi r22, r1, PT_R22
+ swi r23, r1, PT_R23
+ swi r24, r1, PT_R24
+ swi r25, r1, PT_R25
+ swi r26, r1, PT_R26
+ swi r27, r1, PT_R27
+ swi r28, r1, PT_R28
+ swi r29, r1, PT_R29
+ swi r30, r1, PT_R30
+ swi r31, r1, PT_R31
+ /* special purpose registers */
+ mfs r11, rmsr
+ swi r11, r1, PT_MSR
+ mfs r11, rear
+ swi r11, r1, PT_EAR
+ mfs r11, resr
+ swi r11, r1, PT_ESR
+ mfs r11, rfsr
+ swi r11, r1, PT_FSR
+ /* reload original stack pointer and save it */
+ lwi r11, r0, PER_CPU(ENTRY_SP)
+ swi r11, r1, PT_R1
+ /* update mode indicator we are in kernel mode */
+ addik r11, r0, 1
+ swi r11, r0, PER_CPU(KM)
+ /* restore r31 */
+ lwi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* prepare the link register, the argument and jump */
+ addik r15, r0, ret_from_intr - 8
+ addk r6, r0, r15
+ braid do_IRQ
+ add r5, r0, r1
+
+ret_from_intr:
+ lwi r11, r1, PT_MODE
+ bneid r11, no_intr_resched
+
+3:
+ lwi r6, r31, TS_THREAD_INFO /* get thread info */
+ lwi r19, r6, TI_FLAGS /* get flags in thread info */
+ /* do an extra work if any bits are set */
+
+ andi r11, r19, _TIF_NEED_RESCHED
+ beqi r11, 1f
+ bralid r15, schedule
+ nop
+ bri 3b
+1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
+ beqid r11, no_intr_resched
+ addk r5, r1, r0
+ bralid r15, do_notify_resume
+ addk r6, r0, r0
+ bri 3b
+
+no_intr_resched:
+ /* Disable interrupts, we are now committed to the state restore */
+ disable_irq
+
+ /* save mode indicator */
+ lwi r11, r1, PT_MODE
+ swi r11, r0, PER_CPU(KM)
+
+ /* save r31 */
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+restore_context:
+ /* special purpose registers */
+ lwi r11, r1, PT_FSR
+ mts rfsr, r11
+ lwi r11, r1, PT_ESR
+ mts resr, r11
+ lwi r11, r1, PT_EAR
+ mts rear, r11
+ lwi r11, r1, PT_MSR
+ mts rmsr, r11
+
+ lwi r31, r1, PT_R31
+ lwi r30, r1, PT_R30
+ lwi r29, r1, PT_R29
+ lwi r28, r1, PT_R28
+ lwi r27, r1, PT_R27
+ lwi r26, r1, PT_R26
+ lwi r25, r1, PT_R25
+ lwi r24, r1, PT_R24
+ lwi r23, r1, PT_R23
+ lwi r22, r1, PT_R22
+ lwi r21, r1, PT_R21
+ lwi r20, r1, PT_R20
+ lwi r19, r1, PT_R19
+ lwi r18, r1, PT_R18
+ lwi r17, r1, PT_R17
+ lwi r16, r1, PT_R16
+ lwi r15, r1, PT_R15
+ lwi r14, r1, PT_PC
+ lwi r13, r1, PT_R13
+ lwi r12, r1, PT_R12
+ lwi r11, r1, PT_R11
+ lwi r10, r1, PT_R10
+ lwi r9, r1, PT_R9
+ lwi r8, r1, PT_R8
+ lwi r7, r1, PT_R7
+ lwi r6, r1, PT_R6
+ lwi r5, r1, PT_R5
+ lwi r4, r1, PT_R4
+ lwi r3, r1, PT_R3
+ lwi r2, r1, PT_R2
+ lwi r1, r1, PT_R1
+ rtid r14, 0
+ nop
+
+ENTRY(_reset)
+ brai 0;
+
+ENTRY(_user_exception)
+ swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
+ swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
+ lwi r11, r0, PER_CPU(KM) /* load mode indicator */
+ beqid r11, 1f /* Already in kernel mode? */
+ nop
+ brid 2f /* jump over */
+ addik r1, r1, (-PT_SIZE) /* Room for pt_regs (delay slot) */
+1: /* Switch to kernel stack */
+ lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
+ lwi r1, r1, TS_THREAD_INFO /* get the thread info */
+ /* calculate kernel stack pointer */
+ addik r1, r1, THREAD_SIZE - PT_SIZE
+2:
+ swi r11, r1, PT_MODE /* store the mode */
+ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
+ /* save them on stack */
+ swi r2, r1, PT_R2
+ swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
+ swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ /* r12: _always_ in clobber list; see unistd.h */
+ swi r12, r1, PT_R12
+ swi r13, r1, PT_R13
+ /* r14: _always_ in clobber list; see unistd.h */
+ swi r14, r1, PT_R14
+ /* but we want to return to the next inst. */
+ addik r14, r14, 0x4
+ swi r14, r1, PT_PC /* increment by 4 and store in pc */
+ swi r15, r1, PT_R15
+ swi r16, r1, PT_R16
+ swi r17, r1, PT_R17
+ swi r18, r1, PT_R18
+ swi r19, r1, PT_R19
+ swi r20, r1, PT_R20
+ swi r21, r1, PT_R21
+ swi r22, r1, PT_R22
+ swi r23, r1, PT_R23
+ swi r24, r1, PT_R24
+ swi r25, r1, PT_R25
+ swi r26, r1, PT_R26
+ swi r27, r1, PT_R27
+ swi r28, r1, PT_R28
+ swi r29, r1, PT_R29
+ swi r30, r1, PT_R30
+ swi r31, r1, PT_R31
+
+ disable_irq
+ nop /* make sure IE bit is in effect */
+ clear_bip /* once IE is in effect it is safe to clear BIP */
+ nop
+
+ /* special purpose registers */
+ mfs r11, rmsr
+ swi r11, r1, PT_MSR
+ mfs r11, rear
+ swi r11, r1, PT_EAR
+ mfs r11, resr
+ swi r11, r1, PT_ESR
+ mfs r11, rfsr
+ swi r11, r1, PT_FSR
+ /* reload original stack pointer and save it */
+ lwi r11, r0, PER_CPU(ENTRY_SP)
+ swi r11, r1, PT_R1
+ /* update mode indicator we are in kernel mode */
+ addik r11, r0, 1
+ swi r11, r0, PER_CPU(KM)
+ /* restore r31 */
+ lwi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* re-enable interrupts now we are in kernel mode */
+ enable_irq
+
+ /* See if the system call number is valid. */
+ addi r11, r12, -__NR_syscalls
+ bgei r11, 1f /* return to user if not valid */
+ /* Figure out which function to use for this system call. */
+ /* Note Microblaze barrel shift is optional, so don't rely on it */
+ add r12, r12, r12 /* convert num -> ptr */
+ addik r30, r0, 1 /* restarts allowed */
+ add r12, r12, r12
+ lwi r12, r12, sys_call_table /* Get function pointer */
+ addik r15, r0, ret_to_user-8 /* set return address */
+ bra r12 /* Make the system call. */
+ bri 0 /* won't reach here */
+1:
+ brid ret_to_user /* jump to syscall epilogue */
+ addi r3, r0, -ENOSYS /* set errno in delay slot */
+
+/*
+ * Debug traps are like a system call, but entered via brki r14, 0x60
+ * All we need to do is send the SIGTRAP signal to current, ptrace and
+ * do_notify_resume will handle the rest
+ */
+ENTRY(_debug_exception)
+ swi r1, r0, PER_CPU(ENTRY_SP) /* save the current sp */
+ lwi r1, r0, PER_CPU(CURRENT_SAVE) /* get the saved current */
+ lwi r1, r1, TS_THREAD_INFO /* get the thread info */
+ addik r1, r1, THREAD_SIZE - PT_SIZE /* get the kernel stack */
+ swi r11, r0, PER_CPU(R11_SAVE) /* temporarily save r11 */
+ lwi r11, r0, PER_CPU(KM) /* load mode indicator */
+//save_context:
+ swi r11, r1, PT_MODE /* store the mode */
+ lwi r11, r0, PER_CPU(R11_SAVE) /* reload r11 */
+ /* save them on stack */
+ swi r2, r1, PT_R2
+ swi r3, r1, PT_R3 /* r3: _always_ in clobber list; see unistd.h */
+ swi r4, r1, PT_R4 /* r4: _always_ in clobber list; see unistd.h */
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ /* r12: _always_ in clobber list; see unistd.h */
+ swi r12, r1, PT_R12
+ swi r13, r1, PT_R13
+ /* r14: _always_ in clobber list; see unistd.h */
+ swi r14, r1, PT_R14
+ swi r14, r1, PT_PC /* Will return to interrupted instruction */
+ swi r15, r1, PT_R15
+ swi r16, r1, PT_R16
+ swi r17, r1, PT_R17
+ swi r18, r1, PT_R18
+ swi r19, r1, PT_R19
+ swi r20, r1, PT_R20
+ swi r21, r1, PT_R21
+ swi r22, r1, PT_R22
+ swi r23, r1, PT_R23
+ swi r24, r1, PT_R24
+ swi r25, r1, PT_R25
+ swi r26, r1, PT_R26
+ swi r27, r1, PT_R27
+ swi r28, r1, PT_R28
+ swi r29, r1, PT_R29
+ swi r30, r1, PT_R30
+ swi r31, r1, PT_R31
+
+ disable_irq
+ nop /* make sure IE bit is in effect */
+ clear_bip /* once IE is in effect it is safe to clear BIP */
+ nop
+
+ /* special purpose registers */
+ mfs r11, rmsr
+ swi r11, r1, PT_MSR
+ mfs r11, rear
+ swi r11, r1, PT_EAR
+ mfs r11, resr
+ swi r11, r1, PT_ESR
+ mfs r11, rfsr
+ swi r11, r1, PT_FSR
+ /* reload original stack pointer and save it */
+ lwi r11, r0, PER_CPU(ENTRY_SP)
+ swi r11, r1, PT_R1
+ /* update mode indicator we are in kernel mode */
+ addik r11, r0, 1
+ swi r11, r0, PER_CPU(KM)
+ /* restore r31 */
+ lwi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* re-enable interrupts now we are in kernel mode */
+ enable_irq
+
+ addi r5, r0, SIGTRAP /* sending the trap signal */
+ add r6, r0, r31 /* to current */
+ bralid r15, send_sig
+ add r7, r0, r0 /* 3rd param zero */
+
+ addik r30, r0, 1 /* restarts allowed ??? */
+ /* Restore r3/r4 to work around how ret_to_user works */
+ lwi r3, r1, PT_R3
+ lwi r4, r1, PT_R4
+ bri ret_to_user
+
+ENTRY(_break)
+ bri 0
+
+/* struct task_struct *_switch_to(struct thread_info *prev,
+ struct thread_info *next); */
+ENTRY(_switch_to)
+ /* prepare return value */
+ addk r3, r0, r31
+
+ /* save registers in cpu_context */
+ /* use r11 and r12, volatile registers, as temp register */
+ addik r11, r5, TI_CPU_CONTEXT
+ swi r1, r11, CC_R1
+ swi r2, r11, CC_R2
+ /* skip volatile registers.
+ * they are saved on stack when we jumped to _switch_to() */
+ /* dedicated registers */
+ swi r13, r11, CC_R13
+ swi r14, r11, CC_R14
+ swi r15, r11, CC_R15
+ swi r16, r11, CC_R16
+ swi r17, r11, CC_R17
+ swi r18, r11, CC_R18
+ /* save non-volatile registers */
+ swi r19, r11, CC_R19
+ swi r20, r11, CC_R20
+ swi r21, r11, CC_R21
+ swi r22, r11, CC_R22
+ swi r23, r11, CC_R23
+ swi r24, r11, CC_R24
+ swi r25, r11, CC_R25
+ swi r26, r11, CC_R26
+ swi r27, r11, CC_R27
+ swi r28, r11, CC_R28
+ swi r29, r11, CC_R29
+ swi r30, r11, CC_R30
+ /* special purpose registers */
+ mfs r12, rmsr
+ swi r12, r11, CC_MSR
+ mfs r12, rear
+ swi r12, r11, CC_EAR
+ mfs r12, resr
+ swi r12, r11, CC_ESR
+ mfs r12, rfsr
+ swi r12, r11, CC_FSR
+
+ /* update r31, the current */
+ lwi r31, r6, TI_TASK
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+
+ /* get new process' cpu context and restore */
+ addik r11, r6, TI_CPU_CONTEXT
+
+ /* special purpose registers */
+ lwi r12, r11, CC_FSR
+ mts rfsr, r12
+ lwi r12, r11, CC_ESR
+ mts resr, r12
+ lwi r12, r11, CC_EAR
+ mts rear, r12
+ lwi r12, r11, CC_MSR
+ mts rmsr, r12
+ /* non-volatile registers */
+ lwi r30, r11, CC_R30
+ lwi r29, r11, CC_R29
+ lwi r28, r11, CC_R28
+ lwi r27, r11, CC_R27
+ lwi r26, r11, CC_R26
+ lwi r25, r11, CC_R25
+ lwi r24, r11, CC_R24
+ lwi r23, r11, CC_R23
+ lwi r22, r11, CC_R22
+ lwi r21, r11, CC_R21
+ lwi r20, r11, CC_R20
+ lwi r19, r11, CC_R19
+ /* dedicated registers */
+ lwi r18, r11, CC_R18
+ lwi r17, r11, CC_R17
+ lwi r16, r11, CC_R16
+ lwi r15, r11, CC_R15
+ lwi r14, r11, CC_R14
+ lwi r13, r11, CC_R13
+ /* skip volatile registers */
+ lwi r2, r11, CC_R2
+ lwi r1, r11, CC_R1
+
+ rtsd r15, 8
+ nop
+
+ENTRY(ret_from_fork)
+ addk r5, r0, r3
+ brlid r15, schedule_tail
+ nop
+ swi r31, r1, PT_R31 /* save r31 in user context. */
+ /* will soon be restored to r31 in ret_to_user */
+ addk r3, r0, r0
+ brid ret_to_user
+ nop
+
+ENTRY(ret_from_kernel_thread)
+ brlid r15, schedule_tail
+ addk r5, r0, r3
+ brald r15, r20
+ addk r5, r0, r19
+ brid ret_to_user
+ addk r3, r0, r0
+
+work_pending:
+ lwi r11, r1, PT_MODE
+ bneid r11, 2f
+3:
+ enable_irq
+ andi r11, r19, _TIF_NEED_RESCHED
+ beqi r11, 1f
+ bralid r15, schedule
+ nop
+ bri 4f
+1: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME
+ beqi r11, no_work_pending
+ addk r5, r30, r0
+ bralid r15, do_notify_resume
+ addik r6, r0, 1
+ addk r30, r0, r0 /* no restarts from now on */
+4:
+ disable_irq
+ lwi r6, r31, TS_THREAD_INFO /* get thread info */
+ lwi r19, r6, TI_FLAGS /* get flags in thread info */
+ bri 3b
+
+ENTRY(ret_to_user)
+ disable_irq
+
+ swi r4, r1, PT_R4 /* return val */
+ swi r3, r1, PT_R3 /* return val */
+
+ lwi r6, r31, TS_THREAD_INFO /* get thread info */
+ lwi r19, r6, TI_FLAGS /* get flags in thread info */
+ bnei r19, work_pending /* do an extra work if any bits are set */
+no_work_pending:
+ disable_irq
+
+2:
+ /* save r31 */
+ swi r31, r0, PER_CPU(CURRENT_SAVE)
+ /* save mode indicator */
+ lwi r18, r1, PT_MODE
+ swi r18, r0, PER_CPU(KM)
+//restore_context:
+ /* special purpose registers */
+ lwi r18, r1, PT_FSR
+ mts rfsr, r18
+ lwi r18, r1, PT_ESR
+ mts resr, r18
+ lwi r18, r1, PT_EAR
+ mts rear, r18
+ lwi r18, r1, PT_MSR
+ mts rmsr, r18
+
+ lwi r31, r1, PT_R31
+ lwi r30, r1, PT_R30
+ lwi r29, r1, PT_R29
+ lwi r28, r1, PT_R28
+ lwi r27, r1, PT_R27
+ lwi r26, r1, PT_R26
+ lwi r25, r1, PT_R25
+ lwi r24, r1, PT_R24
+ lwi r23, r1, PT_R23
+ lwi r22, r1, PT_R22
+ lwi r21, r1, PT_R21
+ lwi r20, r1, PT_R20
+ lwi r19, r1, PT_R19
+ lwi r18, r1, PT_R18
+ lwi r17, r1, PT_R17
+ lwi r16, r1, PT_R16
+ lwi r15, r1, PT_R15
+ lwi r14, r1, PT_PC
+ lwi r13, r1, PT_R13
+ lwi r12, r1, PT_R12
+ lwi r11, r1, PT_R11
+ lwi r10, r1, PT_R10
+ lwi r9, r1, PT_R9
+ lwi r8, r1, PT_R8
+ lwi r7, r1, PT_R7
+ lwi r6, r1, PT_R6
+ lwi r5, r1, PT_R5
+ lwi r4, r1, PT_R4 /* return val */
+ lwi r3, r1, PT_R3 /* return val */
+ lwi r2, r1, PT_R2
+ lwi r1, r1, PT_R1
+
+ rtid r14, 0
+ nop
+
+sys_rt_sigreturn_wrapper:
+ addk r30, r0, r0 /* no restarts for this one */
+ brid sys_rt_sigreturn
+ addk r5, r1, r0
+
+ /* Interrupt vector table */
+ .section .init.ivt, "ax"
+ .org 0x0
+ brai _reset
+ brai _user_exception
+ brai _interrupt
+ brai _break
+ brai _hw_exception_handler
+ .org 0x60
+ brai _debug_exception
+
+.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
+
+type_SYSCALL:
+ .ascii "SYSCALL\0"
+type_IRQ:
+ .ascii "IRQ\0"
+type_IRQ_PREEMPT:
+ .ascii "IRQ (PREEMPTED)\0"
+type_SYSCALL_PREEMPT:
+ .ascii " SYSCALL (PREEMPTED)\0"
+
+ /*
+ * Trap decoding for stack unwinder
+ * Tuples are (start addr, end addr, string)
+ * If the return address lies in [start addr, end addr], the
+ * unwinder displays 'string'
+ */
+
+ .align 4
+.global microblaze_trap_handlers
+microblaze_trap_handlers:
+ /* Exact matches come first */
+ .word ret_to_user ; .word ret_to_user ; .word type_SYSCALL
+ .word ret_from_intr; .word ret_from_intr ; .word type_IRQ
+ /* Fuzzy matches go here */
+ .word ret_from_intr; .word no_intr_resched; .word type_IRQ_PREEMPT
+ .word work_pending ; .word no_work_pending; .word type_SYSCALL_PREEMPT
+ /* End of table */
+ .word 0 ; .word 0 ; .word 0
diff --git a/arch/microblaze/kernel/entry.S b/arch/microblaze/kernel/entry.S
new file mode 100644
index 000000000..b179f8f6d
--- /dev/null
+++ b/arch/microblaze/kernel/entry.S
@@ -0,0 +1,1011 @@
+/*
+ * Low-level system-call handling, trap handlers and context-switching
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2003 John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001,2002 NEC Corporation
+ * Copyright (C) 2001,2002 Miles Bader <miles@gnu.org>
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ *
+ * Written by Miles Bader <miles@gnu.org>
+ * Heavily modified by John Williams for Microblaze
+ */
+
+#include <linux/sys.h>
+#include <linux/linkage.h>
+
+#include <asm/entry.h>
+#include <asm/current.h>
+#include <asm/processor.h>
+#include <asm/exceptions.h>
+#include <asm/asm-offsets.h>
+#include <asm/thread_info.h>
+
+#include <asm/page.h>
+#include <asm/unistd.h>
+
+#include <linux/errno.h>
+#include <asm/signal.h>
+
+#undef DEBUG
+
+#ifdef DEBUG
+/* Create space for syscall counting. */
+.section .data
+.global syscall_debug_table
+.align 4
+syscall_debug_table:
+ .space (__NR_syscalls * 4)
+#endif /* DEBUG */
+
+#define C_ENTRY(name) .globl name; .align 4; name
+
+/*
+ * Various ways of setting and clearing BIP in flags reg.
+ * This is mucky, but necessary on MicroBlaze versions that
+ * allow msr ops to write to BIP
+ */
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ .macro clear_bip
+ msrclr r0, MSR_BIP
+ .endm
+
+ .macro set_bip
+ msrset r0, MSR_BIP
+ .endm
+
+ .macro clear_eip
+ msrclr r0, MSR_EIP
+ .endm
+
+ .macro set_ee
+ msrset r0, MSR_EE
+ .endm
+
+ .macro disable_irq
+ msrclr r0, MSR_IE
+ .endm
+
+ .macro enable_irq
+ msrset r0, MSR_IE
+ .endm
+
+ .macro set_ums
+ msrset r0, MSR_UMS
+ msrclr r0, MSR_VMS
+ .endm
+
+ .macro set_vms
+ msrclr r0, MSR_UMS
+ msrset r0, MSR_VMS
+ .endm
+
+ .macro clear_ums
+ msrclr r0, MSR_UMS
+ .endm
+
+ .macro clear_vms_ums
+ msrclr r0, MSR_VMS | MSR_UMS
+ .endm
+#else
+ .macro clear_bip
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_BIP
+ mts rmsr, r11
+ .endm
+
+ .macro set_bip
+ mfs r11, rmsr
+ ori r11, r11, MSR_BIP
+ mts rmsr, r11
+ .endm
+
+ .macro clear_eip
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_EIP
+ mts rmsr, r11
+ .endm
+
+ .macro set_ee
+ mfs r11, rmsr
+ ori r11, r11, MSR_EE
+ mts rmsr, r11
+ .endm
+
+ .macro disable_irq
+ mfs r11, rmsr
+ andi r11, r11, ~MSR_IE
+ mts rmsr, r11
+ .endm
+
+ .macro enable_irq
+ mfs r11, rmsr
+ ori r11, r11, MSR_IE
+ mts rmsr, r11
+ .endm
+
+ .macro set_ums
+ mfs r11, rmsr
+ ori r11, r11, MSR_VMS
+ andni r11, r11, MSR_UMS
+ mts rmsr, r11
+ .endm
+
+ .macro set_vms
+ mfs r11, rmsr
+ ori r11, r11, MSR_VMS
+ andni r11, r11, MSR_UMS
+ mts rmsr, r11
+ .endm
+
+ .macro clear_ums
+ mfs r11, rmsr
+ andni r11, r11, MSR_UMS
+ mts rmsr,r11
+ .endm
+
+ .macro clear_vms_ums
+ mfs r11, rmsr
+ andni r11, r11, (MSR_VMS|MSR_UMS)
+ mts rmsr,r11
+ .endm
+#endif
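+
+/*
+ * Illustration (a summary, not generated code): with MSR instructions
+ * available, disable_irq above is the single instruction
+ * "msrclr r0, MSR_IE", while the fallback variant is a
+ * three-instruction read-modify-write through r11 - which is why r11
+ * must be treated as clobbered by all of these macros.
+ */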
+
+/* Define how to call high-level functions. With MMU, virtual mode must be
+ * enabled when calling the high-level function. Clobbers R11.
+ * VM_ON, VM_OFF, DO_JUMP_BIPCLR, DO_CALL
+ */
+
+/* turn on virtual protected mode save */
+#define VM_ON \
+ set_ums; \
+ rted r0, 2f; \
+ nop; \
+2:
+
+/* turn off virtual protected mode save and user mode save */
+#define VM_OFF \
+ clear_vms_ums; \
+ rted r0, TOPHYS(1f); \
+ nop; \
+1:
+
+#define SAVE_REGS \
+ swi r2, r1, PT_R2; /* Save SDA */ \
+ swi r3, r1, PT_R3; \
+ swi r4, r1, PT_R4; \
+ swi r5, r1, PT_R5; \
+ swi r6, r1, PT_R6; \
+ swi r7, r1, PT_R7; \
+ swi r8, r1, PT_R8; \
+ swi r9, r1, PT_R9; \
+ swi r10, r1, PT_R10; \
+ swi r11, r1, PT_R11; /* save clobbered regs after rval */\
+ swi r12, r1, PT_R12; \
+ swi r13, r1, PT_R13; /* Save SDA2 */ \
+ swi r14, r1, PT_PC; /* PC, before IRQ/trap */ \
+ swi r15, r1, PT_R15; /* Save LP */ \
+ swi r16, r1, PT_R16; \
+ swi r17, r1, PT_R17; \
+ swi r18, r1, PT_R18; /* Save asm scratch reg */ \
+ swi r19, r1, PT_R19; \
+ swi r20, r1, PT_R20; \
+ swi r21, r1, PT_R21; \
+ swi r22, r1, PT_R22; \
+ swi r23, r1, PT_R23; \
+ swi r24, r1, PT_R24; \
+ swi r25, r1, PT_R25; \
+ swi r26, r1, PT_R26; \
+ swi r27, r1, PT_R27; \
+ swi r28, r1, PT_R28; \
+ swi r29, r1, PT_R29; \
+ swi r30, r1, PT_R30; \
+ swi r31, r1, PT_R31; /* Save current task reg */ \
+ mfs r11, rmsr; /* save MSR */ \
+ swi r11, r1, PT_MSR;
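+
+/* Note: SAVE_REGS intentionally leaves PT_MODE and PT_R1 to the
+ * individual entry paths below, since user and kernel entries fill
+ * them in differently. */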
+
+#define RESTORE_REGS_GP \
+ lwi r2, r1, PT_R2; /* restore SDA */ \
+ lwi r3, r1, PT_R3; \
+ lwi r4, r1, PT_R4; \
+ lwi r5, r1, PT_R5; \
+ lwi r6, r1, PT_R6; \
+ lwi r7, r1, PT_R7; \
+ lwi r8, r1, PT_R8; \
+ lwi r9, r1, PT_R9; \
+ lwi r10, r1, PT_R10; \
+ lwi r11, r1, PT_R11; /* restore clobbered regs after rval */\
+ lwi r12, r1, PT_R12; \
+ lwi r13, r1, PT_R13; /* restore SDA2 */ \
+ lwi r14, r1, PT_PC; /* RESTORE_LINK PC, before IRQ/trap */\
+ lwi r15, r1, PT_R15; /* restore LP */ \
+ lwi r16, r1, PT_R16; \
+ lwi r17, r1, PT_R17; \
+ lwi r18, r1, PT_R18; /* restore asm scratch reg */ \
+ lwi r19, r1, PT_R19; \
+ lwi r20, r1, PT_R20; \
+ lwi r21, r1, PT_R21; \
+ lwi r22, r1, PT_R22; \
+ lwi r23, r1, PT_R23; \
+ lwi r24, r1, PT_R24; \
+ lwi r25, r1, PT_R25; \
+ lwi r26, r1, PT_R26; \
+ lwi r27, r1, PT_R27; \
+ lwi r28, r1, PT_R28; \
+ lwi r29, r1, PT_R29; \
+ lwi r30, r1, PT_R30; \
+ lwi r31, r1, PT_R31; /* Restore cur task reg */
+
+#define RESTORE_REGS \
+ lwi r11, r1, PT_MSR; \
+ mts rmsr , r11; \
+ RESTORE_REGS_GP
+
+#define RESTORE_REGS_RTBD \
+ lwi r11, r1, PT_MSR; \
+ andni r11, r11, MSR_EIP; /* clear EIP */ \
+ ori r11, r11, MSR_EE | MSR_BIP; /* set EE and BIP */ \
+ mts rmsr , r11; \
+ RESTORE_REGS_GP
+
+#define SAVE_STATE \
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* save stack */ \
+ /* See if already in kernel mode.*/ \
+ mfs r1, rmsr; \
+ andi r1, r1, MSR_UMS; \
+ bnei r1, 1f; \
+ /* Kernel-mode state save. */ \
+ /* Reload kernel stack-ptr. */ \
+ lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
+	/* FIXME: these two lines could be merged into one */ \
+ /* tophys(r1,r1); */ \
+ /* addik r1, r1, -PT_SIZE; */ \
+ addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
+ SAVE_REGS \
+ brid 2f; \
+ swi r1, r1, PT_MODE; \
+1: /* User-mode state save. */ \
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */\
+ tophys(r1,r1); \
+ lwi r1, r1, TS_THREAD_INFO; /* get the thread info */ \
+	/* MS: these three instructions could be merged into one */ \
+ /* addik r1, r1, THREAD_SIZE; */ \
+ /* tophys(r1,r1); */ \
+ /* addik r1, r1, -PT_SIZE; */ \
+ addik r1, r1, THREAD_SIZE + CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE; \
+ SAVE_REGS \
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP)); \
+ swi r11, r1, PT_R1; /* Store user SP. */ \
+ swi r0, r1, PT_MODE; /* Was in user-mode. */ \
+ /* MS: I am clearing UMS even in case when I come from kernel space */ \
+ clear_ums; \
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+
+.text
+
+/*
+ * User trap.
+ *
+ * System calls are handled here.
+ *
+ * Syscall protocol:
+ * Syscall number in r12, args in r5-r10
+ * Return value in r3
+ *
+ * Trap entered via brki instruction, so BIP bit is set, and interrupts
+ * are masked. This is nice; it means we don't have to CLI before state save
+ */
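+
+/*
+ * For reference, a sketch of the user-side sequence that lands here
+ * (illustrative only - __NR_xxx stands for any syscall number; offset
+ * 0x8 in the vector table at the end of this file branches to
+ * _user_exception):
+ *
+ *	addik	r12, r0, __NR_xxx	; syscall number
+ *	brki	r14, 0x8		; trap - r14 gets the address of the brki
+ *	; args in r5-r10, result returned in r3
+ */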
+C_ENTRY(_user_exception):
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)) /* save stack */
+ addi r14, r14, 4 /* return address is 4 bytes after call */
+
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ tophys(r1,r1);
+ lwi r1, r1, TS_THREAD_INFO; /* get stack from task_struct */
+/* calculate kernel stack pointer from task struct (THREAD_SIZE = 8k) */
+ addik r1, r1, THREAD_SIZE;
+ tophys(r1,r1);
+
+ addik r1, r1, -PT_SIZE; /* Make room on the stack. */
+ SAVE_REGS
+ swi r0, r1, PT_R3
+ swi r0, r1, PT_R4
+
+ swi r0, r1, PT_MODE; /* Was in user-mode. */
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ swi r11, r1, PT_R1; /* Store user SP. */
+ clear_ums;
+2: lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ /* Save away the syscall number. */
+ swi r12, r1, PT_R0;
+ tovirt(r1,r1)
+
+/* where the trap should return; -8 adjusts for rtsd r15, 8 */
+/* Jump to the appropriate function for the system call number in r12
+ * (r12 is not preserved), or return an error if r12 is not valid. The LP
+ * register should point to the location where
+ * the called function should return. [note that MAKE_SYS_CALL uses label 1] */
+
+ /* Step into virtual mode */
+ rtbd r0, 3f
+ nop
+3:
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO /* get thread info */
+ lwi r11, r11, TI_FLAGS /* get flags in thread info */
+ andi r11, r11, _TIF_WORK_SYSCALL_MASK
+ beqi r11, 4f
+
+ addik r3, r0, -ENOSYS
+ swi r3, r1, PT_R3
+ brlid r15, do_syscall_trace_enter
+ addik r5, r1, PT_R0
+
+ # do_syscall_trace_enter returns the new syscall nr.
+ addk r12, r0, r3
+ lwi r5, r1, PT_R5;
+ lwi r6, r1, PT_R6;
+ lwi r7, r1, PT_R7;
+ lwi r8, r1, PT_R8;
+ lwi r9, r1, PT_R9;
+ lwi r10, r1, PT_R10;
+4:
+/* Jump to the appropriate function for the system call number in r12
+ * (r12 is not preserved), or return an error if r12 is not valid.
+ * The LP register should point to the location where the called function
+ * should return. [note that MAKE_SYS_CALL uses label 1] */
+ /* See if the system call number is valid */
+ blti r12, 5f
+ addi r11, r12, -__NR_syscalls;
+ bgei r11, 5f;
+ /* Figure out which function to use for this system call. */
+ /* Note Microblaze barrel shift is optional, so don't rely on it */
+ add r12, r12, r12; /* convert num -> ptr */
+ add r12, r12, r12;
+ addi r30, r0, 1 /* restarts allowed */
+
+#ifdef DEBUG
+ /* Trace syscalls and store them to syscall_debug_table */
+ /* The first syscall location stores total syscall number */
+ lwi r3, r0, syscall_debug_table
+ addi r3, r3, 1
+ swi r3, r0, syscall_debug_table
+ lwi r3, r12, syscall_debug_table
+ addi r3, r3, 1
+ swi r3, r12, syscall_debug_table
+#endif
+
+ # Find and jump into the syscall handler.
+ lwi r12, r12, sys_call_table
+ /* where the trap should return; -8 adjusts for rtsd r15, 8 */
+ addi r15, r0, ret_from_trap-8
+ bra r12
+
+ /* The syscall number is invalid, return an error. */
+5:
+ braid ret_from_trap
+ addi r3, r0, -ENOSYS;
+
+/* Entry point used to return from a syscall/trap */
+/* We re-enable BIP bit before state restore */
+C_ENTRY(ret_from_trap):
+ swi r3, r1, PT_R3
+ swi r4, r1, PT_R4
+
+ lwi r11, r1, PT_MODE;
+/* See if returning to kernel mode, if so, skip resched &c. */
+ bnei r11, 2f;
+ /* We're returning to user mode, so check for various conditions that
+ * trigger rescheduling. */
+ /* FIXME: Restructure all these flag checks. */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
+ lwi r11, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r11, _TIF_WORK_SYSCALL_MASK
+ beqi r11, 1f
+
+ brlid r15, do_syscall_trace_leave
+ addik r5, r1, PT_R0
+1:
+ /* We're returning to user mode, so check for various conditions that
+ * trigger rescheduling. */
+ /* get thread info from current task */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
+ lwi r19, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r19, _TIF_NEED_RESCHED;
+ beqi r11, 5f;
+
+ bralid r15, schedule; /* Call scheduler */
+ nop; /* delay slot */
+ bri 1b
+
+ /* Maybe handle a signal */
+5:
+ andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
+ beqi r11, 4f; /* Signals to handle, handle them */
+
+ addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
+ bralid r15, do_notify_resume; /* Handle any signals */
+ add r6, r30, r0; /* Arg 2: int in_syscall */
+ add r30, r0, r0 /* no more restarts */
+ bri 1b
+
+/* Finally, return to user state. */
+4: set_bip; /* Ints masked for state restore */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS_RTBD;
+ addik r1, r1, PT_SIZE /* Clean up stack space. */
+ lwi r1, r1, PT_R1 - PT_SIZE;/* Restore user stack pointer. */
+ bri 6f;
+
+/* Return to kernel state. */
+2: set_bip; /* Ints masked for state restore */
+ VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS_RTBD;
+ addik r1, r1, PT_SIZE /* Clean up stack space. */
+ tovirt(r1,r1);
+6:
+TRAP_return: /* Make global symbol for debugging */
+ rtbd r14, 0; /* Instructions to return from an IRQ */
+ nop;
+
+
+/* This is the initial entry point for a new child thread, with an appropriate
+ stack in place that makes it look like the child is in the middle of a
+ syscall. This function is actually `returned to' from switch_thread
+ (copy_thread makes ret_from_fork the return address in each new thread's
+ saved context). */
+C_ENTRY(ret_from_fork):
+ bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
+ add r5, r3, r0; /* switch_thread returns the prev task */
+ /* ( in the delay slot ) */
+ brid ret_from_trap; /* Do normal trap return */
+ add r3, r0, r0; /* Child's fork call should return 0. */
+
+C_ENTRY(ret_from_kernel_thread):
+ bralid r15, schedule_tail; /* ...which is schedule_tail's arg */
+ add r5, r3, r0; /* switch_thread returns the prev task */
+ /* ( in the delay slot ) */
+ brald r15, r20 /* fn was left in r20 */
+ addk r5, r0, r19 /* ... and argument - in r19 */
+ brid ret_from_trap
+ add r3, r0, r0
+
+C_ENTRY(sys_rt_sigreturn_wrapper):
+ addik r30, r0, 0 /* no restarts */
+ brid sys_rt_sigreturn /* Do real work */
+ addik r5, r1, 0; /* add user context as 1st arg */
+
+/*
+ * HW EXCEPTION routine start
+ */
+C_ENTRY(full_exception_trap):
+ /* adjust exception address for privileged instruction
+ * to find where it is */
+ addik r17, r17, -4
+ SAVE_STATE /* Save registers */
+ /* PC, before IRQ/trap - this is one instruction above */
+ swi r17, r1, PT_PC;
+ tovirt(r1,r1)
+ /* FIXME this could be stored directly in the PT_ESR reg.
+ * I tested it but there is a fault */
+ /* where the trap should return; -8 adjusts for rtsd r15, 8 */
+ addik r15, r0, ret_from_exc - 8
+ mfs r6, resr
+ mfs r7, rfsr; /* save FSR */
+ mts rfsr, r0; /* Clear sticky fsr */
+ rted r0, full_exception
+ addik r5, r1, 0 /* parameter struct pt_regs * regs */
+
+/*
+ * Unaligned data trap.
+ *
+ * An unaligned data trap on the last 4k page is handled here.
+ *
+ * Trap entered via exception, so EE bit is set, and interrupts
+ * are masked. This is nice; it means we don't have to CLI before state save
+ *
+ * The assembler routine is in "arch/microblaze/kernel/hw_exception_handler.S"
+ */
+C_ENTRY(unaligned_data_trap):
+ /* MS: I have to save the r11 value and then restore it because
+ * set_bip, clear_eip and set_ee use r11 as a temp register if MSR
+ * instructions are not used. We don't need to do this if MSR
+ * instructions are used, since they use r0 instead of r11.
+ * I am using ENTRY_SP, which should primarily be used only for
+ * stack pointer saving. */
+ swi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ set_bip; /* equalize initial state for all possible entries */
+ clear_eip;
+ set_ee;
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ SAVE_STATE /* Save registers.*/
+ /* PC, before IRQ/trap - this is one instruction above */
+ swi r17, r1, PT_PC;
+ tovirt(r1,r1)
+ /* where the trap should return; -8 adjusts for rtsd r15, 8 */
+ addik r15, r0, ret_from_exc-8
+ mfs r3, resr /* ESR */
+ mfs r4, rear /* EAR */
+ rtbd r0, _unaligned_data_exception
+ addik r7, r1, 0 /* parameter struct pt_regs * regs */
+
+/*
+ * Page fault traps.
+ *
+ * If the real exception handler (from hw_exception_handler.S) didn't find
+ * the mapping for the process, then we're thrown here to handle the situation.
+ *
+ * Trap entered via exceptions, so EE bit is set, and interrupts
+ * are masked. This is nice; it means we don't have to CLI before state save
+ *
+ * Build a standard exception frame for TLB Access errors. All TLB exceptions
+ * will bail out to this point if they can't resolve the lightweight TLB fault.
+ *
+ * The C function called is in "arch/microblaze/mm/fault.c", declared as:
+ * void do_page_fault(struct pt_regs *regs,
+ * unsigned long address,
+ * unsigned long error_code)
+ */
+/* data and instruction trap - which one it is gets resolved in fault.c */
+C_ENTRY(page_fault_data_trap):
+ SAVE_STATE /* Save registers.*/
+ /* PC, before IRQ/trap - this is one instruction above */
+ swi r17, r1, PT_PC;
+ tovirt(r1,r1)
+ /* where the trap should return; -8 adjusts for rtsd r15, 8 */
+ addik r15, r0, ret_from_exc-8
+ mfs r6, rear /* parameter unsigned long address */
+ mfs r7, resr /* parameter unsigned long error_code */
+ rted r0, do_page_fault
+ addik r5, r1, 0 /* parameter struct pt_regs * regs */
+
+C_ENTRY(page_fault_instr_trap):
+ SAVE_STATE /* Save registers.*/
+ /* PC, before IRQ/trap - this is one instruction above */
+ swi r17, r1, PT_PC;
+ tovirt(r1,r1)
+ /* where the trap should return; -8 adjusts for rtsd r15, 8 */
+ addik r15, r0, ret_from_exc-8
+ mfs r6, rear /* parameter unsigned long address */
+ ori r7, r0, 0 /* parameter unsigned long error_code */
+ rted r0, do_page_fault
+ addik r5, r1, 0 /* parameter struct pt_regs * regs */
+
+/* Entry point used to return from an exception. */
+C_ENTRY(ret_from_exc):
+ lwi r11, r1, PT_MODE;
+ bnei r11, 2f; /* See if returning to kernel mode, */
+ /* ... if so, skip resched &c. */
+
+ /* We're returning to user mode, so check for various conditions that
+ trigger rescheduling. */
+1:
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
+ lwi r19, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r19, _TIF_NEED_RESCHED;
+ beqi r11, 5f;
+
+/* Call the scheduler before returning from a syscall/trap. */
+ bralid r15, schedule; /* Call scheduler */
+ nop; /* delay slot */
+ bri 1b
+
+ /* Maybe handle a signal */
+5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
+ beqi r11, 4f; /* Signals to handle, handle them */
+
+ /*
+ * Handle a signal return; Pending signals should be in r18.
+ *
+ * Not all registers are saved by the normal trap/interrupt entry
+ * points (for instance, call-saved registers (because the normal
+ * C-compiler calling sequence in the kernel makes sure they're
+ * preserved), and call-clobbered registers in the case of
+ * traps), but signal handlers may want to examine or change the
+ * complete register state. Here we save anything not saved by
+ * the normal entry sequence, so that it may be safely restored
+ * (in a possibly modified form) after do_notify_resume returns. */
+ addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
+ bralid r15, do_notify_resume; /* Handle any signals */
+ addi r6, r0, 0; /* Arg 2: int in_syscall */
+ bri 1b
+
+/* Finally, return to user state. */
+4: set_bip; /* Ints masked for state restore */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ VM_OFF;
+ tophys(r1,r1);
+
+ RESTORE_REGS_RTBD;
+ addik r1, r1, PT_SIZE /* Clean up stack space. */
+
+ lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer. */
+ bri 6f;
+/* Return to kernel state. */
+2: set_bip; /* Ints masked for state restore */
+ VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS_RTBD;
+ addik r1, r1, PT_SIZE /* Clean up stack space. */
+
+ tovirt(r1,r1);
+6:
+EXC_return: /* Make global symbol for debugging */
+ rtbd r14, 0; /* Instructions to return from an IRQ */
+ nop;
+
+/*
+ * HW EXCEPTION routine end
+ */
+
+/*
+ * Hardware maskable interrupts.
+ *
+ * The stack-pointer (r1) should have already been saved to the memory
+ * location PER_CPU(ENTRY_SP).
+ */
+C_ENTRY(_interrupt):
+/* MS: we are in physical address */
+/* Save registers, switch to proper stack, convert SP to virtual.*/
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+ /* MS: See if already in kernel mode. */
+ mfs r1, rmsr
+ nop
+ andi r1, r1, MSR_UMS
+ bnei r1, 1f
+
+/* Kernel-mode state save. */
+ lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+ tophys(r1,r1); /* MS: r1 now holds the physical address of the stack */
+ /* save registers */
+/* MS: Make room on the stack -> activation record */
+ addik r1, r1, -PT_SIZE;
+ SAVE_REGS
+ brid 2f;
+ swi r1, r1, PT_MODE; /* 0 - user mode, 1 - kernel mode */
+1:
+/* User-mode state save. */
+ /* MS: get the saved current */
+ lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ tophys(r1,r1);
+ lwi r1, r1, TS_THREAD_INFO;
+ addik r1, r1, THREAD_SIZE;
+ tophys(r1,r1);
+ /* save registers */
+ addik r1, r1, -PT_SIZE;
+ SAVE_REGS
+ /* calculate mode */
+ swi r0, r1, PT_MODE;
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ swi r11, r1, PT_R1;
+ clear_ums;
+2:
+ lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ tovirt(r1,r1)
+ addik r15, r0, irq_call;
+irq_call:rtbd r0, do_IRQ;
+ addik r5, r1, 0;
+
+/* MS: we are in virtual mode */
+ret_from_irq:
+ lwi r11, r1, PT_MODE;
+ bnei r11, 2f;
+
+1:
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
+ lwi r19, r11, TI_FLAGS; /* MS: get flags from thread info */
+ andi r11, r19, _TIF_NEED_RESCHED;
+ beqi r11, 5f
+ bralid r15, schedule;
+ nop; /* delay slot */
+ bri 1b
+
+ /* Maybe handle a signal */
+5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
+ beqid r11, no_intr_resched
+/* Handle a signal return; Pending signals should be in r18. */
+ addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
+ bralid r15, do_notify_resume; /* Handle any signals */
+ addi r6, r0, 0; /* Arg 2: int in_syscall */
+ bri 1b
+
+/* Finally, return to user state. */
+no_intr_resched:
+ /* Disable interrupts, we are now committed to the state restore */
+ disable_irq
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE);
+ VM_OFF;
+ tophys(r1,r1);
+ RESTORE_REGS
+ addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
+ lwi r1, r1, PT_R1 - PT_SIZE;
+ bri 6f;
+/* MS: Return to kernel state. */
+2:
+#ifdef CONFIG_PREEMPTION
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO;
+ /* MS: get preempt_count from thread info */
+ lwi r5, r11, TI_PREEMPT_COUNT;
+ bgti r5, restore;
+
+ lwi r5, r11, TI_FLAGS; /* get flags in thread info */
+ andi r5, r5, _TIF_NEED_RESCHED;
+ beqi r5, restore /* if zero jump over */
+
+ /* interrupts are off, that's why preempt_schedule_irq is called */
+ bralid r15, preempt_schedule_irq
+ nop
+restore:
+#endif
+ VM_OFF /* MS: turn off MMU */
+ tophys(r1,r1)
+ RESTORE_REGS
+ addik r1, r1, PT_SIZE /* MS: Clean up stack space. */
+ tovirt(r1,r1);
+6:
+IRQ_return: /* MS: Make global symbol for debugging */
+ rtid r14, 0
+ nop
+
+/*
+ * Debug trap for KGDB. Entered in _debug_exception via brki r16, 0x18,
+ * which then calls the handling function with the saved pt_regs
+ */
+C_ENTRY(_debug_exception):
+ /* BIP bit is set on entry, no interrupts can occur */
+ swi r1, r0, TOPHYS(PER_CPU(ENTRY_SP))
+
+ mfs r1, rmsr
+ nop
+ andi r1, r1, MSR_UMS
+ bnei r1, 1f
+/* MS: Kernel-mode state save - kgdb */
+ lwi r1, r0, TOPHYS(PER_CPU(ENTRY_SP)); /* Reload kernel stack-ptr*/
+
+ /* BIP bit is set on entry, no interrupts can occur */
+ addik r1, r1, CONFIG_KERNEL_BASE_ADDR - CONFIG_KERNEL_START - PT_SIZE;
+ SAVE_REGS;
+ /* save all regs to pt_reg structure */
+ swi r0, r1, PT_R0; /* R0 must be saved too */
+ swi r14, r1, PT_R14 /* rewrite saved R14 value */
+ swi r16, r1, PT_PC; /* PC and r16 are the same */
+ /* save special purpose registers to pt_regs */
+ mfs r11, rear;
+ swi r11, r1, PT_EAR;
+ mfs r11, resr;
+ swi r11, r1, PT_ESR;
+ mfs r11, rfsr;
+ swi r11, r1, PT_FSR;
+
+ /* the stack pointer holds a physical address and has been decreased
+ * by PT_SIZE, but we need to store the correct R1 value */
+ addik r11, r1, CONFIG_KERNEL_START - CONFIG_KERNEL_BASE_ADDR + PT_SIZE;
+ swi r11, r1, PT_R1
+ /* MS: r31 - current pointer isn't changed */
+ tovirt(r1,r1)
+#ifdef CONFIG_KGDB
+ addi r5, r1, 0 /* pass pt_regs address as the first arg */
+ addik r15, r0, dbtrap_call; /* return address */
+ rtbd r0, microblaze_kgdb_break
+ nop;
+#endif
+ /* MS: Place handler for brki from kernel space if KGDB is OFF.
+ * It is very unlikely that another brki instruction is called. */
+ bri 0
+
+/* MS: User-mode state save - gdb */
+1: lwi r1, r0, TOPHYS(PER_CPU(CURRENT_SAVE)); /* get saved current */
+ tophys(r1,r1);
+ lwi r1, r1, TS_THREAD_INFO; /* get the thread info */
+ addik r1, r1, THREAD_SIZE; /* calculate kernel stack pointer */
+ tophys(r1,r1);
+
+ addik r1, r1, -PT_SIZE; /* Make room on the stack. */
+ SAVE_REGS;
+ swi r16, r1, PT_PC; /* Save PC - r16 holds it for debug traps */
+ swi r0, r1, PT_MODE; /* Was in user-mode. */
+ lwi r11, r0, TOPHYS(PER_CPU(ENTRY_SP));
+ swi r11, r1, PT_R1; /* Store user SP. */
+ lwi CURRENT_TASK, r0, TOPHYS(PER_CPU(CURRENT_SAVE));
+ tovirt(r1,r1)
+ set_vms;
+ addik r5, r1, 0;
+ addik r15, r0, dbtrap_call;
+dbtrap_call: /* Return point for kernel/user entry + 8 because of rtsd r15, 8 */
+ rtbd r0, sw_exception
+ nop
+
+ /* MS: The first instruction for the second part of the gdb/kgdb */
+ set_bip; /* Ints masked for state restore */
+ lwi r11, r1, PT_MODE;
+ bnei r11, 2f;
+/* MS: Return to user space - gdb */
+1:
+ /* Get current task ptr into r11 */
+ lwi r11, CURRENT_TASK, TS_THREAD_INFO; /* get thread info */
+ lwi r19, r11, TI_FLAGS; /* get flags in thread info */
+ andi r11, r19, _TIF_NEED_RESCHED;
+ beqi r11, 5f;
+
+ /* Call the scheduler before returning from a syscall/trap. */
+ bralid r15, schedule; /* Call scheduler */
+ nop; /* delay slot */
+ bri 1b
+
+ /* Maybe handle a signal */
+5: andi r11, r19, _TIF_SIGPENDING | _TIF_NOTIFY_RESUME;
+ beqi r11, 4f; /* Signals to handle, handle them */
+
+ addik r5, r1, 0; /* Arg 1: struct pt_regs *regs */
+ bralid r15, do_notify_resume; /* Handle any signals */
+ addi r6, r0, 0; /* Arg 2: int in_syscall */
+ bri 1b
+
+/* Finally, return to user state. */
+4: swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE); /* save current */
+ VM_OFF;
+ tophys(r1,r1);
+ /* MS: Restore all regs */
+ RESTORE_REGS_RTBD
+ addik r1, r1, PT_SIZE /* Clean up stack space */
+ lwi r1, r1, PT_R1 - PT_SIZE; /* Restore user stack pointer */
+DBTRAP_return_user: /* MS: Make global symbol for debugging */
+ rtbd r16, 0; /* MS: Instructions to return from a debug trap */
+ nop;
+
+/* MS: Return to kernel state - kgdb */
+2: VM_OFF;
+ tophys(r1,r1);
+ /* MS: Restore all regs */
+ RESTORE_REGS_RTBD
+ lwi r14, r1, PT_R14;
+ lwi r16, r1, PT_PC;
+ addik r1, r1, PT_SIZE; /* MS: Clean up stack space */
+ tovirt(r1,r1);
+DBTRAP_return_kernel: /* MS: Make global symbol for debugging */
+ rtbd r16, 0; /* MS: Instructions to return from a debug trap */
+ nop;
+
+
+ENTRY(_switch_to)
+ /* prepare return value */
+ addk r3, r0, CURRENT_TASK
+
+ /* save registers in cpu_context */
+ /* use r11 and r12, volatile registers, as temp register */
+ /* get the start of the previous process's cpu_context */
+ addik r11, r5, TI_CPU_CONTEXT
+ swi r1, r11, CC_R1
+ swi r2, r11, CC_R2
+ /* skip volatile registers.
+ * they are saved on stack when we jumped to _switch_to() */
+ /* dedicated registers */
+ swi r13, r11, CC_R13
+ swi r14, r11, CC_R14
+ swi r15, r11, CC_R15
+ swi r16, r11, CC_R16
+ swi r17, r11, CC_R17
+ swi r18, r11, CC_R18
+ /* save non-volatile registers */
+ swi r19, r11, CC_R19
+ swi r20, r11, CC_R20
+ swi r21, r11, CC_R21
+ swi r22, r11, CC_R22
+ swi r23, r11, CC_R23
+ swi r24, r11, CC_R24
+ swi r25, r11, CC_R25
+ swi r26, r11, CC_R26
+ swi r27, r11, CC_R27
+ swi r28, r11, CC_R28
+ swi r29, r11, CC_R29
+ swi r30, r11, CC_R30
+ /* special purpose registers */
+ mfs r12, rmsr
+ swi r12, r11, CC_MSR
+ mfs r12, rear
+ swi r12, r11, CC_EAR
+ mfs r12, resr
+ swi r12, r11, CC_ESR
+ mfs r12, rfsr
+ swi r12, r11, CC_FSR
+
+ /* update r31, the current task pointer, to the task which runs next */
+ lwi CURRENT_TASK, r6, TI_TASK
+ /* store it to CURRENT_SAVE too */
+ swi CURRENT_TASK, r0, PER_CPU(CURRENT_SAVE)
+
+ /* get new process' cpu context and restore */
+ /* get the start of the next task's cpu_context */
+ addik r11, r6, TI_CPU_CONTEXT
+
+ /* non-volatile registers */
+ lwi r30, r11, CC_R30
+ lwi r29, r11, CC_R29
+ lwi r28, r11, CC_R28
+ lwi r27, r11, CC_R27
+ lwi r26, r11, CC_R26
+ lwi r25, r11, CC_R25
+ lwi r24, r11, CC_R24
+ lwi r23, r11, CC_R23
+ lwi r22, r11, CC_R22
+ lwi r21, r11, CC_R21
+ lwi r20, r11, CC_R20
+ lwi r19, r11, CC_R19
+ /* dedicated registers */
+ lwi r18, r11, CC_R18
+ lwi r17, r11, CC_R17
+ lwi r16, r11, CC_R16
+ lwi r15, r11, CC_R15
+ lwi r14, r11, CC_R14
+ lwi r13, r11, CC_R13
+ /* skip volatile registers */
+ lwi r2, r11, CC_R2
+ lwi r1, r11, CC_R1
+
+ /* special purpose registers */
+ lwi r12, r11, CC_FSR
+ mts rfsr, r12
+ lwi r12, r11, CC_MSR
+ mts rmsr, r12
+
+ rtsd r15, 8
+ nop
+
+ENTRY(_reset)
+ VM_OFF
+ brai 0; /* Jump to reset vector */
+
+ /* These are compiled and loaded into high memory, then
+ * copied into place in mach_early_setup */
+ .section .init.ivt, "ax"
+#if CONFIG_MANUAL_RESET_VECTOR
+ .org 0x0
+ brai CONFIG_MANUAL_RESET_VECTOR
+#endif
+ .org 0x8
+ brai TOPHYS(_user_exception); /* syscall handler */
+ .org 0x10
+ brai TOPHYS(_interrupt); /* Interrupt handler */
+ .org 0x18
+ brai TOPHYS(_debug_exception); /* debug trap handler */
+ .org 0x20
+ brai TOPHYS(_hw_exception_handler); /* HW exception handler */
+
+.section .rodata,"a"
+#include "syscall_table.S"
+
+syscall_table_size=(.-sys_call_table)
+
+type_SYSCALL:
+ .ascii "SYSCALL\0"
+type_IRQ:
+ .ascii "IRQ\0"
+type_IRQ_PREEMPT:
+ .ascii "IRQ (PREEMPTED)\0"
+type_SYSCALL_PREEMPT:
+ .ascii " SYSCALL (PREEMPTED)\0"
+
+ /*
+ * Trap decoding for stack unwinder
+ * Tuples are (start addr, end addr, string)
+ * If the return address lies in [start addr, end addr], the
+ * unwinder displays 'string'
+ */
+
+ .align 4
+.global microblaze_trap_handlers
+microblaze_trap_handlers:
+ /* Exact matches come first */
+ .word ret_from_trap; .word ret_from_trap ; .word type_SYSCALL
+ .word ret_from_irq ; .word ret_from_irq ; .word type_IRQ
+ /* Fuzzy matches go here */
+ .word ret_from_irq ; .word no_intr_resched ; .word type_IRQ_PREEMPT
+ .word ret_from_trap; .word TRAP_return ; .word type_SYSCALL_PREEMPT
+ /* End of table */
+ .word 0 ; .word 0 ; .word 0
diff --git a/arch/microblaze/kernel/exceptions.c b/arch/microblaze/kernel/exceptions.c
new file mode 100644
index 000000000..6d3a6a644
--- /dev/null
+++ b/arch/microblaze/kernel/exceptions.c
@@ -0,0 +1,149 @@
+/*
+ * HW exception handling
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+/*
+ * This file handles the architecture-dependent parts of hardware exceptions
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/kallsyms.h>
+
+#include <asm/exceptions.h>
+#include <asm/entry.h> /* For KM CPU var */
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <asm/current.h>
+#include <asm/cacheflush.h>
+
+#define MICROBLAZE_ILL_OPCODE_EXCEPTION 0x02
+#define MICROBLAZE_IBUS_EXCEPTION 0x03
+#define MICROBLAZE_DBUS_EXCEPTION 0x04
+#define MICROBLAZE_DIV_ZERO_EXCEPTION 0x05
+#define MICROBLAZE_FPU_EXCEPTION 0x06
+#define MICROBLAZE_PRIVILEGED_EXCEPTION 0x07
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(const char *str, struct pt_regs *fp, long err)
+{
+ console_verbose();
+ spin_lock_irq(&die_lock);
+ pr_warn("Oops: %s, sig: %ld\n", str, err);
+ show_regs(fp);
+ spin_unlock_irq(&die_lock);
+ /* make_task_dead() should take care of panic'ing from an interrupt
+ * context so we don't handle it here
+ */
+ make_task_dead(err);
+}
+
+/* for user application debugging */
+asmlinkage void sw_exception(struct pt_regs *regs)
+{
+ _exception(SIGTRAP, regs, TRAP_BRKPT, regs->r16);
+ flush_dcache_range(regs->r16, regs->r16 + 0x4);
+ flush_icache_range(regs->r16, regs->r16 + 0x4);
+}
+
+void _exception(int signr, struct pt_regs *regs, int code, unsigned long addr)
+{
+ if (kernel_mode(regs))
+ die("Exception in kernel mode", regs, signr);
+
+ force_sig_fault(signr, code, (void __user *)addr);
+}
+
+asmlinkage void full_exception(struct pt_regs *regs, unsigned int type,
+ int fsr, int addr)
+{
+#ifdef CONFIG_MMU
+ addr = regs->pc;
+#endif
+
+#if 0
+ pr_warn("Exception %02x in %s mode, FSR=%08x PC=%08x ESR=%08x\n",
+ type, user_mode(regs) ? "user" : "kernel", fsr,
+ (unsigned int) regs->pc, (unsigned int) regs->esr);
+#endif
+
+ switch (type & 0x1F) {
+ case MICROBLAZE_ILL_OPCODE_EXCEPTION:
+ if (user_mode(regs)) {
+ pr_debug("Illegal opcode exception in user mode\n");
+ _exception(SIGILL, regs, ILL_ILLOPC, addr);
+ return;
+ }
+ pr_warn("Illegal opcode exception in kernel mode.\n");
+ die("opcode exception", regs, SIGBUS);
+ break;
+ case MICROBLAZE_IBUS_EXCEPTION:
+ if (user_mode(regs)) {
+ pr_debug("Instruction bus error exception in user mode\n");
+ _exception(SIGBUS, regs, BUS_ADRERR, addr);
+ return;
+ }
+ pr_warn("Instruction bus error exception in kernel mode.\n");
+ die("bus exception", regs, SIGBUS);
+ break;
+ case MICROBLAZE_DBUS_EXCEPTION:
+ if (user_mode(regs)) {
+ pr_debug("Data bus error exception in user mode\n");
+ _exception(SIGBUS, regs, BUS_ADRERR, addr);
+ return;
+ }
+ pr_warn("Data bus error exception in kernel mode.\n");
+ die("bus exception", regs, SIGBUS);
+ break;
+ case MICROBLAZE_DIV_ZERO_EXCEPTION:
+ if (user_mode(regs)) {
+ pr_debug("Divide by zero exception in user mode\n");
+ _exception(SIGFPE, regs, FPE_INTDIV, addr);
+ return;
+ }
+ pr_warn("Divide by zero exception in kernel mode.\n");
+ die("Divide by zero exception", regs, SIGBUS);
+ break;
+ case MICROBLAZE_FPU_EXCEPTION:
+ pr_debug("FPU exception\n");
+ /* IEEE FP exception */
+ /* the fsr parameter is reused to hold the FPE si_code value */
+ if (fsr & FSR_IO)
+ fsr = FPE_FLTINV;
+ else if (fsr & FSR_OF)
+ fsr = FPE_FLTOVF;
+ else if (fsr & FSR_UF)
+ fsr = FPE_FLTUND;
+ else if (fsr & FSR_DZ)
+ fsr = FPE_FLTDIV;
+ else if (fsr & FSR_DO)
+ fsr = FPE_FLTRES;
+ _exception(SIGFPE, regs, fsr, addr);
+ break;
+
+#ifdef CONFIG_MMU
+ case MICROBLAZE_PRIVILEGED_EXCEPTION:
+ pr_debug("Privileged exception\n");
+ _exception(SIGILL, regs, ILL_PRVOPC, addr);
+ break;
+#endif
+ default:
+ /* FIXME what to do in unexpected exception */
+ pr_warn("Unexpected exception %02x PC=%08x in %s mode\n",
+ type, (unsigned int) addr,
+ kernel_mode(regs) ? "kernel" : "user");
+ }
+ return;
+}
diff --git a/arch/microblaze/kernel/ftrace.c b/arch/microblaze/kernel/ftrace.c
new file mode 100644
index 000000000..224eea40e
--- /dev/null
+++ b/arch/microblaze/kernel/ftrace.c
@@ -0,0 +1,222 @@
+/*
+ * Ftrace support for Microblaze.
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * Based on MIPS and PowerPC ftrace code
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <asm/cacheflush.h>
+#include <linux/ftrace.h>
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Hook the return address and push it in the stack of return addrs
+ * in current thread info.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr)
+{
+ unsigned long old;
+ int faulted;
+ unsigned long return_hooker = (unsigned long)
+ &return_to_handler;
+
+ if (unlikely(ftrace_graph_is_dead()))
+ return;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * Protect against fault, even if it shouldn't
+ * happen. This tool is too much intrusive to
+ * ignore such a protection.
+ */
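+	/*
+	 * Note: each ".word 1b,4b" / ".word 2b,4b" pair below is a kernel
+	 * exception-table entry - if the load at 1: or the store at 2:
+	 * faults, control resumes at 4:, which sets faulted to 1 and
+	 * branches back to 3:.
+	 */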
+ asm volatile(" 1: lwi %0, %2, 0;" \
+ "2: swi %3, %2, 0;" \
+ " addik %1, r0, 0;" \
+ "3:" \
+ " .section .fixup, \"ax\";" \
+ "4: brid 3b;" \
+ " addik %1, r0, 1;" \
+ " .previous;" \
+ " .section __ex_table,\"a\";" \
+ " .word 1b,4b;" \
+ " .word 2b,4b;" \
+ " .previous;" \
+ : "=&r" (old), "=r" (faulted)
+ : "r" (parent), "r" (return_hooker)
+ );
+
+ flush_dcache_range((u32)parent, (u32)parent + 4);
+ flush_icache_range((u32)parent, (u32)parent + 4);
+
+ if (unlikely(faulted)) {
+ ftrace_graph_stop();
+ WARN_ON(1);
+ return;
+ }
+
+ if (function_graph_enter(old, self_addr, 0, NULL))
+ *parent = old;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/* save value to addr - it is safe to do it in asm */
+static int ftrace_modify_code(unsigned long addr, unsigned int value)
+{
+ int faulted = 0;
+
+ __asm__ __volatile__(" 1: swi %2, %1, 0;" \
+ " addik %0, r0, 0;" \
+ "2:" \
+ " .section .fixup, \"ax\";" \
+ "3: brid 2b;" \
+ " addik %0, r0, 1;" \
+ " .previous;" \
+ " .section __ex_table,\"a\";" \
+ " .word 1b,3b;" \
+ " .previous;" \
+ : "=r" (faulted)
+ : "r" (addr), "r" (value)
+ );
+
+ if (unlikely(faulted))
+ return -EFAULT;
+
+ flush_dcache_range(addr, addr + 4);
+ flush_icache_range(addr, addr + 4);
+
+ return 0;
+}
+
+#define MICROBLAZE_NOP 0x80000000
+#define MICROBLAZE_BRI 0xb800000C
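+/*
+ * For reference: 0x80000000 encodes "or r0, r0, r0" (the MicroBlaze
+ * nop) and 0xb800000C encodes "bri 12", a branch 12 bytes forward
+ * that skips the bralid/nop pair shown in the ftrace_make_nop
+ * comment below.
+ */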
+
+static unsigned int recorded; /* whether the original code was saved */
+static unsigned int imm; /* saving whole imm instruction */
+
+/* There are two approaches to how the ftrace_make_nop function works - see below */
+#undef USE_FTRACE_NOP
+
+#ifdef USE_FTRACE_NOP
+static unsigned int bralid; /* saving whole bralid instruction */
+#endif
+
+int ftrace_make_nop(struct module *mod,
+ struct dyn_ftrace *rec, unsigned long addr)
+{
+	/* this is the piece of code we are working with:
+ * b000c000 imm -16384
+ * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
+ * 80000000 or r0, r0, r0
+ *
+ * The first solution (!USE_FTRACE_NOP-could be called branch solution)
+ * b000c000 bri 12 (0xC - jump to any other instruction)
+ * b9fc8e30 bralid r15, -29136 // c0008e30 <_mcount>
+ * 80000000 or r0, r0, r0
+ * any other instruction
+ *
+ * The second solution (USE_FTRACE_NOP) - no jump just nops
+ * 80000000 or r0, r0, r0
+ * 80000000 or r0, r0, r0
+ * 80000000 or r0, r0, r0
+ */
+ int ret = 0;
+
+ if (recorded == 0) {
+ recorded = 1;
+ imm = *(unsigned int *)rec->ip;
+ pr_debug("%s: imm:0x%x\n", __func__, imm);
+#ifdef USE_FTRACE_NOP
+ bralid = *(unsigned int *)(rec->ip + 4);
+ pr_debug("%s: bralid 0x%x\n", __func__, bralid);
+#endif /* USE_FTRACE_NOP */
+ }
+
+#ifdef USE_FTRACE_NOP
+ ret = ftrace_modify_code(rec->ip, MICROBLAZE_NOP);
+ ret += ftrace_modify_code(rec->ip + 4, MICROBLAZE_NOP);
+#else /* USE_FTRACE_NOP */
+ ret = ftrace_modify_code(rec->ip, MICROBLAZE_BRI);
+#endif /* USE_FTRACE_NOP */
+ return ret;
+}
+
+/* I believe ftrace_make_nop is called first, before this function */
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ int ret;
+ pr_debug("%s: addr:0x%x, rec->ip: 0x%x, imm:0x%x\n",
+ __func__, (unsigned int)addr, (unsigned int)rec->ip, imm);
+ ret = ftrace_modify_code(rec->ip, imm);
+#ifdef USE_FTRACE_NOP
+ pr_debug("%s: bralid:0x%x\n", __func__, bralid);
+ ret += ftrace_modify_code(rec->ip + 4, bralid);
+#endif /* USE_FTRACE_NOP */
+ return ret;
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ unsigned long ip = (unsigned long)(&ftrace_call);
+ unsigned int upper = (unsigned int)func;
+ unsigned int lower = (unsigned int)func;
+ int ret = 0;
+
+	/* patch the ftrace_call site to load the new function address */
+ upper = 0xb0000000 + (upper >> 16); /* imm func_upper */
+ lower = 0x32800000 + (lower & 0xFFFF); /* addik r20, r0, func_lower */
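+	/* e.g. for a hypothetical func == 0xc0001234 this yields
+	 * upper == 0xb000c000 ("imm 0xc000") and lower == 0x32801234
+	 * ("addik r20, r0, 0x1234"); the imm prefix supplies the upper
+	 * 16 bits of the constant for the following addik. */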
+
+ pr_debug("%s: func=0x%x, ip=0x%x, upper=0x%x, lower=0x%x\n",
+ __func__, (unsigned int)func, (unsigned int)ip, upper, lower);
+
+ /* save upper and lower code */
+ ret = ftrace_modify_code(ip, upper);
+ ret += ftrace_modify_code(ip + 4, lower);
+
+ /* We just need to replace the rtsd r15, 8 with NOP */
+ ret += ftrace_modify_code((unsigned long)&ftrace_caller,
+ MICROBLAZE_NOP);
+
+ return ret;
+}
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+unsigned int old_jump; /* saving place for jump instruction */
+
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned int ret;
+ unsigned long ip = (unsigned long)(&ftrace_call_graph);
+
+ old_jump = *(unsigned int *)ip; /* save jump over instruction */
+ ret = ftrace_modify_code(ip, MICROBLAZE_NOP);
+
+ pr_debug("%s: Replace instruction: 0x%x\n", __func__, old_jump);
+ return ret;
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned int ret;
+ unsigned long ip = (unsigned long)(&ftrace_call_graph);
+
+ ret = ftrace_modify_code(ip, old_jump);
+
+ pr_debug("%s\n", __func__);
+ return ret;
+}
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#endif /* CONFIG_DYNAMIC_FTRACE */
diff --git a/arch/microblaze/kernel/head.S b/arch/microblaze/kernel/head.S
new file mode 100644
index 000000000..14b276406
--- /dev/null
+++ b/arch/microblaze/kernel/head.S
@@ -0,0 +1,386 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * MMU code derived from arch/ppc/kernel/head_4xx.S:
+ * Copyright (c) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ * Initial PowerPC version.
+ * Copyright (c) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Rewritten for PReP
+ * Copyright (c) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ * Low-level exception handlers, MMU support, and rewrite.
+ * Copyright (c) 1997 Dan Malek <dmalek@jlc.net>
+ * PowerPC 8xx modifications.
+ * Copyright (c) 1998-1999 TiVo, Inc.
+ * PowerPC 403GCX modifications.
+ * Copyright (c) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * PowerPC 403GCX/405GP modifications.
+ * Copyright 2000 MontaVista Software Inc.
+ * PPC405 modifications
+ * PowerPC 403GCX/405GP modifications.
+ * Author: MontaVista Software, Inc.
+ * frank_rowand@mvista.com or source@mvista.com
+ * debbie_chu@mvista.com
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <linux/of_fdt.h> /* for OF_DT_HEADER */
+
+#ifdef CONFIG_MMU
+#include <asm/setup.h> /* COMMAND_LINE_SIZE */
+#include <asm/mmu.h>
+#include <asm/processor.h>
+
+.section .data
+.global empty_zero_page
+.align 12
+empty_zero_page:
+ .space PAGE_SIZE
+.global swapper_pg_dir
+swapper_pg_dir:
+ .space PAGE_SIZE
+
+#endif /* CONFIG_MMU */
+
+.section .rodata
+.align 4
+endian_check:
+ .word 1
+
+ __HEAD
+ENTRY(_start)
+#if CONFIG_KERNEL_BASE_ADDR == 0
+ brai TOPHYS(real_start)
+ .org 0x100
+real_start:
+#endif
+
+ mts rmsr, r0
+/* Disable stack protection from bootloader */
+ mts rslr, r0
+ addi r8, r0, 0xFFFFFFFF
+ mts rshr, r8
+/*
+ * According to Xilinx, msrclr instruction behaves like 'mfs rX,rpc'
+ * if the msrclr instruction is not enabled. We use this to detect
+ * if the opcode is available, by issuing msrclr and then testing the result.
+ * r8 == 0 - msr instructions are implemented
+ * r8 != 0 - msr instructions are not implemented
+ */
+ mfs r1, rmsr
+ msrclr r8, 0 /* clear nothing - just read msr for test */
+ cmpu r8, r8, r1 /* r1 must contain msr reg content */
+
+/* r7 may point to an FDT, or there may be one linked in.
+ if it's in r7, we've got to save it away ASAP.
+ We ensure r7 points to a valid FDT, just in case the bootloader
+ is broken or non-existent */
+ beqi r7, no_fdt_arg /* NULL pointer? don't copy */
+/* Does r7 point to a valid FDT? Load HEADER magic number */
+ /* Run time Big/Little endian platform */
+ /* Save 1 as word and load byte - 0 - BIG, 1 - LITTLE */
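+ /* (the word 1 is stored as 00 00 00 01 on big endian and as
+ * 01 00 00 00 on little endian, so byte 0 reveals the endianness) */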
+ lbui r11, r0, TOPHYS(endian_check)
+ beqid r11, big_endian /* DO NOT break delay stop dependency */
+ lw r11, r0, r7 /* Big endian load in delay slot */
+ lwr r11, r0, r7 /* Little endian load */
+big_endian:
+ rsubi r11, r11, OF_DT_HEADER /* Check FDT header */
+ beqi r11, _prepare_copy_fdt
+ or r7, r0, r0 /* clear R7 when not valid DTB */
+ bnei r11, no_fdt_arg /* No - get out of here */
+_prepare_copy_fdt:
+ or r11, r0, r0 /* zero the index counter */
+ ori r4, r0, TOPHYS(_fdt_start)
+ ori r3, r0, (0x10000 - 4)
+_copy_fdt:
+ lw r12, r7, r11 /* r12 = r7 + r11 */
+ sw r12, r4, r11 /* addr[r4 + r11] = r12 */
+ addik r11, r11, 4 /* increment counter */
+ bgtid r3, _copy_fdt /* loop for all entries */
+ addik r3, r3, -4 /* decrement loop */
+no_fdt_arg:
+
+#ifdef CONFIG_MMU
+
+#ifndef CONFIG_CMDLINE_BOOL
+/*
+ * handling command line
+ * copy command line directly to cmd_line placed in data section.
+ */
+ beqid r5, skip /* Skip if NULL pointer */
+ or r11, r0, r0 /* zero the index counter */
+ ori r4, r0, cmd_line /* load address of command line */
+ tophys(r4,r4) /* convert to phys address */
+ ori r3, r0, COMMAND_LINE_SIZE - 1 /* number of loops */
+_copy_command_line:
+ /* r2=r5+r11 - r5 contain pointer to command line */
+ lbu r2, r5, r11
+ beqid r2, skip /* Skip if no data */
+ sb r2, r4, r11 /* addr[r4+r11]= r2 */
+ addik r11, r11, 1 /* increment counter */
+ bgtid r3, _copy_command_line /* loop for all entries */
+ addik r3, r3, -1 /* decrement loop */
+ addik r5, r4, 0 /* point r5 at the copied command line */
+ tovirt(r5,r5)
+skip:
+#endif /* CONFIG_CMDLINE_BOOL */
+
+#ifdef NOT_COMPILE
+/* save bram context */
+ or r11, r0, r0 /* zero the index counter */
+ ori r4, r0, TOPHYS(_bram_load_start) /* save bram context */
+ ori r3, r0, (LMB_SIZE - 4)
+_copy_bram:
+ lw r7, r0, r11 /* r7 = r0 + r11 */
+ sw r7, r4, r11 /* addr[r4 + r11] = r7 */
+ addik r11, r11, 4 /* increment counter */
+ bgtid r3, _copy_bram /* loop for all entries */
+ addik r3, r3, -4 /* decrement loop */
+#endif
+ /* We have to turn on the MMU right away. */
+
+ /*
+ * Set up the initial MMU state so we can do the first level of
+ * kernel initialization. This maps the first 16 MBytes of memory 1:1
+ * virtual to physical.
+ */
+ nop
+ addik r3, r0, MICROBLAZE_TLB_SIZE -1 /* Invalidate all TLB entries */
+_invalidate:
+ mts rtlbx, r3
+ mts rtlbhi, r0 /* flush: ensure V is clear */
+ mts rtlblo, r0
+ bgtid r3, _invalidate /* loop for all entries */
+ addik r3, r3, -1
+ /* sync */
+
+ /* Setup the kernel PID */
+ mts rpid,r0 /* Load the kernel PID */
+ nop
+ bri 4
+
+ /*
+ * We should still be executing code at physical address area
+ * RAM_BASEADDR at this point. However, kernel code is at
+ * a virtual address. So, set up a TLB mapping to cover this once
+ * translation is enabled.
+ */
+
+ addik r3,r0, CONFIG_KERNEL_START /* Load the kernel virtual address */
+ tophys(r4,r3) /* Load the kernel physical address */
+
+ /* start to do TLB calculation */
+ addik r12, r0, _end
+ rsub r12, r3, r12
+ addik r12, r12, CONFIG_LOWMEM_SIZE >> PTE_SHIFT /* that's the pad */
+
+ or r9, r0, r0 /* TLB0 = 0 */
+ or r10, r0, r0 /* TLB1 = 0 */
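+
+ /* Summary of the decision tree below: cover the image size in r12
+ * with at most two TLB entries (slots 0 and 1) of 1MB, 4MB or 16MB
+ * each; r9 becomes the TLB0 size and r10 the TLB1 size (0 if unused) */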
+
+ addik r11, r12, -0x1000000
+ bgei r11, GT16 /* size is greater than 16MB */
+ addik r11, r12, -0x0800000
+ bgei r11, GT8 /* size is greater than 8MB */
+ addik r11, r12, -0x0400000
+ bgei r11, GT4 /* size is greater than 4MB */
+ /* size is less than 4MB */
+ addik r11, r12, -0x0200000
+ bgei r11, GT2 /* size is greater than 2MB */
+ addik r9, r0, 0x0100000 /* TLB0 must be 1MB */
+ addik r11, r12, -0x0100000
+ bgei r11, GT1 /* size is greater than 1MB */
+ /* TLB1 is 0 which is setup above */
+ bri tlb_end
+GT4: /* r11 contains the rest - TLB1 will be either 1MB or 4MB */
+ ori r9, r0, 0x400000 /* TLB0 is 4MB */
+ bri TLB1
+GT16: /* TLB0 is 16MB */
+ addik r9, r0, 0x1000000 /* means TLB0 is 16MB */
+TLB1:
+ /* r2 must be used for the test so r11 survives if the subtract fails */
+ addik r2, r11, -0x0400000
+ bgei r2, GT20 /* size is greater than 20MB */
+ /* size is >16MB and <20MB */
+ addik r11, r11, -0x0100000
+ bgei r11, GT17 /* size is greater than 17MB */
+ /* kernel is >16MB and < 17MB */
+GT1:
+ addik r10, r0, 0x0100000 /* means TLB1 is 1MB */
+ bri tlb_end
+GT2: /* TLB0 is 0 and TLB1 will be 4MB */
+GT17: /* TLB1 is 4MB - kernel size <20MB */
+ addik r10, r0, 0x0400000 /* means TLB1 is 4MB */
+ bri tlb_end
+GT8: /* TLB0 is still zero, that's why only TLB1 can be used */
+GT20: /* TLB1 is 16MB - kernel size >20MB */
+ addik r10, r0, 0x1000000 /* means TLB1 is 16MB */
+tlb_end:
+
+ /*
+ * Configure and load two entries into TLB slots 0 and 1.
+ * In case we are pinning TLBs, these are reserved in by the
+ * other TLB functions. If not reserving, then it doesn't
+ * matter where they are loaded.
+ */
+ andi r4,r4,0xfffffc00 /* Mask off the real page number */
+ ori r4,r4,(TLB_WR | TLB_EX) /* Set the write and execute bits */
+
+ /*
+ * TLB0 is always used - check that it is not zero (r9 stores the TLB0 value);
+ * if it is zero, use the TLB1 value instead and clear TLB1 (r10 stores it)
+ */
+ bnei r9, tlb0_not_zero
+ add r9, r10, r0
+ add r10, r0, r0
+tlb0_not_zero:
+
+ /* compute the rtlbhi page-size field in r30: the bneid delay slots
+ * below always execute, yielding 0x280, 0x300 or 0x380 for a 1MB,
+ * 4MB or 16MB entry respectively */
+ ori r30, r0, 0x200
+ andi r29, r9, 0x100000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r9, 0x400000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r9, 0x1000000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+1:
+ andi r3,r3,0xfffffc00 /* Mask off the effective page number */
+ ori r3,r3,(TLB_VALID)
+ or r3, r3, r30
+
+ /* Load the tlb_skip value, which is the index of the first unused TLB entry */
+ lwi r11, r0, TOPHYS(tlb_skip)
+ mts rtlbx,r11 /* TLB slot 0 */
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ mts rtlbhi,r3 /* Load the tag portion of the entry */
+
+ /* Increase tlb_skip size */
+ addik r11, r11, 1
+ swi r11, r0, TOPHYS(tlb_skip)
+
+ /* TLB1 can be zero, in which case we don't set it up */
+ beqi r10, jump_over2
+
+ /* same page-size field computation as above, now for the TLB1 size in r10 */
+ ori r30, r0, 0x200
+ andi r29, r10, 0x100000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r10, 0x400000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+ andi r29, r10, 0x1000000
+ bneid r29, 1f
+ addik r30, r30, 0x80
+1:
+ addk r4, r4, r9 /* previous addr + TLB0 size */
+ addk r3, r3, r9
+
+ andi r3,r3,0xfffffc00 /* Mask off the effective page number */
+ ori r3,r3,(TLB_VALID)
+ or r3, r3, r30
+
+ lwi r11, r0, TOPHYS(tlb_skip)
+ mts rtlbx, r11 /* r11 holds tlb_skip, as in the TLB0 setup above */
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ mts rtlbhi,r3 /* Load the tag portion of the entry */
+
+ /* Increase tlb_skip size */
+ addik r11, r11, 1
+ swi r11, r0, TOPHYS(tlb_skip)
+
+jump_over2:
+ /*
+ * Load a TLB entry for LMB, since we need access to
+ * the exception vectors, using a 4k real==virtual mapping.
+ */
+ /* Use temporary TLB_ID for LMB - clear this temporary mapping later */
+ ori r11, r0, MICROBLAZE_LMB_TLB_ID
+ mts rtlbx,r11
+
+ ori r4,r0,(TLB_WR | TLB_EX)
+ ori r3,r0,(TLB_VALID | TLB_PAGESZ(PAGESZ_4K))
+
+ mts rtlblo,r4 /* Load the data portion of the entry */
+ mts rtlbhi,r3 /* Load the tag portion of the entry */
+
+ /*
+ * We now have the lower 16 Meg of RAM mapped into TLB entries, and the
+ * caches ready to work.
+ */
+turn_on_mmu:
+ ori r15,r0,start_here
+ ori r4,r0,MSR_KERNEL_VMS
+ mts rmsr,r4
+ nop
+ rted r15,0 /* enables MMU */
+ nop
+
+start_here:
+#endif /* CONFIG_MMU */
+
+ /* Initialize small data anchors */
+ addik r13, r0, _KERNEL_SDA_BASE_
+ addik r2, r0, _KERNEL_SDA2_BASE_
+
+ /* Initialize stack pointer */
+ addik r1, r0, init_thread_union + THREAD_SIZE - 4
+
+ /* Initialize r31 with current task address */
+ addik r31, r0, init_task
+
+ addik r11, r0, machine_early_init
+ brald r15, r11
+ nop
+
+#ifndef CONFIG_MMU
+ addik r15, r0, machine_halt
+ braid start_kernel
+ nop
+#else
+ /*
+ * Initialize the MMU.
+ */
+ bralid r15, mmu_init
+ nop
+
+ /* Go back to running unmapped so we can load up new values
+ * and change to using our exception vectors.
+ * On the MicroBlaze, we just invalidate the used TLB entries to clear
+ * the old 16M byte TLB mappings.
+ */
+ ori r15,r0,TOPHYS(kernel_load_context)
+ ori r4,r0,MSR_KERNEL
+ mts rmsr,r4
+ nop
+ bri 4
+ rted r15,0
+ nop
+
+ /* Load up the kernel context */
+kernel_load_context:
+ ori r5, r0, MICROBLAZE_LMB_TLB_ID
+ mts rtlbx,r5
+ nop
+ mts rtlbhi,r0
+ nop
+ addi r15, r0, machine_halt
+ ori r17, r0, start_kernel
+ ori r4, r0, MSR_KERNEL_VMS
+ mts rmsr, r4
+ nop
+ rted r17, 0 /* enable MMU and jump to start_kernel */
+ nop
+#endif /* CONFIG_MMU */
diff --git a/arch/microblaze/kernel/hw_exception_handler.S b/arch/microblaze/kernel/hw_exception_handler.S
new file mode 100644
index 000000000..54411de22
--- /dev/null
+++ b/arch/microblaze/kernel/hw_exception_handler.S
@@ -0,0 +1,1222 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Exception handling for Microblaze
+ *
+ * Rewritten interrupt handling
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ *
+ * uClinux customisation (C) 2005 John Williams
+ *
+ * MMU code derived from arch/ppc/kernel/head_4xx.S:
+ * Copyright (C) 1995-1996 Gary Thomas <gdt@linuxppc.org>
+ * Initial PowerPC version.
+ * Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
+ * Rewritten for PReP
+ * Copyright (C) 1996 Paul Mackerras <paulus@cs.anu.edu.au>
+ * Low-level exception handlers, MMU support, and rewrite.
+ * Copyright (C) 1997 Dan Malek <dmalek@jlc.net>
+ * PowerPC 8xx modifications.
+ * Copyright (C) 1998-1999 TiVo, Inc.
+ * PowerPC 403GCX modifications.
+ * Copyright (C) 1999 Grant Erickson <grant@lcse.umn.edu>
+ * PowerPC 403GCX/405GP modifications.
+ * Copyright 2000 MontaVista Software Inc.
+ * PPC405 modifications
+ * PowerPC 403GCX/405GP modifications.
+ * Author: MontaVista Software, Inc.
+ * frank_rowand@mvista.com or source@mvista.com
+ * debbie_chu@mvista.com
+ *
+ * Original code
+ * Copyright (C) 2004 Xilinx, Inc.
+ */
+
+/*
+ * Here are the handlers which don't require enabling translation
+ * or calling other kernel code, so we can keep their design very simple
+ * and do all processing in real mode. All they need is a valid current
+ * (that is an issue for the CONFIG_REGISTER_TASK_PTR case).
+ * These handlers use r3,r4,r5,r6 and optionally r[current] to work, therefore
+ * these registers are saved/restored.
+ * The handlers which require translation are in entry.S --KAA
+ *
+ * Microblaze HW Exception Handler
+ * - Non self-modifying exception handler for the following exception conditions
+ * - Unalignment
+ * - Instruction bus error
+ * - Data bus error
+ * - Illegal instruction opcode
+ * - Divide-by-zero
+ *
+ * - Privileged instruction exception (MMU)
+ * - Data storage exception (MMU)
+ * - Instruction storage exception (MMU)
+ * - Data TLB miss exception (MMU)
+ * - Instruction TLB miss exception (MMU)
+ *
+ * Note we disable interrupts during exception handling, otherwise we could
+ * get multiple re-entrancy if interrupt handlers themselves cause
+ * exceptions. JW
+ */
+
+#include <asm/exceptions.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+
+#include <asm/entry.h>
+#include <asm/current.h>
+#include <linux/linkage.h>
+#include <linux/pgtable.h>
+
+#include <asm/mmu.h>
+#include <asm/signal.h>
+#include <asm/registers.h>
+#include <asm/asm-offsets.h>
+
+#undef DEBUG
+
+/* Helpful Macros */
+#define NUM_TO_REG(num) r ## num
+
+#ifdef CONFIG_MMU
+ #define RESTORE_STATE \
+ lwi r5, r1, 0; \
+ mts rmsr, r5; \
+ nop; \
+ lwi r3, r1, PT_R3; \
+ lwi r4, r1, PT_R4; \
+ lwi r5, r1, PT_R5; \
+ lwi r6, r1, PT_R6; \
+ lwi r11, r1, PT_R11; \
+ lwi r31, r1, PT_R31; \
+ lwi r1, r1, PT_R1;
+#endif /* CONFIG_MMU */
+
+#define LWREG_NOP \
+ bri ex_handler_unhandled; \
+ nop;
+
+#define SWREG_NOP \
+ bri ex_handler_unhandled; \
+ nop;
+
+/* FIXME this is weird - for the noMMU kernel it is not possible to use the
+ * brid instruction, which would shorten execution time
+ */
+
+/* r3 is the source */
+#define R3_TO_LWREG_V(regnum) \
+ swi r3, r1, 4 * regnum; \
+ bri ex_handler_done;
+
+/* r3 is the source */
+#define R3_TO_LWREG(regnum) \
+ or NUM_TO_REG (regnum), r0, r3; \
+ bri ex_handler_done;
+
+/* r3 is the target */
+#define SWREG_TO_R3_V(regnum) \
+ lwi r3, r1, 4 * regnum; \
+ bri ex_sw_tail;
+
+/* r3 is the target */
+#define SWREG_TO_R3(regnum) \
+ or r3, r0, NUM_TO_REG (regnum); \
+ bri ex_sw_tail;
+
+#ifdef CONFIG_MMU
+ #define R3_TO_LWREG_VM_V(regnum) \
+ brid ex_lw_end_vm; \
+ swi r3, r7, 4 * regnum;
+
+ #define R3_TO_LWREG_VM(regnum) \
+ brid ex_lw_end_vm; \
+ or NUM_TO_REG (regnum), r0, r3;
+
+ #define SWREG_TO_R3_VM_V(regnum) \
+ brid ex_sw_tail_vm; \
+ lwi r3, r7, 4 * regnum;
+
+ #define SWREG_TO_R3_VM(regnum) \
+ brid ex_sw_tail_vm; \
+ or r3, r0, NUM_TO_REG (regnum);
+
+ /* Shift right instruction depending on available configuration */
+ #if CONFIG_XILINX_MICROBLAZE0_USE_BARREL == 0
+ /* Only the used shift constants defined here - add more if needed */
+ #define BSRLI2(rD, rA) \
+ srl rD, rA; /* >> 1 */ \
+ srl rD, rD; /* >> 2 */
+ #define BSRLI4(rD, rA) \
+ BSRLI2(rD, rA); \
+ BSRLI2(rD, rD)
+ #define BSRLI10(rD, rA) \
+ srl rD, rA; /* >> 1 */ \
+ srl rD, rD; /* >> 2 */ \
+ srl rD, rD; /* >> 3 */ \
+ srl rD, rD; /* >> 4 */ \
+ srl rD, rD; /* >> 5 */ \
+ srl rD, rD; /* >> 6 */ \
+ srl rD, rD; /* >> 7 */ \
+ srl rD, rD; /* >> 8 */ \
+ srl rD, rD; /* >> 9 */ \
+ srl rD, rD /* >> 10 */
+ #define BSRLI20(rD, rA) \
+ BSRLI10(rD, rA); \
+ BSRLI10(rD, rD)
+
+ .macro bsrli, rD, rA, IMM
+ .if (\IMM) == 2
+ BSRLI2(\rD, \rA)
+ .elseif (\IMM) == 10
+ BSRLI10(\rD, \rA)
+ .elseif (\IMM) == 12
+ BSRLI2(\rD, \rA)
+ BSRLI10(\rD, \rD)
+ .elseif (\IMM) == 14
+ BSRLI4(\rD, \rA)
+ BSRLI10(\rD, \rD)
+ .elseif (\IMM) == 20
+ BSRLI20(\rD, \rA)
+ .elseif (\IMM) == 24
+ BSRLI4(\rD, \rA)
+ BSRLI20(\rD, \rD)
+ .elseif (\IMM) == 28
+ BSRLI4(\rD, \rA)
+ BSRLI4(\rD, \rD)
+ BSRLI20(\rD, \rD)
+ .else
+ .error "BSRLI shift macros \IMM"
+ .endif
+ .endm
+ #endif
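+ /*
+ * Either way, "bsrli rD, rA, IMM" computes rD = rA >> IMM: with a barrel
+ * shifter it is a native instruction, without one it expands into the
+ * chains of single-bit srl instructions defined above.
+ */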
+
+#endif /* CONFIG_MMU */
+
+.extern other_exception_handler /* Defined in exception.c */
+
+/*
+ * hw_exception_handler - Handler for exceptions
+ *
+ * Exception handler notes:
+ * - Handles all exceptions
+ * - Does not handle unaligned exceptions during load into r17, r1, r0.
+ * - Does not handle unaligned exceptions during store from r17 (cannot be
+ * done) and r1 (slows down common case)
+ *
+ * Relevant register structures
+ *
+ * EAR - |----|----|----|----|----|----|----|----|
+ * - < ## 32 bit faulting address ## >
+ *
+ * ESR - |----|----|----|----|----| - | - |-----|-----|
+ * - W S REG EXC
+ *
+ *
+ * STACK FRAME STRUCTURE (for CONFIG_MMU=n)
+ * ----------------------------------------
+ *
+ * +-------------+ + 0
+ * | MSR |
+ * +-------------+ + 4
+ * | r1 |
+ * | . |
+ * | . |
+ * | . |
+ * | . |
+ * | r18 |
+ * +-------------+ + 76
+ * | . |
+ * | . |
+ *
+ * The MMU kernel instead stores the register values in the dedicated
+ * 'pt_pool_space' area. The noMMU style was to store them on the stack,
+ * but on a failure the register contents were lost; now the register
+ * values can always be inspected at a fixed place in memory. Compared
+ * with the previous solution the speed should be the same.
+ *
+ * The MMU exception handling differs from the noMMU kernel. The handler
+ * uses a jump table to dispatch on the exception type. This approach is
+ * better for the MMU kernel because the MMU-related exceptions are
+ * handled by the asm code in this file, while on MMU everything else
+ * except the unaligned exception is handled by C code.
+ */
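+
+/* Dispatch sketch in C terms (illustrative only):
+ *	vector = esr & 0x1f;				(ESR[EXC])
+ *	goto *_MB_HW_ExceptionVectorTable[vector];	(physical addresses)
+ */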
+
+/*
+ * Each of these handlers is entered with R3/4/5/6/11/current saved on the
+ * stack and clobbered, so care should be taken to restore them before
+ * returning from the exception
+ */
+
+/* wrappers to restore state before coming to entry.S */
+#ifdef CONFIG_MMU
+.section .data
+.align 4
+pt_pool_space:
+ .space PT_SIZE
+
+#ifdef DEBUG
+/* Create space for exception counting. */
+.section .data
+.global exception_debug_table
+.align 4
+exception_debug_table:
+ /* Look at the exception vector table. There are 32 exceptions * word size */
+ .space (32 * 4)
+#endif /* DEBUG */
+
+.section .rodata
+.align 4
+_MB_HW_ExceptionVectorTable:
+/* 0 - Undefined */
+ .long TOPHYS(ex_handler_unhandled)
+/* 1 - Unaligned data access exception */
+ .long TOPHYS(handle_unaligned_ex)
+/* 2 - Illegal op-code exception */
+ .long TOPHYS(full_exception_trapw)
+/* 3 - Instruction bus error exception */
+ .long TOPHYS(full_exception_trapw)
+/* 4 - Data bus error exception */
+ .long TOPHYS(full_exception_trapw)
+/* 5 - Divide by zero exception */
+ .long TOPHYS(full_exception_trapw)
+/* 6 - Floating point unit exception */
+ .long TOPHYS(full_exception_trapw)
+/* 7 - Privileged instruction exception */
+ .long TOPHYS(full_exception_trapw)
+/* 8 - 15 - Undefined */
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+/* 16 - Data storage exception */
+ .long TOPHYS(handle_data_storage_exception)
+/* 17 - Instruction storage exception */
+ .long TOPHYS(handle_instruction_storage_exception)
+/* 18 - Data TLB miss exception */
+ .long TOPHYS(handle_data_tlb_miss_exception)
+/* 19 - Instruction TLB miss exception */
+ .long TOPHYS(handle_instruction_tlb_miss_exception)
+/* 20 - 31 - Undefined */
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+ .long TOPHYS(ex_handler_unhandled)
+#endif
+
+.global _hw_exception_handler
+.section .text
+.align 4
+.ent _hw_exception_handler
+_hw_exception_handler:
+#ifndef CONFIG_MMU
+ addik r1, r1, -(EX_HANDLER_STACK_SIZ); /* Create stack frame */
+#else
+ swi r1, r0, TOPHYS(pt_pool_space + PT_R1); /* GET_SP */
+ /* Save data to kernel memory. This is the problematic
+ * case when we came from user space */
+ ori r1, r0, TOPHYS(pt_pool_space);
+#endif
+ swi r3, r1, PT_R3
+ swi r4, r1, PT_R4
+ swi r5, r1, PT_R5
+ swi r6, r1, PT_R6
+
+#ifdef CONFIG_MMU
+ swi r11, r1, PT_R11
+ swi r31, r1, PT_R31
+ lwi r31, r0, TOPHYS(PER_CPU(CURRENT_SAVE)) /* get saved current */
+#endif
+
+ mfs r5, rmsr;
+ nop
+ swi r5, r1, 0;
+ mfs r4, resr
+ nop
+ mfs r3, rear;
+ nop
+
+#ifndef CONFIG_MMU
+ andi r5, r4, 0x1000; /* Check ESR[DS] */
+ beqi r5, not_in_delay_slot; /* Branch if ESR[DS] not set */
+ mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
+ nop
+not_in_delay_slot:
+ swi r17, r1, PT_R17
+#endif
+
+ andi r5, r4, 0x1F; /* Extract ESR[EXC] */
+
+#ifdef CONFIG_MMU
+ /* Calculate exception vector offset = r5 << 2 */
+ addk r6, r5, r5; /* << 1 */
+ addk r6, r6, r6; /* << 2 */
+
+#ifdef DEBUG
+/* count which exceptions happen */
+ lwi r5, r0, TOPHYS(exception_debug_table)
+ addi r5, r5, 1
+ swi r5, r0, TOPHYS(exception_debug_table)
+ lwi r5, r6, TOPHYS(exception_debug_table)
+ addi r5, r5, 1
+ swi r5, r6, TOPHYS(exception_debug_table)
+#endif
+/* end */
+ /* Load the HW Exception vector */
+ lwi r6, r6, TOPHYS(_MB_HW_ExceptionVectorTable)
+ bra r6
+
+full_exception_trapw:
+ RESTORE_STATE
+ bri full_exception_trap
+#else
+ /* Exceptions enabled here. This will allow nested exceptions */
+ mfs r6, rmsr;
+ nop
+ swi r6, r1, 0; /* RMSR_OFFSET */
+ ori r6, r6, 0x100; /* Turn ON the EE bit */
+ andi r6, r6, ~2; /* Disable interrupts */
+ mts rmsr, r6;
+ nop
+
+ xori r6, r5, 1; /* 00001 = Unaligned Exception */
+ /* Jump to unalignment exception handler */
+ beqi r6, handle_unaligned_ex;
+
+handle_other_ex: /* Handle Other exceptions here */
+ /* Save other volatiles before we make procedure calls below */
+ swi r7, r1, PT_R7
+ swi r8, r1, PT_R8
+ swi r9, r1, PT_R9
+ swi r10, r1, PT_R10
+ swi r11, r1, PT_R11
+ swi r12, r1, PT_R12
+ swi r14, r1, PT_R14
+ swi r15, r1, PT_R15
+ swi r18, r1, PT_R18
+
+ or r5, r1, r0
+ andi r6, r4, 0x1F; /* Load ESR[EC] */
+ lwi r7, r0, PER_CPU(KM) /* MS: saving current kernel mode to regs */
+ swi r7, r1, PT_MODE
+ mfs r7, rfsr
+ nop
+ addk r8, r17, r0; /* Load exception address */
+ bralid r15, full_exception; /* Branch to the handler */
+ nop;
+ mts rfsr, r0; /* Clear sticky fsr */
+ nop
+
+ /*
+ * Trigger execution of the signal handler by enabling
+ * interrupts and calling an invalid syscall.
+ */
+ mfs r5, rmsr;
+ nop
+ ori r5, r5, 2;
+ mts rmsr, r5; /* enable interrupt */
+ nop
+ addi r12, r0, __NR_syscalls;
+ brki r14, 0x08;
+ mfs r5, rmsr; /* disable interrupt */
+ nop
+ andi r5, r5, ~2;
+ mts rmsr, r5;
+ nop
+
+ lwi r7, r1, PT_R7
+ lwi r8, r1, PT_R8
+ lwi r9, r1, PT_R9
+ lwi r10, r1, PT_R10
+ lwi r11, r1, PT_R11
+ lwi r12, r1, PT_R12
+ lwi r14, r1, PT_R14
+ lwi r15, r1, PT_R15
+ lwi r18, r1, PT_R18
+
+ bri ex_handler_done; /* Complete exception handling */
+#endif
+
+/* 0x01 - Unaligned data access exception
+ * This occurs when a word access is not aligned on a word boundary,
+ * or when a 16-bit access is not aligned on a 16-bit boundary.
+ * This handler performs the access and returns, except for MMU, when
+ * the unaligned address is the last on a 4k page or the physical address is
+ * not found in the page table, in which case unaligned_data_trap is called.
+ */
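+/* Decode sketch in C terms (illustrative only):
+ *	rd = (esr >> 5) & 0x1f;		ESR[REG], register operand
+ *	if (esr & 0x400)		ESR[S]: emulate the store
+ *		copy rd byte-by-byte to the EA in r3;
+ *	else				otherwise emulate the load
+ *		assemble the bytes from the EA, then write them into rd;
+ * ESR[W] (0x800) selects word vs half-word width in both paths.
+ */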
+handle_unaligned_ex:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R4 = ESR
+ * R3 = EAR
+ */
+#ifdef CONFIG_MMU
+ andi r6, r4, 0x1000 /* Check ESR[DS] */
+ beqi r6, _no_delayslot /* Branch if ESR[DS] not set */
+ mfs r17, rbtr; /* ESR[DS] set - return address in BTR */
+ nop
+_no_delayslot:
+ /* jump to high level unaligned handler */
+ RESTORE_STATE;
+ bri unaligned_data_trap
+#endif
+ andi r6, r4, 0x3E0; /* Mask and extract the register operand */
+ srl r6, r6; /* r6 >> 5 */
+ srl r6, r6;
+ srl r6, r6;
+ srl r6, r6;
+ srl r6, r6;
+ /* Store the register operand in a temporary location */
+ sbi r6, r0, TOPHYS(ex_reg_op);
+
+ andi r6, r4, 0x400; /* Extract ESR[S] */
+ bnei r6, ex_sw;
+ex_lw:
+ andi r6, r4, 0x800; /* Extract ESR[W] */
+ beqi r6, ex_lhw;
+ lbui r5, r3, 0; /* Exception address in r3 */
+ /* Load a word, byte-by-byte from destination address
+ and save it in tmp space */
+ sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
+ lbui r5, r3, 1;
+ sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
+ lbui r5, r3, 2;
+ sbi r5, r0, TOPHYS(ex_tmp_data_loc_2);
+ lbui r5, r3, 3;
+ sbi r5, r0, TOPHYS(ex_tmp_data_loc_3);
+ /* Get the destination register value into r4 */
+ lwi r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ bri ex_lw_tail;
+ex_lhw:
+ lbui r5, r3, 0; /* Exception address in r3 */
+ /* Load a half-word, byte-by-byte from destination
+ address and save it in tmp space */
+ sbi r5, r0, TOPHYS(ex_tmp_data_loc_0);
+ lbui r5, r3, 1;
+ sbi r5, r0, TOPHYS(ex_tmp_data_loc_1);
+ /* Get the destination register value into r4 */
+ lhui r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ex_lw_tail:
+ /* Get the destination register number into r5 */
+ lbui r5, r0, TOPHYS(ex_reg_op);
+ /* Form load_word jump table offset (lw_table + (8 * regnum)) */
+ addik r6, r0, TOPHYS(lw_table);
+ addk r5, r5, r5;
+ addk r5, r5, r5;
+ addk r5, r5, r5;
+ addk r5, r5, r6;
+ bra r5;
+ex_lw_end: /* Exception handling of load word, ends */
+ex_sw:
+ /* Get the destination register number into r5 */
+ lbui r5, r0, TOPHYS(ex_reg_op);
+ /* Form store_word jump table offset (sw_table + (8 * regnum)) */
+ addik r6, r0, TOPHYS(sw_table);
+ add r5, r5, r5;
+ add r5, r5, r5;
+ add r5, r5, r5;
+ add r5, r5, r6;
+ bra r5;
+ex_sw_tail:
+ mfs r6, resr;
+ nop
+ andi r6, r6, 0x800; /* Extract ESR[W] */
+ beqi r6, ex_shw;
+ /* Get the word - delay slot */
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ /* Store the word, byte-by-byte into destination address */
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_1);
+ sbi r4, r3, 1;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 2;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 3;
+ bri ex_handler_done;
+
+ex_shw:
+ /* Store the lower half-word, byte-by-byte into destination address */
+ swi r4, r0, TOPHYS(ex_tmp_data_loc_0);
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_2);
+ sbi r4, r3, 0;
+ lbui r4, r0, TOPHYS(ex_tmp_data_loc_3);
+ sbi r4, r3, 1;
+ex_sw_end: /* Exception handling of store word, ends. */
+
+ex_handler_done:
+#ifndef CONFIG_MMU
+ lwi r5, r1, 0 /* RMSR */
+ mts rmsr, r5
+ nop
+ lwi r3, r1, PT_R3
+ lwi r4, r1, PT_R4
+ lwi r5, r1, PT_R5
+ lwi r6, r1, PT_R6
+ lwi r17, r1, PT_R17
+
+ rted r17, 0
+ addik r1, r1, (EX_HANDLER_STACK_SIZ); /* Restore stack frame */
+#else
+ RESTORE_STATE;
+ rted r17, 0
+ nop
+#endif
+
+#ifdef CONFIG_MMU
+ /* Exception vector entry code. This code runs with address translation
+ * turned off (i.e. using physical addresses). */
+
+ /* Exception vectors. */
+
+ /* 0x10 - Data Storage Exception
+ * This happens for just a few reasons. U0 set (but we don't do that),
+ * or zone protection fault (user violation, write to protected page).
+ * If this is just an update of modified status, we do that quickly
+ * and exit. Otherwise, we call heavyweight functions to do the work.
+ */
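+ /* Fast path sketch in C terms (illustrative only):
+ *	pte = *pte_p;
+ *	if (!(pte & _PAGE_RW))
+ *		goto heavyweight;	(bail to the C page-fault code)
+ *	*pte_p = pte | _PAGE_DIRTY | _PAGE_ACCESSED | _PAGE_HWWRITE;
+ *	reload the matching TLB entry from the updated PTE;
+ */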
+ handle_data_storage_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+ mfs r11, rpid
+ nop
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ ori r5, r0, CONFIG_KERNEL_START
+ cmpu r5, r3, r5
+ bgti r5, ex3
+ /* First, check if it was a zone fault (which means a user
+ * tried to access a kernel or read-protected page - always
+ * a SEGV). All other faults here must be stores, so no
+ * need to check ESR_S as well. */
+ andi r4, r4, ESR_DIZ /* ESR_Z - zone protection */
+ bnei r4, ex2
+
+ ori r4, r0, swapper_pg_dir
+ mts rpid, r0 /* TLB will have 0 TID */
+ nop
+ bri ex4
+
+ /* Get the PGD for the current thread. */
+ ex3:
+ /* First, check if it was a zone fault (which means a user
+ * tried to access a kernel or read-protected page - always
+ * a SEGV). All other faults here must be stores, so no
+ * need to check ESR_S as well. */
+ andi r4, r4, ESR_DIZ /* ESR_Z */
+ bnei r4, ex2
+ /* get current task address */
+ addi r4, CURRENT_TASK, TOPHYS(0);
+ lwi r4, r4, TASK_THREAD+PGDIR
+ ex4:
+ tophys(r4,r4)
+ /* Create L1 (pgdir/pmd) address */
+ bsrli r5, r3, PGDIR_SHIFT - 2
+ andi r5, r5, PAGE_SIZE - 4
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+ or r4, r4, r5
+ lwi r4, r4, 0 /* Get L1 entry */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
+ beqi r5, ex2 /* Bail if no table */
+
+ tophys(r5,r5)
+ bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
+ or r5, r5, r6
+ lwi r4, r5, 0 /* Get Linux PTE */
+
+ andi r6, r4, _PAGE_RW /* Is it writeable? */
+ beqi r6, ex2 /* Bail if not */
+
+ /* Update 'changed' */
+ ori r4, r4, _PAGE_DIRTY|_PAGE_ACCESSED|_PAGE_HWWRITE
+ swi r4, r5, 0 /* Update Linux page table */
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+ * We set execute, because we don't have the granularity to
+ * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+/* Ignore memory coherent, just LSB on ZSEL is used + EX/WR */
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
+ ori r4, r4, _PAGE_HWEXEC /* make it executable */
+
+ /* find the TLB index that caused the fault. It has to be here */
+ mts rtlbsx, r3
+ nop
+ mfs r5, rtlbx /* DEBUG: TBD */
+ nop
+ mts rtlblo, r4 /* Load TLB LO */
+ nop
+ /* Will sync shadow TLBs */
+
+ /* Done...restore registers and get out of here. */
+ mts rpid, r11
+ nop
+ bri 4
+
+ RESTORE_STATE;
+ rted r17, 0
+ nop
+ ex2:
+ /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out. */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ bri page_fault_data_trap
+
+
+ /* 0x11 - Instruction Storage Exception
+ * This is caused by a fetch from non-execute or guarded pages. */
+ handle_instruction_storage_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+
+ RESTORE_STATE;
+ bri page_fault_instr_trap
+
+ /* 0x12 - Data TLB Miss Exception
+ * As the name implies, translation is not in the MMU, so search the
+ * page tables and fix it. The only purpose of this function is to
+ * load TLB entries from the page table if they exist.
+ */
+ handle_data_tlb_miss_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = EAR, R4 = ESR
+ */
+ mfs r11, rpid
+ nop
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables. */
+ ori r6, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r6
+ bgti r4, ex5
+ ori r4, r0, swapper_pg_dir
+ mts rpid, r0 /* TLB will have 0 TID */
+ nop
+ bri ex6
+
+ /* Get the PGD for the current thread. */
+ ex5:
+ /* get current task address */
+ addi r4, CURRENT_TASK, TOPHYS(0);
+ lwi r4, r4, TASK_THREAD+PGDIR
+ ex6:
+ tophys(r4,r4)
+ /* Create L1 (pgdir/pmd) address */
+ bsrli r5, r3, PGDIR_SHIFT - 2
+ andi r5, r5, PAGE_SIZE - 4
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+ or r4, r4, r5
+ lwi r4, r4, 0 /* Get L1 entry */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
+ beqi r5, ex7 /* Bail if no table */
+
+ tophys(r5,r5)
+ bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
+ or r5, r5, r6
+ lwi r4, r5, 0 /* Get Linux PTE */
+
+ andi r6, r4, _PAGE_PRESENT
+ beqi r6, ex7
+
+ ori r4, r4, _PAGE_ACCESSED
+ swi r4, r5, 0
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+ * We set execute, because we don't have the granularity to
+ * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+ brid finish_tlb_load
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
+ ex7:
+ /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ bri page_fault_data_trap
+
+ /* 0x13 - Instruction TLB Miss Exception
+ * Nearly the same as above, except we get our information from
+ * different registers and bailout to a different point.
+ */
+ handle_instruction_tlb_miss_exception:
+ /* Working registers already saved: R3, R4, R5, R6
+ * R3 = ESR
+ */
+ mfs r11, rpid
+ nop
+
+ /* If we are faulting a kernel address, we have to use the
+ * kernel page tables.
+ */
+ ori r4, r0, CONFIG_KERNEL_START
+ cmpu r4, r3, r4
+ bgti r4, ex8
+ ori r4, r0, swapper_pg_dir
+ mts rpid, r0 /* TLB will have 0 TID */
+ nop
+ bri ex9
+
+ /* Get the PGD for the current thread. */
+ ex8:
+ /* get current task address */
+ addi r4, CURRENT_TASK, TOPHYS(0);
+ lwi r4, r4, TASK_THREAD+PGDIR
+ ex9:
+ tophys(r4,r4)
+ /* Create L1 (pgdir/pmd) address */
+ bsrli r5, r3, PGDIR_SHIFT - 2
+ andi r5, r5, PAGE_SIZE - 4
+/* Assume pgdir aligned on 4K boundary, no need for "andi r4,r4,0xfffff003" */
+ or r4, r4, r5
+ lwi r4, r4, 0 /* Get L1 entry */
+ andi r5, r4, PAGE_MASK /* Extract L2 (pte) base address */
+ beqi r5, ex10 /* Bail if no table */
+
+ tophys(r5,r5)
+ bsrli r6, r3, PTE_SHIFT /* Compute PTE address */
+ andi r6, r6, PAGE_SIZE - 4
+ or r5, r5, r6
+ lwi r4, r5, 0 /* Get Linux PTE */
+
+ andi r6, r4, _PAGE_PRESENT
+ beqi r6, ex10
+
+ ori r4, r4, _PAGE_ACCESSED
+ swi r4, r5, 0
+
+ /* Most of the Linux PTE is ready to load into the TLB LO.
+ * We set ZSEL, where only the LS-bit determines user access.
+ * We set execute, because we don't have the granularity to
+ * properly set this at the page level (Linux problem).
+ * If shared is set, we cause a zero PID->TID load.
+ * Many of these bits are software only. Bits we don't set
+ * here we (properly should) assume have the appropriate value.
+ */
+ brid finish_tlb_load
+ andi r4, r4, PAGE_MASK | TLB_EX | TLB_WR | \
+ TLB_ZSEL(1) | TLB_ATTR_MASK
+ ex10:
+ /* The bailout. Restore registers to pre-exception conditions
+ * and call the heavyweights to help us out.
+ */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ bri page_fault_instr_trap
+
+/* Both the instruction and data TLB miss get to this point to load the TLB.
+ * r3 - EA of fault
+ * r4 - TLB LO (info from Linux PTE)
+ * r5, r6 - available to use
+ * PID - loaded with proper value when we get here
+ * Upon exit, we reload everything and RFI.
+ * A common place to load the TLB.
+ */
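+/* Index selection sketch in C terms (illustrative only):
+ *	tlb_index = (tlb_index + 1) & (MICROBLAZE_TLB_SIZE - 1);
+ *	if (tlb_index <= 1)		(wrapped into the pinned entries)
+ *		tlb_index = tlb_skip;
+ */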
+.section .data
+.align 4
+.global tlb_skip
+ tlb_skip:
+ .long MICROBLAZE_TLB_SKIP
+ tlb_index:
+ /* MS: storing last used tlb index */
+ .long MICROBLAZE_TLB_SIZE/2
+.previous
+ finish_tlb_load:
+ /* MS: load the last used TLB index. */
+ lwi r5, r0, TOPHYS(tlb_index)
+ addik r5, r5, 1 /* MS: inc tlb_index -> use next one */
+
+/* MS: FIXME this is a potential fault, because this is a mask, not a count */
+ andi r5, r5, MICROBLAZE_TLB_SIZE - 1
+ ori r6, r0, 1
+ cmp r31, r5, r6
+ blti r31, ex12
+ lwi r5, r0, TOPHYS(tlb_skip)
+ ex12:
+ /* MS: save back current TLB index */
+ swi r5, r0, TOPHYS(tlb_index)
+
+ ori r4, r4, _PAGE_HWEXEC /* make it executable */
+ mts rtlbx, r5 /* MS: save current TLB */
+ nop
+ mts rtlblo, r4 /* MS: save to TLB LO */
+ nop
+
+ /* Create EPN. This is the faulting address plus a static
+ * set of bits. These are size, valid, E, U0, and ensure
+ * bits 20 and 21 are zero.
+ */
+ andi r3, r3, PAGE_MASK
+#ifdef CONFIG_MICROBLAZE_64K_PAGES
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_64K)
+#elif defined(CONFIG_MICROBLAZE_16K_PAGES)
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_16K)
+#else
+ ori r3, r3, TLB_VALID | TLB_PAGESZ(PAGESZ_4K)
+#endif
+ mts rtlbhi, r3 /* Load TLB HI */
+ nop
+
+ /* Done...restore registers and get out of here. */
+ mts rpid, r11
+ nop
+ bri 4
+ RESTORE_STATE;
+ rted r17, 0
+ nop
+
+ /* extern void giveup_fpu(struct task_struct *prev)
+ *
+ * The MicroBlaze processor may have an FPU, so this should not just
+ * return: TBD.
+ */
+ .globl giveup_fpu;
+ .align 4;
+ giveup_fpu:
+ bralid r15,0 /* TBD */
+ nop
+
+ /* At present, this routine just hangs. - extern void abort(void) */
+ .globl abort;
+ .align 4;
+ abort:
+ br r0
+
+ .globl set_context;
+ .align 4;
+ set_context:
+ mts rpid, r5 /* Shadow TLBs are automatically */
+ nop
+ bri 4 /* flushed by changing PID */
+ rtsd r15,8
+ nop
+
+#endif
+.end _hw_exception_handler
+
+#ifdef CONFIG_MMU
+/* Unaligned data access exception, including the last-on-a-4k-page case, for MMU.
+ * When this is called, we are in virtual mode with exceptions enabled
+ * and registers 1-13,15,17,18 saved.
+ *
+ * R3 = ESR
+ * R4 = EAR
+ * R7 = pointer to saved registers (struct pt_regs *regs)
+ *
+ * This handler performs the access and returns via ret_from_exc.
+ */
+.global _unaligned_data_exception
+.ent _unaligned_data_exception
+_unaligned_data_exception:
+ andi r8, r3, 0x3E0; /* Mask and extract the register operand */
+ bsrli r8, r8, 2; /* r8 >> 2 = register operand * 8 */
+ andi r6, r3, 0x400; /* Extract ESR[S] */
+ bneid r6, ex_sw_vm;
+ andi r6, r3, 0x800; /* Extract ESR[W] - delay slot */
+ex_lw_vm:
+ beqid r6, ex_lhw_vm;
+load1: lbui r5, r4, 0; /* Exception address in r4 - delay slot */
+/* Load a word, byte-by-byte from destination address and save it in tmp space*/
+ addik r6, r0, ex_tmp_data_loc_0;
+ sbi r5, r6, 0;
+load2: lbui r5, r4, 1;
+ sbi r5, r6, 1;
+load3: lbui r5, r4, 2;
+ sbi r5, r6, 2;
+load4: lbui r5, r4, 3;
+ sbi r5, r6, 3;
+ brid ex_lw_tail_vm;
+/* Get the destination register value into r3 - delay slot */
+ lwi r3, r6, 0;
+ex_lhw_vm:
+ /* Load a half-word, byte-by-byte from destination address and
+ * save it in tmp space */
+ addik r6, r0, ex_tmp_data_loc_0;
+ sbi r5, r6, 0;
+load5: lbui r5, r4, 1;
+ sbi r5, r6, 1;
+ lhui r3, r6, 0; /* Get the destination register value into r3 */
+ex_lw_tail_vm:
+ /* Form load_word jump table offset (lw_table_vm + (8 * regnum)) */
+ addik r5, r8, lw_table_vm;
+ bra r5;
+ex_lw_end_vm: /* Exception handling of load word, ends */
+ brai ret_from_exc;
+ex_sw_vm:
+/* Form store_word jump table offset (sw_table_vm + (8 * regnum)) */
+ addik r5, r8, sw_table_vm;
+ bra r5;
+ex_sw_tail_vm:
+ addik r5, r0, ex_tmp_data_loc_0;
+ beqid r6, ex_shw_vm;
+ swi r3, r5, 0; /* Get the word - delay slot */
+ /* Store the word, byte-by-byte into destination address */
+ lbui r3, r5, 0;
+store1: sbi r3, r4, 0;
+ lbui r3, r5, 1;
+store2: sbi r3, r4, 1;
+ lbui r3, r5, 2;
+store3: sbi r3, r4, 2;
+ lbui r3, r5, 3;
+ brid ret_from_exc;
+store4: sbi r3, r4, 3; /* Delay slot */
+ex_shw_vm:
+ /* Store the lower half-word, byte-by-byte into destination address */
+#ifdef __MICROBLAZEEL__
+ lbui r3, r5, 0;
+store5: sbi r3, r4, 0;
+ lbui r3, r5, 1;
+ brid ret_from_exc;
+store6: sbi r3, r4, 1; /* Delay slot */
+#else
+ lbui r3, r5, 2;
+store5: sbi r3, r4, 0;
+ lbui r3, r5, 3;
+ brid ret_from_exc;
+store6: sbi r3, r4, 1; /* Delay slot */
+#endif
+
+ex_sw_end_vm: /* Exception handling of store word, ends. */
+
+/* We have to handle the case where the get/put_user macros get an unaligned
+ * pointer into a bad page area. We have to find out which original
+ * instruction caused it and call the fixup for that original instruction,
+ * not for an instruction in the unaligned handler */
+ex_unaligned_fixup:
+ ori r5, r7, 0 /* setup pointer to pt_regs */
+ lwi r6, r7, PT_PC; /* faulting address is one instruction above */
+ addik r6, r6, -4 /* for finding proper fixup */
+ swi r6, r7, PT_PC; /* and save it back to PT_PC */
+ addik r7, r0, SIGSEGV
+ /* call bad_page_fault to find the proper fixup; the fixup address is saved
+ * in PT_PC, which is used as the return address from the exception */
+ addik r15, r0, ret_from_exc-8 /* setup return address */
+ brid bad_page_fault
+ nop
+
+/* We cover every load/store because any attempt to access could fail */
+.section __ex_table,"a";
+ .word load1,ex_unaligned_fixup;
+ .word load2,ex_unaligned_fixup;
+ .word load3,ex_unaligned_fixup;
+ .word load4,ex_unaligned_fixup;
+ .word load5,ex_unaligned_fixup;
+ .word store1,ex_unaligned_fixup;
+ .word store2,ex_unaligned_fixup;
+ .word store3,ex_unaligned_fixup;
+ .word store4,ex_unaligned_fixup;
+ .word store5,ex_unaligned_fixup;
+ .word store6,ex_unaligned_fixup;
+.previous;
+.end _unaligned_data_exception
+#endif /* CONFIG_MMU */
+
+.global ex_handler_unhandled
+ex_handler_unhandled:
+/* FIXME add handle function for unhandled exception - dump register */
+ bri 0
+
+/*
+ * hw_exception_handler Jump Table
+ * - Contains code snippets for each register that caused the unaligned exception
+ * - Hence exception handler is NOT self-modifying
+ * - Separate table for load exceptions and store exceptions.
+ * - Each table is of size: (8 * 32) = 256 bytes
+ */
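+/* Dispatch sketch (illustrative only): each entry is two 4-byte
+ * instructions, so the handlers above compute
+ *	entry = table_base + (regnum << 3)
+ * and branch to it directly.
+ */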
+
+.section .text
+.align 4
+lw_table:
+lw_r0: R3_TO_LWREG (0);
+lw_r1: LWREG_NOP;
+lw_r2: R3_TO_LWREG (2);
+lw_r3: R3_TO_LWREG_V (3);
+lw_r4: R3_TO_LWREG_V (4);
+lw_r5: R3_TO_LWREG_V (5);
+lw_r6: R3_TO_LWREG_V (6);
+lw_r7: R3_TO_LWREG (7);
+lw_r8: R3_TO_LWREG (8);
+lw_r9: R3_TO_LWREG (9);
+lw_r10: R3_TO_LWREG (10);
+lw_r11: R3_TO_LWREG (11);
+lw_r12: R3_TO_LWREG (12);
+lw_r13: R3_TO_LWREG (13);
+lw_r14: R3_TO_LWREG (14);
+lw_r15: R3_TO_LWREG (15);
+lw_r16: R3_TO_LWREG (16);
+lw_r17: LWREG_NOP;
+lw_r18: R3_TO_LWREG (18);
+lw_r19: R3_TO_LWREG (19);
+lw_r20: R3_TO_LWREG (20);
+lw_r21: R3_TO_LWREG (21);
+lw_r22: R3_TO_LWREG (22);
+lw_r23: R3_TO_LWREG (23);
+lw_r24: R3_TO_LWREG (24);
+lw_r25: R3_TO_LWREG (25);
+lw_r26: R3_TO_LWREG (26);
+lw_r27: R3_TO_LWREG (27);
+lw_r28: R3_TO_LWREG (28);
+lw_r29: R3_TO_LWREG (29);
+lw_r30: R3_TO_LWREG (30);
+#ifdef CONFIG_MMU
+lw_r31: R3_TO_LWREG_V (31);
+#else
+lw_r31: R3_TO_LWREG (31);
+#endif
+
+sw_table:
+sw_r0: SWREG_TO_R3 (0);
+sw_r1: SWREG_NOP;
+sw_r2: SWREG_TO_R3 (2);
+sw_r3: SWREG_TO_R3_V (3);
+sw_r4: SWREG_TO_R3_V (4);
+sw_r5: SWREG_TO_R3_V (5);
+sw_r6: SWREG_TO_R3_V (6);
+sw_r7: SWREG_TO_R3 (7);
+sw_r8: SWREG_TO_R3 (8);
+sw_r9: SWREG_TO_R3 (9);
+sw_r10: SWREG_TO_R3 (10);
+sw_r11: SWREG_TO_R3 (11);
+sw_r12: SWREG_TO_R3 (12);
+sw_r13: SWREG_TO_R3 (13);
+sw_r14: SWREG_TO_R3 (14);
+sw_r15: SWREG_TO_R3 (15);
+sw_r16: SWREG_TO_R3 (16);
+sw_r17: SWREG_NOP;
+sw_r18: SWREG_TO_R3 (18);
+sw_r19: SWREG_TO_R3 (19);
+sw_r20: SWREG_TO_R3 (20);
+sw_r21: SWREG_TO_R3 (21);
+sw_r22: SWREG_TO_R3 (22);
+sw_r23: SWREG_TO_R3 (23);
+sw_r24: SWREG_TO_R3 (24);
+sw_r25: SWREG_TO_R3 (25);
+sw_r26: SWREG_TO_R3 (26);
+sw_r27: SWREG_TO_R3 (27);
+sw_r28: SWREG_TO_R3 (28);
+sw_r29: SWREG_TO_R3 (29);
+sw_r30: SWREG_TO_R3 (30);
+#ifdef CONFIG_MMU
+sw_r31: SWREG_TO_R3_V (31);
+#else
+sw_r31: SWREG_TO_R3 (31);
+#endif
+
+#ifdef CONFIG_MMU
+lw_table_vm:
+lw_r0_vm: R3_TO_LWREG_VM (0);
+lw_r1_vm: R3_TO_LWREG_VM_V (1);
+lw_r2_vm: R3_TO_LWREG_VM_V (2);
+lw_r3_vm: R3_TO_LWREG_VM_V (3);
+lw_r4_vm: R3_TO_LWREG_VM_V (4);
+lw_r5_vm: R3_TO_LWREG_VM_V (5);
+lw_r6_vm: R3_TO_LWREG_VM_V (6);
+lw_r7_vm: R3_TO_LWREG_VM_V (7);
+lw_r8_vm: R3_TO_LWREG_VM_V (8);
+lw_r9_vm: R3_TO_LWREG_VM_V (9);
+lw_r10_vm: R3_TO_LWREG_VM_V (10);
+lw_r11_vm: R3_TO_LWREG_VM_V (11);
+lw_r12_vm: R3_TO_LWREG_VM_V (12);
+lw_r13_vm: R3_TO_LWREG_VM_V (13);
+lw_r14_vm: R3_TO_LWREG_VM_V (14);
+lw_r15_vm: R3_TO_LWREG_VM_V (15);
+lw_r16_vm: R3_TO_LWREG_VM_V (16);
+lw_r17_vm: R3_TO_LWREG_VM_V (17);
+lw_r18_vm: R3_TO_LWREG_VM_V (18);
+lw_r19_vm: R3_TO_LWREG_VM_V (19);
+lw_r20_vm: R3_TO_LWREG_VM_V (20);
+lw_r21_vm: R3_TO_LWREG_VM_V (21);
+lw_r22_vm: R3_TO_LWREG_VM_V (22);
+lw_r23_vm: R3_TO_LWREG_VM_V (23);
+lw_r24_vm: R3_TO_LWREG_VM_V (24);
+lw_r25_vm: R3_TO_LWREG_VM_V (25);
+lw_r26_vm: R3_TO_LWREG_VM_V (26);
+lw_r27_vm: R3_TO_LWREG_VM_V (27);
+lw_r28_vm: R3_TO_LWREG_VM_V (28);
+lw_r29_vm: R3_TO_LWREG_VM_V (29);
+lw_r30_vm: R3_TO_LWREG_VM_V (30);
+lw_r31_vm: R3_TO_LWREG_VM_V (31);
+
+sw_table_vm:
+sw_r0_vm: SWREG_TO_R3_VM (0);
+sw_r1_vm: SWREG_TO_R3_VM_V (1);
+sw_r2_vm: SWREG_TO_R3_VM_V (2);
+sw_r3_vm: SWREG_TO_R3_VM_V (3);
+sw_r4_vm: SWREG_TO_R3_VM_V (4);
+sw_r5_vm: SWREG_TO_R3_VM_V (5);
+sw_r6_vm: SWREG_TO_R3_VM_V (6);
+sw_r7_vm: SWREG_TO_R3_VM_V (7);
+sw_r8_vm: SWREG_TO_R3_VM_V (8);
+sw_r9_vm: SWREG_TO_R3_VM_V (9);
+sw_r10_vm: SWREG_TO_R3_VM_V (10);
+sw_r11_vm: SWREG_TO_R3_VM_V (11);
+sw_r12_vm: SWREG_TO_R3_VM_V (12);
+sw_r13_vm: SWREG_TO_R3_VM_V (13);
+sw_r14_vm: SWREG_TO_R3_VM_V (14);
+sw_r15_vm: SWREG_TO_R3_VM_V (15);
+sw_r16_vm: SWREG_TO_R3_VM_V (16);
+sw_r17_vm: SWREG_TO_R3_VM_V (17);
+sw_r18_vm: SWREG_TO_R3_VM_V (18);
+sw_r19_vm: SWREG_TO_R3_VM_V (19);
+sw_r20_vm: SWREG_TO_R3_VM_V (20);
+sw_r21_vm: SWREG_TO_R3_VM_V (21);
+sw_r22_vm: SWREG_TO_R3_VM_V (22);
+sw_r23_vm: SWREG_TO_R3_VM_V (23);
+sw_r24_vm: SWREG_TO_R3_VM_V (24);
+sw_r25_vm: SWREG_TO_R3_VM_V (25);
+sw_r26_vm: SWREG_TO_R3_VM_V (26);
+sw_r27_vm: SWREG_TO_R3_VM_V (27);
+sw_r28_vm: SWREG_TO_R3_VM_V (28);
+sw_r29_vm: SWREG_TO_R3_VM_V (29);
+sw_r30_vm: SWREG_TO_R3_VM_V (30);
+sw_r31_vm: SWREG_TO_R3_VM_V (31);
+#endif /* CONFIG_MMU */
+
+/* Temporary data structures used in the handler */
+.section .data
+.align 4
+ex_tmp_data_loc_0:
+ .byte 0
+ex_tmp_data_loc_1:
+ .byte 0
+ex_tmp_data_loc_2:
+ .byte 0
+ex_tmp_data_loc_3:
+ .byte 0
+ex_reg_op:
+ .byte 0
diff --git a/arch/microblaze/kernel/irq.c b/arch/microblaze/kernel/irq.c
new file mode 100644
index 000000000..903dad822
--- /dev/null
+++ b/arch/microblaze/kernel/irq.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/ftrace.h>
+#include <linux/kernel.h>
+#include <linux/hardirq.h>
+#include <linux/interrupt.h>
+#include <linux/irqflags.h>
+#include <linux/seq_file.h>
+#include <linux/kernel_stat.h>
+#include <linux/irq.h>
+#include <linux/irqchip.h>
+#include <linux/of_irq.h>
+
+static u32 concurrent_irq;
+
+void __irq_entry do_IRQ(struct pt_regs *regs)
+{
+ unsigned int irq;
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ trace_hardirqs_off();
+
+ irq_enter();
+ irq = xintc_get_irq();
+next_irq:
+ BUG_ON(!irq);
+ generic_handle_irq(irq);
+
+ irq = xintc_get_irq();
+ if (irq != -1U) {
+ pr_debug("next irq: %d\n", irq);
+ ++concurrent_irq;
+ goto next_irq;
+ }
+
+ irq_exit();
+ set_irq_regs(old_regs);
+ trace_hardirqs_on();
+}
+
+void __init init_IRQ(void)
+{
+ /* process the entire interrupt tree in one go */
+ irqchip_init();
+}
diff --git a/arch/microblaze/kernel/kgdb.c b/arch/microblaze/kernel/kgdb.c
new file mode 100644
index 000000000..130cd0f06
--- /dev/null
+++ b/arch/microblaze/kernel/kgdb.c
@@ -0,0 +1,152 @@
+/*
+ * Microblaze KGDB support
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/kgdb.h>
+#include <linux/kdebug.h>
+#include <linux/irq.h>
+#include <linux/io.h>
+#include <asm/cacheflush.h>
+#include <asm/asm-offsets.h>
+#include <asm/kgdb.h>
+#include <asm/pvr.h>
+
+#define GDB_REG 0
+#define GDB_PC 32
+#define GDB_MSR 33
+#define GDB_EAR 34
+#define GDB_ESR 35
+#define GDB_FSR 36
+#define GDB_BTR 37
+#define GDB_PVR 38
+#define GDB_REDR 50
+#define GDB_RPID 51
+#define GDB_RZPR 52
+#define GDB_RTLBX 53
+#define GDB_RTLBSX 54 /* mfs can't read it */
+#define GDB_RTLBLO 55
+#define GDB_RTLBHI 56
+
+/* keep pvr separately because it is unchangeable */
+static struct pvr_s pvr;
+
+void pt_regs_to_gdb_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ unsigned int i;
+ unsigned long *pt_regb = (unsigned long *)regs;
+ int temp;
+
+ /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
+ for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
+ gdb_regs[i] = pt_regb[i];
+
+ /* Branch target register can't be changed */
+ __asm__ __volatile__ ("mfs %0, rbtr;" : "=r"(temp) : );
+ gdb_regs[GDB_BTR] = temp;
+
+ /* pvr part - we have 11 pvr regs */
+ for (i = 0; i < sizeof(struct pvr_s)/4; i++)
+ gdb_regs[GDB_PVR + i] = pvr.pvr[i];
+
+ /* read special registers - can't be changed */
+ __asm__ __volatile__ ("mfs %0, redr;" : "=r"(temp) : );
+ gdb_regs[GDB_REDR] = temp;
+ __asm__ __volatile__ ("mfs %0, rpid;" : "=r"(temp) : );
+ gdb_regs[GDB_RPID] = temp;
+ __asm__ __volatile__ ("mfs %0, rzpr;" : "=r"(temp) : );
+ gdb_regs[GDB_RZPR] = temp;
+ __asm__ __volatile__ ("mfs %0, rtlbx;" : "=r"(temp) : );
+ gdb_regs[GDB_RTLBX] = temp;
+ __asm__ __volatile__ ("mfs %0, rtlblo;" : "=r"(temp) : );
+ gdb_regs[GDB_RTLBLO] = temp;
+ __asm__ __volatile__ ("mfs %0, rtlbhi;" : "=r"(temp) : );
+ gdb_regs[GDB_RTLBHI] = temp;
+}
+
+void gdb_regs_to_pt_regs(unsigned long *gdb_regs, struct pt_regs *regs)
+{
+ unsigned int i;
+ unsigned long *pt_regb = (unsigned long *)regs;
+
+ /* pt_regs and gdb_regs have the same 37 values.
+ * The rest of gdb_regs are unused and can't be changed.
+ * The r0 register value can't be changed either. */
+ for (i = 1; i < (sizeof(struct pt_regs) / 4) - 1; i++)
+ pt_regb[i] = gdb_regs[i];
+}
+
+asmlinkage void microblaze_kgdb_break(struct pt_regs *regs)
+{
+ if (kgdb_handle_exception(1, SIGTRAP, 0, regs) != 0)
+ return;
+
+ /* Jump over the first arch_kgdb_breakpoint, which is the barrier needed
+ * to get kgdb working. The same solution is used for powerpc */
+ if (*(u32 *) (regs->pc) == *(u32 *) (&arch_kgdb_ops.gdb_bpt_instr))
+ regs->pc += BREAK_INSTR_SIZE;
+}
+
+/* untested */
+void sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *p)
+{
+ unsigned int i;
+ unsigned long *pt_regb = (unsigned long *)(p->thread.regs);
+
+ /* registers r0 - r31, pc, msr, ear, esr, fsr + do not save pt_mode */
+ for (i = 0; i < (sizeof(struct pt_regs) / 4) - 1; i++)
+ gdb_regs[i] = pt_regb[i];
+
+ /* pvr part - we have 11 pvr regs */
+ for (i = 0; i < sizeof(struct pvr_s)/4; i++)
+ gdb_regs[GDB_PVR + i] = pvr.pvr[i];
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long ip)
+{
+ regs->pc = ip;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ char *ptr;
+ unsigned long address;
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ /* handle the optional parameter */
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &address))
+ regs->pc = address;
+
+ return 0;
+ }
+ return -1; /* this means that we do not want to exit from the handler */
+}
+
+int kgdb_arch_init(void)
+{
+ get_pvr(&pvr); /* Fill PVR structure */
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+ /* Nothing to do */
+}
+
+/*
+ * Global data
+ */
+const struct kgdb_arch arch_kgdb_ops = {
+#ifdef __MICROBLAZEEL__
+ .gdb_bpt_instr = {0x18, 0x00, 0x0c, 0xba}, /* brki r16, 0x18 */
+#else
+ .gdb_bpt_instr = {0xba, 0x0c, 0x00, 0x18}, /* brki r16, 0x18 */
+#endif
+};
diff --git a/arch/microblaze/kernel/mcount.S b/arch/microblaze/kernel/mcount.S
new file mode 100644
index 000000000..fed9da5de
--- /dev/null
+++ b/arch/microblaze/kernel/mcount.S
@@ -0,0 +1,165 @@
+/*
+ * Low-level ftrace handling
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/linkage.h>
+
+#define NOALIGN_ENTRY(name) .globl name; name:
+
+/* FIXME MS: I think that I don't need to save all regs */
+#define SAVE_REGS \
+ addik r1, r1, -120; \
+ swi r2, r1, 4; \
+ swi r3, r1, 8; \
+ swi r4, r1, 12; \
+ swi r5, r1, 116; \
+ swi r6, r1, 16; \
+ swi r7, r1, 20; \
+ swi r8, r1, 24; \
+ swi r9, r1, 28; \
+ swi r10, r1, 32; \
+ swi r11, r1, 36; \
+ swi r12, r1, 40; \
+ swi r13, r1, 44; \
+ swi r14, r1, 48; \
+ swi r16, r1, 52; \
+ swi r17, r1, 56; \
+ swi r18, r1, 60; \
+ swi r19, r1, 64; \
+ swi r20, r1, 68; \
+ swi r21, r1, 72; \
+ swi r22, r1, 76; \
+ swi r23, r1, 80; \
+ swi r24, r1, 84; \
+ swi r25, r1, 88; \
+ swi r26, r1, 92; \
+ swi r27, r1, 96; \
+ swi r28, r1, 100; \
+ swi r29, r1, 104; \
+ swi r30, r1, 108; \
+ swi r31, r1, 112;
+
+#define RESTORE_REGS \
+ lwi r2, r1, 4; \
+ lwi r3, r1, 8; \
+ lwi r4, r1, 12; \
+ lwi r5, r1, 116; \
+ lwi r6, r1, 16; \
+ lwi r7, r1, 20; \
+ lwi r8, r1, 24; \
+ lwi r9, r1, 28; \
+ lwi r10, r1, 32; \
+ lwi r11, r1, 36; \
+ lwi r12, r1, 40; \
+ lwi r13, r1, 44; \
+ lwi r14, r1, 48; \
+ lwi r16, r1, 52; \
+ lwi r17, r1, 56; \
+ lwi r18, r1, 60; \
+ lwi r19, r1, 64; \
+ lwi r20, r1, 68; \
+ lwi r21, r1, 72; \
+ lwi r22, r1, 76; \
+ lwi r23, r1, 80; \
+ lwi r24, r1, 84; \
+ lwi r25, r1, 88; \
+ lwi r26, r1, 92; \
+ lwi r27, r1, 96; \
+ lwi r28, r1, 100; \
+ lwi r29, r1, 104; \
+ lwi r30, r1, 108; \
+ lwi r31, r1, 112; \
+ addik r1, r1, 120;
+
+ENTRY(ftrace_stub)
+ rtsd r15, 8;
+ nop;
+
+ENTRY(_mcount)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ENTRY(ftrace_caller)
+ /* MS: It is just a barrier, which is removed from C code */
+ rtsd r15, 8
+ nop
+#endif /* CONFIG_DYNAMIC_FTRACE */
+ SAVE_REGS
+ swi r15, r1, 0;
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+#ifndef CONFIG_DYNAMIC_FTRACE
+ lwi r5, r0, ftrace_graph_return;
+ addik r6, r0, ftrace_stub; /* asm implementation */
+ cmpu r5, r5, r6; /* ftrace_graph_return != ftrace_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+
+ lwi r6, r0, ftrace_graph_entry;
+ addik r5, r0, ftrace_graph_entry_stub; /* implemented in C */
+ cmpu r5, r5, r6; /* ftrace_graph_entry != ftrace_graph_entry_stub */
+ beqid r5, end_graph_tracer;
+ nop;
+#else /* CONFIG_DYNAMIC_FTRACE */
+NOALIGN_ENTRY(ftrace_call_graph)
+ /* MS: jump over the graph function - this branch is patched from C code */
+ bri end_graph_tracer
+#endif /* CONFIG_DYNAMIC_FTRACE */
+ addik r5, r1, 120; /* MS: load parent addr */
+ addik r6, r15, 0; /* MS: load current function addr */
+ bralid r15, prepare_ftrace_return;
+ nop;
+ /* MS: the graph path was taken, so we can jump over the function trace */
+ brid end;
+ nop;
+end_graph_tracer:
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
+#ifndef CONFIG_DYNAMIC_FTRACE
+ /* MS: test whether the function trace is enabled or not */
+ lwi r20, r0, ftrace_trace_function;
+ addik r6, r0, ftrace_stub;
+ cmpu r5, r20, r6; /* ftrace_trace_function != ftrace_stub */
+ beqid r5, end; /* MS: not taken -> jump over */
+ nop;
+#else /* CONFIG_DYNAMIC_FTRACE */
+NOALIGN_ENTRY(ftrace_call)
+/* instructions for the setup: imm FUNC_part1; addik r20, r0, FUNC_part2 */
+ nop
+ nop
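+/* At runtime the two nops above are patched into that imm/addik pair, so
+ * that r20 below holds the address of the active tracer and the brald
+ * further down calls it (sketch of the dynamic-ftrace patching) */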
+#endif /* CONFIG_DYNAMIC_FTRACE */
+/* static normal trace */
+ lwi r6, r1, 120; /* MS: load parent addr */
+ addik r5, r15, -4; /* MS: load current function addr */
+ /* MS: here is dependency on previous code */
+ brald r15, r20; /* MS: jump to ftrace handler */
+ nop;
+end:
+ lwi r15, r1, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(return_to_handler)
+ nop; /* MS: just barrier for rtsd r15, 8 */
+ nop;
+ SAVE_REGS
+ swi r15, r1, 0;
+
+ /* MS: find out returning address */
+ bralid r15, ftrace_return_to_handler;
+ nop;
+
+ /* MS: the return value from ftrace_return_to_handler is our return addr;
+ * this must happen before RESTORE_REGS because r3 is restored there */
+ addik r15, r3, 0;
+ RESTORE_REGS
+
+ rtsd r15, 8; /* MS: jump back */
+ nop;
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/microblaze/kernel/microblaze_ksyms.c b/arch/microblaze/kernel/microblaze_ksyms.c
new file mode 100644
index 000000000..51c43ee5e
--- /dev/null
+++ b/arch/microblaze/kernel/microblaze_ksyms.c
@@ -0,0 +1,51 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ */
+
+#include <linux/export.h>
+#include <linux/string.h>
+#include <linux/delay.h>
+#include <linux/in6.h>
+#include <linux/syscalls.h>
+
+#include <asm/checksum.h>
+#include <asm/cacheflush.h>
+#include <linux/io.h>
+#include <asm/page.h>
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+
+#ifdef CONFIG_FUNCTION_TRACER
+extern void _mcount(void);
+EXPORT_SYMBOL(_mcount);
+#endif
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(__copy_tofrom_user);
+EXPORT_SYMBOL(__strncpy_user);
+
+#ifdef CONFIG_OPT_LIB_ASM
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memmove);
+#endif
+
+#ifdef CONFIG_MMU
+EXPORT_SYMBOL(empty_zero_page);
+#endif
+
+EXPORT_SYMBOL(mbc);
+
+extern void __divsi3(void);
+EXPORT_SYMBOL(__divsi3);
+extern void __modsi3(void);
+EXPORT_SYMBOL(__modsi3);
+extern void __mulsi3(void);
+EXPORT_SYMBOL(__mulsi3);
+extern void __udivsi3(void);
+EXPORT_SYMBOL(__udivsi3);
+extern void __umodsi3(void);
+EXPORT_SYMBOL(__umodsi3);
diff --git a/arch/microblaze/kernel/misc.S b/arch/microblaze/kernel/misc.S
new file mode 100644
index 000000000..1228a09d8
--- /dev/null
+++ b/arch/microblaze/kernel/misc.S
@@ -0,0 +1,66 @@
+/*
+ * Miscellaneous low-level MMU functions.
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2007 Xilinx, Inc. All rights reserved.
+ *
+ * Derived from arch/ppc/kernel/misc.S
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/linkage.h>
+#include <linux/sys.h>
+#include <asm/unistd.h>
+#include <linux/errno.h>
+#include <asm/mmu.h>
+#include <asm/page.h>
+
+ .text
+/*
+ * Flush MMU TLB
+ *
+ * We avoid flushing the pinned 0, 1 and possibly 2 entries.
+ */
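+/* Loop sketch in C terms (illustrative only):
+ *	for (i = tlb_skip; i < MICROBLAZE_TLB_SIZE; i++) {
+ *		select TLB entry i via rtlbx;
+ *		write 0 to rtlbhi;	(V bit cleared -> entry invalid)
+ *	}
+ */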
+.globl _tlbia;
+.type _tlbia, @function
+.align 4;
+_tlbia:
+ lwi r12, r0, tlb_skip;
+ /* isync */
+_tlbia_1:
+ mts rtlbx, r12
+ nop
+ mts rtlbhi, r0 /* flush: ensure V is clear */
+ nop
+ rsubi r11, r12, MICROBLAZE_TLB_SIZE - 1
+ bneid r11, _tlbia_1 /* loop for all entries */
+ addik r12, r12, 1
+ mbar 1 /* sync */
+ rtsd r15, 8
+ nop
+ .size _tlbia, . - _tlbia
+
+/*
+ * Flush MMU TLB for a particular address (in r5)
+ */
+.globl _tlbie;
+.type _tlbie, @function
+.align 4;
+_tlbie:
+ mts rtlbsx, r5 /* look up the address in TLB */
+ nop
+ mfs r12, rtlbx /* Retrieve index */
+ nop
+ blti r12, _tlbie_1 /* Check if found */
+ mts rtlbhi, r0 /* flush: ensure V is clear */
+ nop
+ mbar 1 /* sync */
+_tlbie_1:
+ rtsd r15, 8
+ nop
+
+ .size _tlbie, . - _tlbie
diff --git a/arch/microblaze/kernel/module.c b/arch/microblaze/kernel/module.c
new file mode 100644
index 000000000..9f12e3c2b
--- /dev/null
+++ b/arch/microblaze/kernel/module.c
@@ -0,0 +1,122 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ */
+
+#include <linux/export.h>
+#include <linux/moduleloader.h>
+#include <linux/kernel.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/pgtable.h>
+
+#include <asm/cacheflush.h>
+
+int apply_relocate_add(Elf32_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec, struct module *module)
+{
+
+ unsigned int i;
+ Elf32_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ Elf32_Sym *sym;
+ unsigned long int *location;
+ unsigned long int value;
+#if __GNUC__ < 4
+ unsigned long int old_value;
+#endif
+
+ pr_debug("Applying add relocation section %u to %u\n",
+ relsec, sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rela); i++) {
+
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr +
+ rela[i].r_offset;
+ sym = (Elf32_Sym *)sechdrs[symindex].sh_addr +
+ ELF32_R_SYM(rela[i].r_info);
+ value = sym->st_value + rela[i].r_addend;
+
+ switch (ELF32_R_TYPE(rela[i].r_info)) {
+
+ /*
+ * Be careful! mb-gcc / mb-ld splits the relocs between the
+ * text and the reloc table. In general this means we must
+ * read the current contents of (*location), add any offset,
+ * then store the result back in place
+ */
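+ /*
+ * For example, R_MICROBLAZE_64 targets an imm/addik pair (sketch):
+ *	imm   hi16(value)		-> low half of location[0]
+ *	addik rD, rA, lo16(value)	-> low half of location[1]
+ * which is why the two 16-bit immediates are patched separately below.
+ */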
+
+ case R_MICROBLAZE_32:
+#if __GNUC__ < 4
+ old_value = *location;
+ *location = value + old_value;
+
+ pr_debug("R_MICROBLAZE_32 (%08lx->%08lx)\n",
+ old_value, value);
+#else
+ *location = value;
+#endif
+ break;
+
+ case R_MICROBLAZE_64:
+#if __GNUC__ < 4
+ /* Split relocs only required/used pre gcc4.1.1 */
+ old_value = ((location[0] & 0x0000FFFF) << 16) |
+ (location[1] & 0x0000FFFF);
+ value += old_value;
+#endif
+ location[0] = (location[0] & 0xFFFF0000) |
+ (value >> 16);
+ location[1] = (location[1] & 0xFFFF0000) |
+ (value & 0xFFFF);
+#if __GNUC__ < 4
+ pr_debug("R_MICROBLAZE_64 (%08lx->%08lx)\n",
+ old_value, value);
+#endif
+ break;
+
+ case R_MICROBLAZE_64_PCREL:
+#if __GNUC__ < 4
+ old_value = (location[0] & 0xFFFF) << 16 |
+ (location[1] & 0xFFFF);
+ value -= old_value;
+#endif
+ value -= (unsigned long int)(location) + 4;
+ location[0] = (location[0] & 0xFFFF0000) |
+ (value >> 16);
+ location[1] = (location[1] & 0xFFFF0000) |
+ (value & 0xFFFF);
+ pr_debug("R_MICROBLAZE_64_PCREL (%08lx)\n",
+ value);
+ break;
+
+ case R_MICROBLAZE_32_PCREL_LO:
+ pr_debug("R_MICROBLAZE_32_PCREL_LO\n");
+ break;
+
+ case R_MICROBLAZE_64_NONE:
+ pr_debug("R_MICROBLAZE_64_NONE\n");
+ break;
+
+ case R_MICROBLAZE_NONE:
+ pr_debug("R_MICROBLAZE_NONE\n");
+ break;
+
+ default:
+ pr_err("module %s: Unknown relocation: %u\n",
+ module->name,
+ ELF32_R_TYPE(rela[i].r_info));
+ return -ENOEXEC;
+ }
+ }
+ return 0;
+}
+
+int module_finalize(const Elf32_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *module)
+{
+ flush_dcache();
+ return 0;
+}
diff --git a/arch/microblaze/kernel/process.c b/arch/microblaze/kernel/process.c
new file mode 100644
index 000000000..ee000ae17
--- /dev/null
+++ b/arch/microblaze/kernel/process.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/cpu.h>
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task.h>
+#include <linux/sched/task_stack.h>
+#include <linux/pm.h>
+#include <linux/tick.h>
+#include <linux/bitops.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h> /* for USER_DS macros */
+#include <asm/cacheflush.h>
+
+void show_regs(struct pt_regs *regs)
+{
+ show_regs_print_info(KERN_INFO);
+
+ pr_info(" Registers dump: mode=%X\r\n", regs->pt_mode);
+ pr_info(" r1=%08lX, r2=%08lX, r3=%08lX, r4=%08lX\n",
+ regs->r1, regs->r2, regs->r3, regs->r4);
+ pr_info(" r5=%08lX, r6=%08lX, r7=%08lX, r8=%08lX\n",
+ regs->r5, regs->r6, regs->r7, regs->r8);
+ pr_info(" r9=%08lX, r10=%08lX, r11=%08lX, r12=%08lX\n",
+ regs->r9, regs->r10, regs->r11, regs->r12);
+ pr_info(" r13=%08lX, r14=%08lX, r15=%08lX, r16=%08lX\n",
+ regs->r13, regs->r14, regs->r15, regs->r16);
+ pr_info(" r17=%08lX, r18=%08lX, r19=%08lX, r20=%08lX\n",
+ regs->r17, regs->r18, regs->r19, regs->r20);
+ pr_info(" r21=%08lX, r22=%08lX, r23=%08lX, r24=%08lX\n",
+ regs->r21, regs->r22, regs->r23, regs->r24);
+ pr_info(" r25=%08lX, r26=%08lX, r27=%08lX, r28=%08lX\n",
+ regs->r25, regs->r26, regs->r27, regs->r28);
+ pr_info(" r29=%08lX, r30=%08lX, r31=%08lX, rPC=%08lX\n",
+ regs->r29, regs->r30, regs->r31, regs->pc);
+ pr_info(" msr=%08lX, ear=%08lX, esr=%08lX, fsr=%08lX\n",
+ regs->msr, regs->ear, regs->esr, regs->fsr);
+}
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+void flush_thread(void)
+{
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
+ struct task_struct *p, unsigned long tls)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+ struct thread_info *ti = task_thread_info(p);
+
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ /* if we're creating a new kernel thread then just zero all
+ * the registers. That's OK for a brand new thread. */
+ memset(childregs, 0, sizeof(struct pt_regs));
+ memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
+ ti->cpu_context.r1 = (unsigned long)childregs;
+ ti->cpu_context.r20 = (unsigned long)usp; /* fn */
+ ti->cpu_context.r19 = (unsigned long)arg;
+ childregs->pt_mode = 1;
+ local_save_flags(childregs->msr);
+#ifdef CONFIG_MMU
+ ti->cpu_context.msr = childregs->msr & ~MSR_IE;
+#endif
+ ti->cpu_context.r15 = (unsigned long)ret_from_kernel_thread - 8;
+ return 0;
+ }
+ *childregs = *current_pt_regs();
+ if (usp)
+ childregs->r1 = usp;
+
+ memset(&ti->cpu_context, 0, sizeof(struct cpu_context));
+ ti->cpu_context.r1 = (unsigned long)childregs;
+#ifndef CONFIG_MMU
+ ti->cpu_context.msr = (unsigned long)childregs->msr;
+#else
+ childregs->msr |= MSR_UMS;
+
+ /* Note that childregs is a copy of the parent's regs, which were saved
+ * immediately after entering kernel state, before enabling VM. This MSR
+ * will be restored in switch_to and RETURN(), and we want the right
+ * machine state there; specifically, this state must have interrupts
+ * disabled before and enabled after performing rtbd.
+ * So compose the right MSR for RETURN(). It also works for switch_to,
+ * except for the VM and UMS bits.
+ * Don't touch the UMS, CARRY and cache bits;
+ * right now MSR is a copy of the parent's. */
+ childregs->msr &= ~MSR_EIP;
+ childregs->msr |= MSR_IE;
+ childregs->msr &= ~MSR_VM;
+ childregs->msr |= MSR_VMS;
+ childregs->msr |= MSR_EE; /* exceptions will be enabled*/
+
+ ti->cpu_context.msr = (childregs->msr|MSR_VM);
+ ti->cpu_context.msr &= ~MSR_UMS; /* switch_to to kernel mode */
+ ti->cpu_context.msr &= ~MSR_IE;
+#endif
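+ /* Net effect (sketch, MMU case): childregs->msr ends up with
+ * UMS|VMS|IE|EE set and VM|EIP clear for the eventual user-mode
+ * return, while cpu_context.msr is the same value with VM set and
+ * UMS|IE cleared, as expected by switch_to(). */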
+ ti->cpu_context.r15 = (unsigned long)ret_from_fork - 8;
+
+ /*
+ * r21 is the thread reg, r10 is 6th arg to clone
+ * which contains TLS area
+ */
+ if (clone_flags & CLONE_SETTLS)
+ childregs->r21 = tls;
+
+ return 0;
+}
+
+unsigned long get_wchan(struct task_struct *p)
+{
+/* TBD (used by procfs) */
+ return 0;
+}
+
+/* Set up a thread for executing a new program */
+void start_thread(struct pt_regs *regs, unsigned long pc, unsigned long usp)
+{
+ regs->pc = pc;
+ regs->r1 = usp;
+ regs->pt_mode = 0;
+#ifdef CONFIG_MMU
+ regs->msr |= MSR_UMS;
+ regs->msr &= ~MSR_VM;
+#endif
+}
+
+#ifdef CONFIG_MMU
+#include <linux/elfcore.h>
+/*
+ * Dump the FPU register state for a core dump
+ */
+int dump_fpu(struct pt_regs *regs, elf_fpregset_t *fpregs)
+{
+ return 0; /* MicroBlaze has no separate FPU registers */
+}
+#endif /* CONFIG_MMU */
+
+void arch_cpu_idle(void)
+{
+ raw_local_irq_enable();
+}
diff --git a/arch/microblaze/kernel/prom.c b/arch/microblaze/kernel/prom.c
new file mode 100644
index 000000000..c5c6186a7
--- /dev/null
+++ b/arch/microblaze/kernel/prom.c
@@ -0,0 +1,30 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Procedures for creating, accessing and interpreting the device tree.
+ *
+ * Paul Mackerras August 1996.
+ * Copyright (C) 1996-2005 Paul Mackerras.
+ *
+ * Adapted for 64bit PowerPC by Dave Engebretsen and Peter Bergner.
+ * {engebret|bergner}@us.ibm.com
+ */
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/memblock.h>
+#include <linux/of_fdt.h>
+
+void __init early_init_devtree(void *params)
+{
+ pr_debug(" -> early_init_devtree(%p)\n", params);
+
+ early_init_dt_scan(params);
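+	/* If the device tree did not supply bootargs, fall back to the
+	 * command line stashed in cmd_line by early boot code. */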
+ if (!strlen(boot_command_line))
+ strlcpy(boot_command_line, cmd_line, COMMAND_LINE_SIZE);
+
+ memblock_allow_resize();
+
+ pr_debug("Phys. mem: %lx\n", (unsigned long) memblock_phys_mem_size());
+
+ pr_debug(" <- early_init_devtree()\n");
+}
diff --git a/arch/microblaze/kernel/ptrace.c b/arch/microblaze/kernel/ptrace.c
new file mode 100644
index 000000000..badd28688
--- /dev/null
+++ b/arch/microblaze/kernel/ptrace.c
@@ -0,0 +1,170 @@
+/*
+ * `ptrace' system call
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2004-2007 John Williams <john.williams@petalogix.com>
+ *
+ * derived from arch/v850/kernel/ptrace.c
+ *
+ * Copyright (C) 2002,03 NEC Electronics Corporation
+ * Copyright (C) 2002,03 Miles Bader <miles@gnu.org>
+ *
+ * Derived from arch/mips/kernel/ptrace.c:
+ *
+ * Copyright (C) 1992 Ross Biro
+ * Copyright (C) Linus Torvalds
+ * Copyright (C) 1994, 95, 96, 97, 98, 2000 Ralf Baechle
+ * Copyright (C) 1996 David S. Miller
+ * Kevin D. Kissell, kevink@mips.com and Carsten Langgaard, carstenl@mips.com
+ * Copyright (C) 1999 MIPS Technologies, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/ptrace.h>
+#include <linux/signal.h>
+#include <linux/elf.h>
+#include <linux/audit.h>
+#include <linux/seccomp.h>
+#include <linux/tracehook.h>
+
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <linux/uaccess.h>
+#include <asm/asm-offsets.h>
+#include <asm/cacheflush.h>
+#include <asm/syscall.h>
+#include <linux/io.h>
+
+/* Returns the address where the register at REG_OFFS in P is stashed away. */
+static microblaze_reg_t *reg_save_addr(unsigned reg_offs,
+ struct task_struct *t)
+{
+ struct pt_regs *regs;
+
+ /*
+ * Three basic cases:
+ *
+ * (1) A register normally saved before calling the scheduler, is
+ * available in the kernel entry pt_regs structure at the top
+ * of the kernel stack. The kernel trap/irq exit path takes
+ * care to save/restore almost all registers for ptrace'd
+ * processes.
+ *
+ * (2) A call-clobbered register, where the process P entered the
+ * kernel via [syscall] trap, is not stored anywhere; that's
+ * OK, because such registers are not expected to be preserved
+ * when the trap returns anyway (so we don't actually bother to
+ * test for this case).
+ *
+ * (3) A few registers not used at all by the kernel, and so
+ * normally never saved except by context-switches, are in the
+ * context switch state.
+ */
+
+ /* Register saved during kernel entry (or not available). */
+ regs = task_pt_regs(t);
+
+ return (microblaze_reg_t *)((char *)regs + reg_offs);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ int rval;
+ unsigned long val = 0;
+
+ switch (request) {
+ /* Read/write the word at location ADDR in the registers. */
+ case PTRACE_PEEKUSR:
+ case PTRACE_POKEUSR:
+ pr_debug("PEEKUSR/POKEUSR : 0x%08lx\n", addr);
+ rval = 0;
+ if (addr >= PT_SIZE && request == PTRACE_PEEKUSR) {
+ /*
+ * Special requests that don't actually correspond
+ * to offsets in struct pt_regs.
+ */
+ if (addr == PT_TEXT_ADDR) {
+ val = child->mm->start_code;
+ } else if (addr == PT_DATA_ADDR) {
+ val = child->mm->start_data;
+ } else if (addr == PT_TEXT_LEN) {
+ val = child->mm->end_code
+ - child->mm->start_code;
+ } else {
+ rval = -EIO;
+ }
+ } else if (addr < PT_SIZE && (addr & 0x3) == 0) {
+ microblaze_reg_t *reg_addr = reg_save_addr(addr, child);
+ if (request == PTRACE_PEEKUSR)
+ val = *reg_addr;
+ else {
+#if 1
+ *reg_addr = data;
+#else
+			/* MS: potential problem on a write-back system.
+			 * Be aware that reg_addr is a virtual address; a
+			 * virt_to_phys conversion is necessary.
+			 * This could be a sensible solution.
+			 */
+ u32 paddr = virt_to_phys((u32)reg_addr);
+ invalidate_icache_range(paddr, paddr + 4);
+ *reg_addr = data;
+ flush_dcache_range(paddr, paddr + 4);
+#endif
+ }
+ } else
+ rval = -EIO;
+
+ if (rval == 0 && request == PTRACE_PEEKUSR)
+ rval = put_user(val, (unsigned long __user *)data);
+ break;
+ default:
+ rval = ptrace_request(child, request, addr, data);
+ }
+ return rval;
+}
+
+asmlinkage unsigned long do_syscall_trace_enter(struct pt_regs *regs)
+{
+ unsigned long ret = 0;
+
+ secure_computing_strict(regs->r12);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE) &&
+ tracehook_report_syscall_entry(regs))
+ /*
+ * Tracing decided this syscall should not happen.
+ * We'll return a bogus call number to get an ENOSYS
+		 * error, but leave the original number in regs->r12.
+ */
+ ret = -1L;
+
+ audit_syscall_entry(regs->r12, regs->r5, regs->r6, regs->r7, regs->r8);
+
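+	/* Hand the (possibly poisoned) syscall number back to the caller */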
+ return ret ?: regs->r12;
+}
+
+asmlinkage void do_syscall_trace_leave(struct pt_regs *regs)
+{
+ int step;
+
+ audit_syscall_exit(regs);
+
+ step = test_thread_flag(TIF_SINGLESTEP);
+ if (step || test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, step);
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ /* nothing to do */
+}
diff --git a/arch/microblaze/kernel/reset.c b/arch/microblaze/kernel/reset.c
new file mode 100644
index 000000000..5f4722908
--- /dev/null
+++ b/arch/microblaze/kernel/reset.c
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/of_platform.h>
+#include <linux/reboot.h>
+
+void machine_shutdown(void)
+{
+ pr_notice("Machine shutdown...\n");
+ while (1)
+ ;
+}
+
+void machine_halt(void)
+{
+ pr_notice("Machine halt...\n");
+ while (1)
+ ;
+}
+
+void machine_power_off(void)
+{
+ pr_notice("Machine power off...\n");
+ while (1)
+ ;
+}
+
+void machine_restart(char *cmd)
+{
+ do_kernel_restart(cmd);
+ /* Give the restart hook 1 s to take us down */
+ mdelay(1000);
+ pr_emerg("Reboot failed -- System halted\n");
+	while (1)
+		;
+}
diff --git a/arch/microblaze/kernel/setup.c b/arch/microblaze/kernel/setup.c
new file mode 100644
index 000000000..333b09658
--- /dev/null
+++ b/arch/microblaze/kernel/setup.c
@@ -0,0 +1,201 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/init.h>
+#include <linux/clk-provider.h>
+#include <linux/clocksource.h>
+#include <linux/string.h>
+#include <linux/seq_file.h>
+#include <linux/cpu.h>
+#include <linux/initrd.h>
+#include <linux/console.h>
+#include <linux/debugfs.h>
+#include <linux/of_fdt.h>
+#include <linux/pgtable.h>
+
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/page.h>
+#include <linux/io.h>
+#include <linux/bug.h>
+#include <linux/param.h>
+#include <linux/pci.h>
+#include <linux/cache.h>
+#include <linux/of.h>
+#include <linux/dma-mapping.h>
+#include <asm/cacheflush.h>
+#include <asm/entry.h>
+#include <asm/cpuinfo.h>
+
+
+DEFINE_PER_CPU(unsigned int, KSP); /* Saved kernel stack pointer */
+DEFINE_PER_CPU(unsigned int, KM); /* Kernel/user mode */
+DEFINE_PER_CPU(unsigned int, ENTRY_SP); /* Saved SP on kernel entry */
+DEFINE_PER_CPU(unsigned int, R11_SAVE); /* Temp variable for entry */
+DEFINE_PER_CPU(unsigned int, CURRENT_SAVE); /* Saved current pointer */
+
+/*
+ * cmd_line is placed in the .data section because it can be initialized
+ * from ASM code. Its default home would be the BSS section, which is
+ * cleared in machine_early_init().
+ */
+char cmd_line[COMMAND_LINE_SIZE] __section(".data");
+
+void __init setup_arch(char **cmdline_p)
+{
+ *cmdline_p = boot_command_line;
+
+ setup_memory();
+
+ console_verbose();
+
+ unflatten_device_tree();
+
+ setup_cpuinfo();
+
+ microblaze_cache_init();
+
+ xilinx_pci_init();
+}
+
+#ifdef CONFIG_MTD_UCLINUX
+/* Handle both romfs and cramfs types, without generating unnecessary
+ * code (i.e. no point checking for CRAMFS if it's not even enabled). */
+inline unsigned get_romfs_len(unsigned *addr)
+{
+#ifdef CONFIG_ROMFS_FS
+ if (memcmp(&addr[0], "-rom1fs-", 8) == 0) /* romfs */
+ return be32_to_cpu(addr[2]);
+#endif
+
+#ifdef CONFIG_CRAMFS
+ if (addr[0] == le32_to_cpu(0x28cd3d45)) /* cramfs */
+ return le32_to_cpu(addr[1]);
+#endif
+ return 0;
+}
+#endif	/* CONFIG_MTD_UCLINUX */
+
+unsigned long kernel_tlb;
+
+void __init machine_early_init(const char *cmdline, unsigned int ram,
+ unsigned int fdt, unsigned int msr, unsigned int tlb0,
+ unsigned int tlb1)
+{
+ unsigned long *src, *dst;
+ unsigned int offset = 0;
+
+	/* If CONFIG_MTD_UCLINUX is defined, assume ROMFS is at the
+	 * end of the kernel. There are two positions we want to check:
+	 * first __init_end, then __bss_start.
+	 */
+#ifdef CONFIG_MTD_UCLINUX
+ int romfs_size;
+ unsigned int romfs_base;
+ char *old_klimit = klimit;
+
+ romfs_base = (ram ? ram : (unsigned int)&__init_end);
+ romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
+ if (!romfs_size) {
+ romfs_base = (unsigned int)&__bss_start;
+ romfs_size = PAGE_ALIGN(get_romfs_len((unsigned *)romfs_base));
+ }
+
+ /* Move ROMFS out of BSS before clearing it */
+ if (romfs_size > 0) {
+ memmove(&__bss_stop, (int *)romfs_base, romfs_size);
+ klimit += romfs_size;
+ }
+#endif
+
+	/* Clear the BSS sections */
+ memset(__bss_start, 0, __bss_stop-__bss_start);
+ memset(_ssbss, 0, _esbss-_ssbss);
+
+	/* Initialize the device tree for use in early_printk */
+ early_init_devtree(_fdt_start);
+
+	/* Set up kernel_tlb after the BSS has been cleared.
+	 * Maybe worth moving this to asm code. */
+ kernel_tlb = tlb0 + tlb1;
+ /* printk("TLB1 0x%08x, TLB0 0x%08x, tlb 0x%x\n", tlb0,
+ tlb1, kernel_tlb); */
+
+ pr_info("Ramdisk addr 0x%08x, ", ram);
+ if (fdt)
+ pr_info("FDT at 0x%08x\n", fdt);
+ else
+ pr_info("Compiled-in FDT at %p\n", _fdt_start);
+
+#ifdef CONFIG_MTD_UCLINUX
+ pr_info("Found romfs @ 0x%08x (0x%08x)\n",
+ romfs_base, romfs_size);
+ pr_info("#### klimit %p ####\n", old_klimit);
+ BUG_ON(romfs_size < 0); /* What else can we do? */
+
+ pr_info("Moved 0x%08x bytes from 0x%08x to 0x%08x\n",
+ romfs_size, romfs_base, (unsigned)&__bss_stop);
+
+ pr_info("New klimit: 0x%08x\n", (unsigned)klimit);
+#endif
+
+#if CONFIG_XILINX_MICROBLAZE0_USE_MSR_INSTR
+ if (msr) {
+		pr_info("!!!Your kernel is built to use the MSR instruction but ");
+		pr_cont("the CPU doesn't have it %x\n", msr);
+ }
+#else
+ if (!msr) {
+		pr_info("!!!Your kernel is not built to use the MSR instruction but ");
+		pr_cont("the CPU has it %x\n", msr);
+ }
+#endif
+
+	/* Do not copy the reset vectors: offset = 0x2 skips the first
+	 * two instructions. dst points to the MB vectors, which are placed
+	 * in block RAM. To copy the reset vector as well, set offset to 0x0. */
+#if !CONFIG_MANUAL_RESET_VECTOR
+ offset = 0x2;
+#endif
+ dst = (unsigned long *) (offset * sizeof(u32));
+ for (src = __ivt_start + offset; src < __ivt_end; src++, dst++)
+ *dst = *src;
+
+ /* Initialize global data */
+ per_cpu(KM, 0) = 0x1; /* We start in kernel mode */
+ per_cpu(CURRENT_SAVE, 0) = (unsigned long)current;
+}
+
+void __init time_init(void)
+{
+ of_clk_init(NULL);
+ setup_cpuinfo_clk();
+ timer_probe();
+}
+
+#ifdef CONFIG_DEBUG_FS
+struct dentry *of_debugfs_root;
+
+static int microblaze_debugfs_init(void)
+{
+ of_debugfs_root = debugfs_create_dir("microblaze", NULL);
+ return 0;
+}
+arch_initcall(microblaze_debugfs_init);
+
+# ifdef CONFIG_MMU
+static int __init debugfs_tlb(void)
+{
+ debugfs_create_u32("tlb_skip", S_IRUGO, of_debugfs_root, &tlb_skip);
+ return 0;
+}
+device_initcall(debugfs_tlb);
+# endif
+#endif
diff --git a/arch/microblaze/kernel/signal.c b/arch/microblaze/kernel/signal.c
new file mode 100644
index 000000000..5a8d173d7
--- /dev/null
+++ b/arch/microblaze/kernel/signal.c
@@ -0,0 +1,322 @@
+/*
+ * Signal handling
+ *
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2003,2004 John Williams <jwilliams@itee.uq.edu.au>
+ * Copyright (C) 2001 NEC Corporation
+ * Copyright (C) 2001 Miles Bader <miles@gnu.org>
+ * Copyright (C) 1999,2000 Niibe Yutaka & Kaz Kojima
+ * Copyright (C) 1991,1992 Linus Torvalds
+ *
+ * 1997-11-28 Modified for POSIX.1b signals by Richard Henderson
+ *
+ * This file was derived from the sh version, arch/sh/kernel/signal.c
+ *
+ * This file is subject to the terms and conditions of the GNU General
+ * Public License. See the file COPYING in the main directory of this
+ * archive for more details.
+ */
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/kernel.h>
+#include <linux/signal.h>
+#include <linux/errno.h>
+#include <linux/wait.h>
+#include <linux/ptrace.h>
+#include <linux/unistd.h>
+#include <linux/stddef.h>
+#include <linux/personality.h>
+#include <linux/percpu.h>
+#include <linux/linkage.h>
+#include <linux/tracehook.h>
+#include <asm/entry.h>
+#include <asm/ucontext.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <asm/cacheflush.h>
+#include <asm/syscalls.h>
+
+/*
+ * Do a signal return; undo the signal stack.
+ */
+struct sigframe {
+ struct sigcontext sc;
+ unsigned long extramask[_NSIG_WORDS-1];
+ unsigned long tramp[2]; /* signal trampoline */
+};
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+ unsigned long tramp[2]; /* signal trampoline */
+};
+
+static int restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc, int *rval_p)
+{
+ unsigned int err = 0;
+
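+	/* err accumulates any fault reported by __get_user() */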
+#define COPY(x) {err |= __get_user(regs->x, &sc->regs.x); }
+ COPY(r0);
+ COPY(r1);
+ COPY(r2); COPY(r3); COPY(r4); COPY(r5);
+ COPY(r6); COPY(r7); COPY(r8); COPY(r9);
+ COPY(r10); COPY(r11); COPY(r12); COPY(r13);
+ COPY(r14); COPY(r15); COPY(r16); COPY(r17);
+ COPY(r18); COPY(r19); COPY(r20); COPY(r21);
+ COPY(r22); COPY(r23); COPY(r24); COPY(r25);
+ COPY(r26); COPY(r27); COPY(r28); COPY(r29);
+ COPY(r30); COPY(r31);
+ COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
+#undef COPY
+
+ *rval_p = regs->r3;
+
+ return err;
+}
+
+asmlinkage long sys_rt_sigreturn(struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame =
+ (struct rt_sigframe __user *)(regs->r1);
+
+ sigset_t set;
+ int rval;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext, &rval))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ return rval;
+
+badframe:
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+/*
+ * Set up a signal frame.
+ */
+
+static int
+setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
+ unsigned long mask)
+{
+ int err = 0;
+
+#define COPY(x) {err |= __put_user(regs->x, &sc->regs.x); }
+ COPY(r0);
+ COPY(r1);
+ COPY(r2); COPY(r3); COPY(r4); COPY(r5);
+ COPY(r6); COPY(r7); COPY(r8); COPY(r9);
+ COPY(r10); COPY(r11); COPY(r12); COPY(r13);
+ COPY(r14); COPY(r15); COPY(r16); COPY(r17);
+ COPY(r18); COPY(r19); COPY(r20); COPY(r21);
+ COPY(r22); COPY(r23); COPY(r24); COPY(r25);
+ COPY(r26); COPY(r27); COPY(r28); COPY(r29);
+ COPY(r30); COPY(r31);
+ COPY(pc); COPY(ear); COPY(esr); COPY(fsr);
+#undef COPY
+
+ err |= __put_user(mask, &sc->oldmask);
+
+ return err;
+}
+
+/*
+ * Determine which stack to use.
+ */
+static inline void __user *
+get_sigframe(struct ksignal *ksig, struct pt_regs *regs, size_t frame_size)
+{
+ /* Default to using normal stack */
+ unsigned long sp = sigsp(regs->r1, ksig);
+
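+	/* Keep the signal frame 8-byte aligned */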
+ return (void __user *)((sp - frame_size) & -8UL);
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ int err = 0, sig = ksig->sig;
+ unsigned long address = 0;
+#ifdef CONFIG_MMU
+ pmd_t *pmdp;
+ pte_t *ptep;
+#endif
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+
+ if (!access_ok(frame, sizeof(*frame)))
+ return -EFAULT;
+
+ if (ksig->ka.sa.sa_flags & SA_SIGINFO)
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->r1);
+ err |= setup_sigcontext(&frame->uc.uc_mcontext,
+ regs, set->sig[0]);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+
+	/* Set up to return from userspace. If provided, use a stub
+	 * already in userspace. */
+ /* minus 8 is offset to cater for "rtsd r15,8" */
+ /* addi r12, r0, __NR_sigreturn */
+ err |= __put_user(0x31800000 | __NR_rt_sigreturn ,
+ frame->tramp + 0);
+ /* brki r14, 0x8 */
+ err |= __put_user(0xb9cc0008, frame->tramp + 1);
+
+	/* Return from the sighandler will jump to the tramp.
+	 * Negative 8 offset because the return is "rtsd r15, 8". */
+ regs->r15 = ((unsigned long)frame->tramp)-8;
+
+ address = ((unsigned long)frame->tramp);
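+	/* The trampoline was written through the data cache; make sure
+	 * the instruction cache sees it before returning through it. */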
+#ifdef CONFIG_MMU
+ pmdp = pmd_off(current->mm, address);
+
+ preempt_disable();
+ ptep = pte_offset_map(pmdp, address);
+ if (pte_present(*ptep)) {
+ address = (unsigned long) page_address(pte_page(*ptep));
+		/* MS: need to add the offset within the page */
+ address += ((unsigned long)frame->tramp) & ~PAGE_MASK;
+ /* MS address is virtual */
+ address = __virt_to_phys(address);
+ invalidate_icache_range(address, address + 8);
+ flush_dcache_range(address, address + 8);
+ }
+ pte_unmap(ptep);
+ preempt_enable();
+#else
+ flush_icache_range(address, address + 8);
+ flush_dcache_range(address, address + 8);
+#endif
+ if (err)
+ return -EFAULT;
+
+ /* Set up registers for signal handler */
+ regs->r1 = (unsigned long) frame;
+
+ /* Signal handler args: */
+ regs->r5 = sig; /* arg 0: signum */
+ regs->r6 = (unsigned long) &frame->info; /* arg 1: siginfo */
+	regs->r7 = (unsigned long) &frame->uc; /* arg 2: ucontext */
+ /* Offset to handle microblaze rtid r14, 0 */
+ regs->pc = (unsigned long)ksig->ka.sa.sa_handler;
+
+#ifdef DEBUG_SIG
+ pr_info("SIG deliver (%s:%d): sp=%p pc=%08lx\n",
+ current->comm, current->pid, frame, regs->pc);
+#endif
+
+ return 0;
+}
+
+/* Handle restarting system calls */
+static inline void
+handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
+{
+ switch (regs->r3) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ if (!has_handler)
+ goto do_restart;
+ regs->r3 = -EINTR;
+ break;
+ case -ERESTARTSYS:
+ if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
+ regs->r3 = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
+do_restart:
+ /* offset of 4 bytes to re-execute trap (brki) instruction */
+ regs->pc -= 4;
+ break;
+ }
+}
+
+/*
+ * OK, we're invoking a handler
+ */
+
+static void
+handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, test_thread_flag(TIF_SINGLESTEP));
+}
+
+/*
+ * Note that 'init' is a special process: it doesn't get signals it doesn't
+ * want to handle. Thus you cannot kill init even with a SIGKILL even by
+ * mistake.
+ *
+ * Note that we go through the signals twice: once to check the signals that
+ * the kernel can handle, and then we build all the user-level signal handling
+ * stack-frames in one go after that.
+ */
+static void do_signal(struct pt_regs *regs, int in_syscall)
+{
+ struct ksignal ksig;
+
+#ifdef DEBUG_SIG
+ pr_info("do signal: %p %d\n", regs, in_syscall);
+ pr_info("do signal2: %lx %lx %ld [%lx]\n", regs->pc, regs->r1,
+ regs->r12, current_thread_info()->flags);
+#endif
+
+ if (get_signal(&ksig)) {
+ /* Whee! Actually deliver the signal. */
+ if (in_syscall)
+ handle_restart(regs, &ksig.ka, 1);
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ if (in_syscall)
+ handle_restart(regs, NULL, 0);
+
+ /*
+ * If there's no signal to deliver, we just put the saved sigmask
+ * back.
+ */
+ restore_saved_sigmask();
+}
+
+asmlinkage void do_notify_resume(struct pt_regs *regs, int in_syscall)
+{
+ if (test_thread_flag(TIF_SIGPENDING) ||
+ test_thread_flag(TIF_NOTIFY_SIGNAL))
+ do_signal(regs, in_syscall);
+
+ if (test_thread_flag(TIF_NOTIFY_RESUME))
+ tracehook_notify_resume(regs);
+}
diff --git a/arch/microblaze/kernel/stacktrace.c b/arch/microblaze/kernel/stacktrace.c
new file mode 100644
index 000000000..b266c4d6e
--- /dev/null
+++ b/arch/microblaze/kernel/stacktrace.c
@@ -0,0 +1,31 @@
+/*
+ * Stack trace support for Microblaze.
+ *
+ * Copyright (C) 2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2009 PetaLogix
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/sched.h>
+#include <linux/stacktrace.h>
+#include <linux/thread_info.h>
+#include <linux/ptrace.h>
+#include <asm/unwind.h>
+
+void save_stack_trace(struct stack_trace *trace)
+{
+	/* Exclude our helper functions from the trace */
+ trace->skip += 2;
+ microblaze_unwind(NULL, trace, "");
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ microblaze_unwind(tsk, trace, "");
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
diff --git a/arch/microblaze/kernel/sys_microblaze.c b/arch/microblaze/kernel/sys_microblaze.c
new file mode 100644
index 000000000..ed9f34da1
--- /dev/null
+++ b/arch/microblaze/kernel/sys_microblaze.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2007 John Williams <john.williams@petalogix.com>
+ *
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ * Yasushi SHOJI <yashi@atmark-techno.com>
+ * Tetsuya OHKAWA <tetsuya@atmark-techno.com>
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/errno.h>
+#include <linux/export.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/syscalls.h>
+#include <linux/sem.h>
+#include <linux/msg.h>
+#include <linux/shm.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/sys.h>
+#include <linux/ipc.h>
+#include <linux/file.h>
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <linux/semaphore.h>
+#include <linux/uaccess.h>
+#include <linux/unistd.h>
+#include <linux/slab.h>
+#include <asm/syscalls.h>
+
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ off_t, pgoff)
+{
+ if (pgoff & ~PAGE_MASK)
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd, pgoff >> PAGE_SHIFT);
+}
+
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags, unsigned long, fd,
+ unsigned long, pgoff)
+{
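+	/* mmap2 takes the offset in 4096-byte units; reject offsets that
+	 * are not page-aligned when PAGE_SIZE is larger than 4096. */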
+ if (pgoff & (~PAGE_MASK >> 12))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ pgoff >> (PAGE_SHIFT - 12));
+}
diff --git a/arch/microblaze/kernel/syscall_table.S b/arch/microblaze/kernel/syscall_table.S
new file mode 100644
index 000000000..ce006646f
--- /dev/null
+++ b/arch/microblaze/kernel/syscall_table.S
@@ -0,0 +1,6 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#define __SYSCALL(nr, entry, nargs) .long entry
+ENTRY(sys_call_table)
+#include <asm/syscall_table.h>
+#undef __SYSCALL
diff --git a/arch/microblaze/kernel/syscalls/Makefile b/arch/microblaze/kernel/syscalls/Makefile
new file mode 100644
index 000000000..659faefdc
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/Makefile
@@ -0,0 +1,38 @@
+# SPDX-License-Identifier: GPL-2.0
+kapi := arch/$(SRCARCH)/include/generated/asm
+uapi := arch/$(SRCARCH)/include/generated/uapi/asm
+
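+# Create the generated-header output directories at Makefile parse time.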
+_dummy := $(shell [ -d '$(uapi)' ] || mkdir -p '$(uapi)') \
+ $(shell [ -d '$(kapi)' ] || mkdir -p '$(kapi)')
+
+syscall := $(srctree)/$(src)/syscall.tbl
+syshdr := $(srctree)/$(src)/syscallhdr.sh
+systbl := $(srctree)/$(src)/syscalltbl.sh
+
+quiet_cmd_syshdr = SYSHDR $@
+ cmd_syshdr = $(CONFIG_SHELL) '$(syshdr)' '$<' '$@' \
+ '$(syshdr_abis_$(basetarget))' \
+ '$(syshdr_pfx_$(basetarget))' \
+ '$(syshdr_offset_$(basetarget))'
+
+quiet_cmd_systbl = SYSTBL $@
+ cmd_systbl = $(CONFIG_SHELL) '$(systbl)' '$<' '$@' \
+ '$(systbl_abis_$(basetarget))' \
+ '$(systbl_abi_$(basetarget))' \
+ '$(systbl_offset_$(basetarget))'
+
+$(uapi)/unistd_32.h: $(syscall) $(syshdr)
+ $(call if_changed,syshdr)
+
+$(kapi)/syscall_table.h: $(syscall) $(systbl)
+ $(call if_changed,systbl)
+
+uapisyshdr-y += unistd_32.h
+kapisyshdr-y += syscall_table.h
+
+targets += $(uapisyshdr-y) $(kapisyshdr-y)
+
+PHONY += all
+all: $(addprefix $(uapi)/,$(uapisyshdr-y))
+all: $(addprefix $(kapi)/,$(kapisyshdr-y))
+ @:
diff --git a/arch/microblaze/kernel/syscalls/syscall.tbl b/arch/microblaze/kernel/syscalls/syscall.tbl
new file mode 100644
index 000000000..aae729c95
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/syscall.tbl
@@ -0,0 +1,448 @@
+# SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
+#
+# system call numbers and entry vectors for microblaze
+#
+# The format is:
+# <number> <abi> <name> <entry point>
+#
+# The <abi> is always "common" for this file
+#
+0 common restart_syscall sys_restart_syscall
+1 common exit sys_exit
+2 common fork sys_fork
+3 common read sys_read
+4 common write sys_write
+5 common open sys_open
+6 common close sys_close
+7 common waitpid sys_waitpid
+8 common creat sys_creat
+9 common link sys_link
+10 common unlink sys_unlink
+11 common execve sys_execve
+12 common chdir sys_chdir
+13 common time sys_time32
+14 common mknod sys_mknod
+15 common chmod sys_chmod
+16 common lchown sys_lchown
+17 common break sys_ni_syscall
+18 common oldstat sys_ni_syscall
+19 common lseek sys_lseek
+20 common getpid sys_getpid
+21 common mount sys_mount
+22 common umount sys_oldumount
+23 common setuid sys_setuid
+24 common getuid sys_getuid
+25 common stime sys_stime32
+26 common ptrace sys_ptrace
+27 common alarm sys_alarm
+28 common oldfstat sys_ni_syscall
+29 common pause sys_pause
+30 common utime sys_utime32
+31 common stty sys_ni_syscall
+32 common gtty sys_ni_syscall
+33 common access sys_access
+34 common nice sys_nice
+35 common ftime sys_ni_syscall
+36 common sync sys_sync
+37 common kill sys_kill
+38 common rename sys_rename
+39 common mkdir sys_mkdir
+40 common rmdir sys_rmdir
+41 common dup sys_dup
+42 common pipe sys_pipe
+43 common times sys_times
+44 common prof sys_ni_syscall
+45 common brk sys_brk
+46 common setgid sys_setgid
+47 common getgid sys_getgid
+48 common signal sys_signal
+49 common geteuid sys_geteuid
+50 common getegid sys_getegid
+51 common acct sys_acct
+52 common umount2 sys_umount
+53 common lock sys_ni_syscall
+54 common ioctl sys_ioctl
+55 common fcntl sys_fcntl
+56 common mpx sys_ni_syscall
+57 common setpgid sys_setpgid
+58 common ulimit sys_ni_syscall
+59 common oldolduname sys_ni_syscall
+60 common umask sys_umask
+61 common chroot sys_chroot
+62 common ustat sys_ustat
+63 common dup2 sys_dup2
+64 common getppid sys_getppid
+65 common getpgrp sys_getpgrp
+66 common setsid sys_setsid
+67 common sigaction sys_ni_syscall
+68 common sgetmask sys_sgetmask
+69 common ssetmask sys_ssetmask
+70 common setreuid sys_setreuid
+71 common setregid sys_setregid
+72 common sigsuspend sys_ni_syscall
+73 common sigpending sys_sigpending
+74 common sethostname sys_sethostname
+75 common setrlimit sys_setrlimit
+76 common getrlimit sys_ni_syscall
+77 common getrusage sys_getrusage
+78 common gettimeofday sys_gettimeofday
+79 common settimeofday sys_settimeofday
+80 common getgroups sys_getgroups
+81 common setgroups sys_setgroups
+82 common select sys_ni_syscall
+83 common symlink sys_symlink
+84 common oldlstat sys_ni_syscall
+85 common readlink sys_readlink
+86 common uselib sys_uselib
+87 common swapon sys_swapon
+88 common reboot sys_reboot
+89 common readdir sys_ni_syscall
+90 common mmap sys_mmap
+91 common munmap sys_munmap
+92 common truncate sys_truncate
+93 common ftruncate sys_ftruncate
+94 common fchmod sys_fchmod
+95 common fchown sys_fchown
+96 common getpriority sys_getpriority
+97 common setpriority sys_setpriority
+98 common profil sys_ni_syscall
+99 common statfs sys_statfs
+100 common fstatfs sys_fstatfs
+101 common ioperm sys_ni_syscall
+102 common socketcall sys_socketcall
+103 common syslog sys_syslog
+104 common setitimer sys_setitimer
+105 common getitimer sys_getitimer
+106 common stat sys_newstat
+107 common lstat sys_newlstat
+108 common fstat sys_newfstat
+109 common olduname sys_ni_syscall
+110 common iopl sys_ni_syscall
+111 common vhangup sys_vhangup
+112 common idle sys_ni_syscall
+113 common vm86old sys_ni_syscall
+114 common wait4 sys_wait4
+115 common swapoff sys_swapoff
+116 common sysinfo sys_sysinfo
+117 common ipc sys_ni_syscall
+118 common fsync sys_fsync
+119 common sigreturn sys_ni_syscall
+120 common clone sys_clone
+121 common setdomainname sys_setdomainname
+122 common uname sys_newuname
+123 common modify_ldt sys_ni_syscall
+124 common adjtimex sys_adjtimex_time32
+125 common mprotect sys_mprotect
+126 common sigprocmask sys_sigprocmask
+127 common create_module sys_ni_syscall
+128 common init_module sys_init_module
+129 common delete_module sys_delete_module
+130 common get_kernel_syms sys_ni_syscall
+131 common quotactl sys_quotactl
+132 common getpgid sys_getpgid
+133 common fchdir sys_fchdir
+134 common bdflush sys_bdflush
+135 common sysfs sys_sysfs
+136 common personality sys_personality
+137 common afs_syscall sys_ni_syscall
+138 common setfsuid sys_setfsuid
+139 common setfsgid sys_setfsgid
+140 common _llseek sys_llseek
+141 common getdents sys_getdents
+142 common _newselect sys_select
+143 common flock sys_flock
+144 common msync sys_msync
+145 common readv sys_readv
+146 common writev sys_writev
+147 common getsid sys_getsid
+148 common fdatasync sys_fdatasync
+149 common _sysctl sys_ni_syscall
+150 common mlock sys_mlock
+151 common munlock sys_munlock
+152 common mlockall sys_mlockall
+153 common munlockall sys_munlockall
+154 common sched_setparam sys_sched_setparam
+155 common sched_getparam sys_sched_getparam
+156 common sched_setscheduler sys_sched_setscheduler
+157 common sched_getscheduler sys_sched_getscheduler
+158 common sched_yield sys_sched_yield
+159 common sched_get_priority_max sys_sched_get_priority_max
+160 common sched_get_priority_min sys_sched_get_priority_min
+161 common sched_rr_get_interval sys_sched_rr_get_interval_time32
+162 common nanosleep sys_nanosleep_time32
+163 common mremap sys_mremap
+164 common setresuid sys_setresuid
+165 common getresuid sys_getresuid
+166 common vm86 sys_ni_syscall
+167 common query_module sys_ni_syscall
+168 common poll sys_poll
+169 common nfsservctl sys_ni_syscall
+170 common setresgid sys_setresgid
+171 common getresgid sys_getresgid
+172 common prctl sys_prctl
+173 common rt_sigreturn sys_rt_sigreturn_wrapper
+174 common rt_sigaction sys_rt_sigaction
+175 common rt_sigprocmask sys_rt_sigprocmask
+176 common rt_sigpending sys_rt_sigpending
+177 common rt_sigtimedwait sys_rt_sigtimedwait_time32
+178 common rt_sigqueueinfo sys_rt_sigqueueinfo
+179 common rt_sigsuspend sys_rt_sigsuspend
+180 common pread64 sys_pread64
+181 common pwrite64 sys_pwrite64
+182 common chown sys_chown
+183 common getcwd sys_getcwd
+184 common capget sys_capget
+185 common capset sys_capset
+186 common sigaltstack sys_ni_syscall
+187 common sendfile sys_sendfile
+188 common getpmsg sys_ni_syscall
+189 common putpmsg sys_ni_syscall
+190 common vfork sys_vfork
+191 common ugetrlimit sys_getrlimit
+192 common mmap2 sys_mmap2
+193 common truncate64 sys_truncate64
+194 common ftruncate64 sys_ftruncate64
+195 common stat64 sys_stat64
+196 common lstat64 sys_lstat64
+197 common fstat64 sys_fstat64
+198 common lchown32 sys_lchown
+199 common getuid32 sys_getuid
+200 common getgid32 sys_getgid
+201 common geteuid32 sys_geteuid
+202 common getegid32 sys_getegid
+203 common setreuid32 sys_setreuid
+204 common setregid32 sys_setregid
+205 common getgroups32 sys_getgroups
+206 common setgroups32 sys_setgroups
+207 common fchown32 sys_fchown
+208 common setresuid32 sys_setresuid
+209 common getresuid32 sys_getresuid
+210 common setresgid32 sys_setresgid
+211 common getresgid32 sys_getresgid
+212 common chown32 sys_chown
+213 common setuid32 sys_setuid
+214 common setgid32 sys_setgid
+215 common setfsuid32 sys_setfsuid
+216 common setfsgid32 sys_setfsgid
+217 common pivot_root sys_pivot_root
+218 common mincore sys_mincore
+219 common madvise sys_madvise
+220 common getdents64 sys_getdents64
+221 common fcntl64 sys_fcntl64
+# 222 is reserved for TUX
+# 223 is unused
+224 common gettid sys_gettid
+225 common readahead sys_readahead
+226 common setxattr sys_setxattr
+227 common lsetxattr sys_lsetxattr
+228 common fsetxattr sys_fsetxattr
+229 common getxattr sys_getxattr
+230 common lgetxattr sys_lgetxattr
+231 common fgetxattr sys_fgetxattr
+232 common listxattr sys_listxattr
+233 common llistxattr sys_llistxattr
+234 common flistxattr sys_flistxattr
+235 common removexattr sys_removexattr
+236 common lremovexattr sys_lremovexattr
+237 common fremovexattr sys_fremovexattr
+238 common tkill sys_tkill
+239 common sendfile64 sys_sendfile64
+240 common futex sys_futex_time32
+241 common sched_setaffinity sys_sched_setaffinity
+242 common sched_getaffinity sys_sched_getaffinity
+243 common set_thread_area sys_ni_syscall
+244 common get_thread_area sys_ni_syscall
+245 common io_setup sys_io_setup
+246 common io_destroy sys_io_destroy
+247 common io_getevents sys_io_getevents_time32
+248 common io_submit sys_io_submit
+249 common io_cancel sys_io_cancel
+250 common fadvise64 sys_fadvise64
+# 251 is available for reuse (was briefly sys_set_zone_reclaim)
+252 common exit_group sys_exit_group
+253 common lookup_dcookie sys_lookup_dcookie
+254 common epoll_create sys_epoll_create
+255 common epoll_ctl sys_epoll_ctl
+256 common epoll_wait sys_epoll_wait
+257 common remap_file_pages sys_remap_file_pages
+258 common set_tid_address sys_set_tid_address
+259 common timer_create sys_timer_create
+260 common timer_settime sys_timer_settime32
+261 common timer_gettime sys_timer_gettime32
+262 common timer_getoverrun sys_timer_getoverrun
+263 common timer_delete sys_timer_delete
+264 common clock_settime sys_clock_settime32
+265 common clock_gettime sys_clock_gettime32
+266 common clock_getres sys_clock_getres_time32
+267 common clock_nanosleep sys_clock_nanosleep_time32
+268 common statfs64 sys_statfs64
+269 common fstatfs64 sys_fstatfs64
+270 common tgkill sys_tgkill
+271 common utimes sys_utimes_time32
+272 common fadvise64_64 sys_fadvise64_64
+273 common vserver sys_ni_syscall
+274 common mbind sys_mbind
+275 common get_mempolicy sys_get_mempolicy
+276 common set_mempolicy sys_set_mempolicy
+277 common mq_open sys_mq_open
+278 common mq_unlink sys_mq_unlink
+279 common mq_timedsend sys_mq_timedsend_time32
+280 common mq_timedreceive sys_mq_timedreceive_time32
+281 common mq_notify sys_mq_notify
+282 common mq_getsetattr sys_mq_getsetattr
+283 common kexec_load sys_kexec_load
+284 common waitid sys_waitid
+# 285 was setaltroot
+286 common add_key sys_add_key
+287 common request_key sys_request_key
+288 common keyctl sys_keyctl
+289 common ioprio_set sys_ioprio_set
+290 common ioprio_get sys_ioprio_get
+291 common inotify_init sys_inotify_init
+292 common inotify_add_watch sys_inotify_add_watch
+293 common inotify_rm_watch sys_inotify_rm_watch
+294 common migrate_pages sys_ni_syscall
+295 common openat sys_openat
+296 common mkdirat sys_mkdirat
+297 common mknodat sys_mknodat
+298 common fchownat sys_fchownat
+299 common futimesat sys_futimesat_time32
+300 common fstatat64 sys_fstatat64
+301 common unlinkat sys_unlinkat
+302 common renameat sys_renameat
+303 common linkat sys_linkat
+304 common symlinkat sys_symlinkat
+305 common readlinkat sys_readlinkat
+306 common fchmodat sys_fchmodat
+307 common faccessat sys_faccessat
+308 common pselect6 sys_pselect6_time32
+309 common ppoll sys_ppoll_time32
+310 common unshare sys_unshare
+311 common set_robust_list sys_set_robust_list
+312 common get_robust_list sys_get_robust_list
+313 common splice sys_splice
+314 common sync_file_range sys_sync_file_range
+315 common tee sys_tee
+316 common vmsplice sys_vmsplice
+317 common move_pages sys_move_pages
+318 common getcpu sys_getcpu
+319 common epoll_pwait sys_epoll_pwait
+320 common utimensat sys_utimensat_time32
+321 common signalfd sys_signalfd
+322 common timerfd_create sys_timerfd_create
+323 common eventfd sys_eventfd
+324 common fallocate sys_fallocate
+325 common semtimedop sys_semtimedop_time32
+326 common timerfd_settime sys_timerfd_settime32
+327 common timerfd_gettime sys_timerfd_gettime32
+328 common semctl sys_old_semctl
+329 common semget sys_semget
+330 common semop sys_semop
+331 common msgctl sys_old_msgctl
+332 common msgget sys_msgget
+333 common msgrcv sys_msgrcv
+334 common msgsnd sys_msgsnd
+335 common shmat sys_shmat
+336 common shmctl sys_old_shmctl
+337 common shmdt sys_shmdt
+338 common shmget sys_shmget
+339 common signalfd4 sys_signalfd4
+340 common eventfd2 sys_eventfd2
+341 common epoll_create1 sys_epoll_create1
+342 common dup3 sys_dup3
+343 common pipe2 sys_pipe2
+344 common inotify_init1 sys_inotify_init1
+345 common socket sys_socket
+346 common socketpair sys_socketpair
+347 common bind sys_bind
+348 common listen sys_listen
+349 common accept sys_accept
+350 common connect sys_connect
+351 common getsockname sys_getsockname
+352 common getpeername sys_getpeername
+353 common sendto sys_sendto
+354 common send sys_send
+355 common recvfrom sys_recvfrom
+356 common recv sys_recv
+357 common setsockopt sys_setsockopt
+358 common getsockopt sys_getsockopt
+359 common shutdown sys_shutdown
+360 common sendmsg sys_sendmsg
+361 common recvmsg sys_recvmsg
+362 common accept4 sys_accept4
+363 common preadv sys_preadv
+364 common pwritev sys_pwritev
+365 common rt_tgsigqueueinfo sys_rt_tgsigqueueinfo
+366 common perf_event_open sys_perf_event_open
+367 common recvmmsg sys_recvmmsg_time32
+368 common fanotify_init sys_fanotify_init
+369 common fanotify_mark sys_fanotify_mark
+370 common prlimit64 sys_prlimit64
+371 common name_to_handle_at sys_name_to_handle_at
+372 common open_by_handle_at sys_open_by_handle_at
+373 common clock_adjtime sys_clock_adjtime32
+374 common syncfs sys_syncfs
+375 common setns sys_setns
+376 common sendmmsg sys_sendmmsg
+377 common process_vm_readv sys_process_vm_readv
+378 common process_vm_writev sys_process_vm_writev
+379 common kcmp sys_kcmp
+380 common finit_module sys_finit_module
+381 common sched_setattr sys_sched_setattr
+382 common sched_getattr sys_sched_getattr
+383 common renameat2 sys_renameat2
+384 common seccomp sys_seccomp
+385 common getrandom sys_getrandom
+386 common memfd_create sys_memfd_create
+387 common bpf sys_bpf
+388 common execveat sys_execveat
+389 common userfaultfd sys_userfaultfd
+390 common membarrier sys_membarrier
+391 common mlock2 sys_mlock2
+392 common copy_file_range sys_copy_file_range
+393 common preadv2 sys_preadv2
+394 common pwritev2 sys_pwritev2
+395 common pkey_mprotect sys_pkey_mprotect
+396 common pkey_alloc sys_pkey_alloc
+397 common pkey_free sys_pkey_free
+398 common statx sys_statx
+399 common io_pgetevents sys_io_pgetevents_time32
+400 common rseq sys_rseq
+# 401 and 402 are unused
+403 common clock_gettime64 sys_clock_gettime
+404 common clock_settime64 sys_clock_settime
+405 common clock_adjtime64 sys_clock_adjtime
+406 common clock_getres_time64 sys_clock_getres
+407 common clock_nanosleep_time64 sys_clock_nanosleep
+408 common timer_gettime64 sys_timer_gettime
+409 common timer_settime64 sys_timer_settime
+410 common timerfd_gettime64 sys_timerfd_gettime
+411 common timerfd_settime64 sys_timerfd_settime
+412 common utimensat_time64 sys_utimensat
+413 common pselect6_time64 sys_pselect6
+414 common ppoll_time64 sys_ppoll
+416 common io_pgetevents_time64 sys_io_pgetevents
+417 common recvmmsg_time64 sys_recvmmsg
+418 common mq_timedsend_time64 sys_mq_timedsend
+419 common mq_timedreceive_time64 sys_mq_timedreceive
+420 common semtimedop_time64 sys_semtimedop
+421 common rt_sigtimedwait_time64 sys_rt_sigtimedwait
+422 common futex_time64 sys_futex
+423 common sched_rr_get_interval_time64 sys_sched_rr_get_interval
+424 common pidfd_send_signal sys_pidfd_send_signal
+425 common io_uring_setup sys_io_uring_setup
+426 common io_uring_enter sys_io_uring_enter
+427 common io_uring_register sys_io_uring_register
+428 common open_tree sys_open_tree
+429 common move_mount sys_move_mount
+430 common fsopen sys_fsopen
+431 common fsconfig sys_fsconfig
+432 common fsmount sys_fsmount
+433 common fspick sys_fspick
+434 common pidfd_open sys_pidfd_open
+435 common clone3 sys_clone3
+436 common close_range sys_close_range
+437 common openat2 sys_openat2
+438 common pidfd_getfd sys_pidfd_getfd
+439 common faccessat2 sys_faccessat2
+440 common process_madvise sys_process_madvise
diff --git a/arch/microblaze/kernel/syscalls/syscallhdr.sh b/arch/microblaze/kernel/syscalls/syscallhdr.sh
new file mode 100644
index 000000000..a914854f8
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/syscallhdr.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+prefix="$4"
+offset="$5"
+
+fileguard=_UAPI_ASM_MICROBLAZE_`basename "$out" | sed \
+ -e 'y/abcdefghijklmnopqrstuvwxyz/ABCDEFGHIJKLMNOPQRSTUVWXYZ/' \
+ -e 's/[^A-Z0-9_]/_/g' -e 's/__/_/g'`
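+
+# Emit a __NR_<name> define for every table entry, plus a __NR_syscalls
+# count (highest number + 1) for kernel-internal use.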
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ printf "#ifndef %s\n" "${fileguard}"
+ printf "#define %s\n" "${fileguard}"
+ printf "\n"
+
+ nxt=0
+ while read nr abi name entry ; do
+ if [ -z "$offset" ]; then
+ printf "#define __NR_%s%s\t%s\n" \
+ "${prefix}" "${name}" "${nr}"
+ else
+ printf "#define __NR_%s%s\t(%s + %s)\n" \
+ "${prefix}" "${name}" "${offset}" "${nr}"
+ fi
+ nxt=$((nr+1))
+ done
+
+ printf "\n"
+ printf "#ifdef __KERNEL__\n"
+ printf "#define __NR_syscalls\t%s\n" "${nxt}"
+ printf "#endif\n"
+ printf "\n"
+ printf "#endif /* %s */\n" "${fileguard}"
+) > "$out"
diff --git a/arch/microblaze/kernel/syscalls/syscalltbl.sh b/arch/microblaze/kernel/syscalls/syscalltbl.sh
new file mode 100644
index 000000000..85d78d930
--- /dev/null
+++ b/arch/microblaze/kernel/syscalls/syscalltbl.sh
@@ -0,0 +1,32 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0
+
+in="$1"
+out="$2"
+my_abis=`echo "($3)" | tr ',' '|'`
+my_abi="$4"
+offset="$5"
+
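+# Emit one __SYSCALL() line per entry, padding holes in the numbering
+# with sys_ni_syscall so the generated table stays densely indexed.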
+emit() {
+ t_nxt="$1"
+ t_nr="$2"
+ t_entry="$3"
+
+ while [ $t_nxt -lt $t_nr ]; do
+ printf "__SYSCALL(%s, sys_ni_syscall, )\n" "${t_nxt}"
+ t_nxt=$((t_nxt+1))
+ done
+ printf "__SYSCALL(%s, %s, )\n" "${t_nxt}" "${t_entry}"
+}
+
+grep -E "^[0-9A-Fa-fXx]+[[:space:]]+${my_abis}" "$in" | sort -n | (
+ nxt=0
+ if [ -z "$offset" ]; then
+ offset=0
+ fi
+
+ while read nr abi name entry ; do
+ emit $((nxt+offset)) $((nr+offset)) $entry
+ nxt=$((nr+1))
+ done
+) > "$out"
diff --git a/arch/microblaze/kernel/timer.c b/arch/microblaze/kernel/timer.c
new file mode 100644
index 000000000..f8832cf49
--- /dev/null
+++ b/arch/microblaze/kernel/timer.c
@@ -0,0 +1,326 @@
+/*
+ * Copyright (C) 2007-2013 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2012-2013 Xilinx, Inc.
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/sched/clock.h>
+#include <linux/sched_clock.h>
+#include <linux/clk.h>
+#include <linux/clockchips.h>
+#include <linux/of_address.h>
+#include <linux/of_irq.h>
+#include <linux/timecounter.h>
+#include <asm/cpuinfo.h>
+
+static void __iomem *timer_baseaddr;
+
+static unsigned int freq_div_hz;
+static unsigned int timer_clock_freq;
+
+#define TCSR0 (0x00)
+#define TLR0 (0x04)
+#define TCR0 (0x08)
+#define TCSR1 (0x10)
+#define TLR1 (0x14)
+#define TCR1 (0x18)
+
+#define TCSR_MDT (1<<0)
+#define TCSR_UDT (1<<1)
+#define TCSR_GENT (1<<2)
+#define TCSR_CAPT (1<<3)
+#define TCSR_ARHT (1<<4)
+#define TCSR_LOAD (1<<5)
+#define TCSR_ENIT (1<<6)
+#define TCSR_ENT (1<<7)
+#define TCSR_TINT (1<<8)
+#define TCSR_PWMA (1<<9)
+#define TCSR_ENALL (1<<10)
+
+static unsigned int (*read_fn)(void __iomem *);
+static void (*write_fn)(u32, void __iomem *);
+
+static void timer_write32(u32 val, void __iomem *addr)
+{
+ iowrite32(val, addr);
+}
+
+static unsigned int timer_read32(void __iomem *addr)
+{
+ return ioread32(addr);
+}
+
+static void timer_write32_be(u32 val, void __iomem *addr)
+{
+ iowrite32be(val, addr);
+}
+
+static unsigned int timer_read32_be(void __iomem *addr)
+{
+ return ioread32be(addr);
+}
+
+static inline void xilinx_timer0_stop(void)
+{
+ write_fn(read_fn(timer_baseaddr + TCSR0) & ~TCSR_ENT,
+ timer_baseaddr + TCSR0);
+}
+
+static inline void xilinx_timer0_start_periodic(unsigned long load_val)
+{
+ if (!load_val)
+ load_val = 1;
+ /* loading value to timer reg */
+ write_fn(load_val, timer_baseaddr + TLR0);
+
+ /* load the initial value */
+ write_fn(TCSR_LOAD, timer_baseaddr + TCSR0);
+
+	/* See the timer data sheet for details:
+	 * !ENALL - don't enable 'em all
+	 * !PWMA - disable pwm
+	 * TINT - clear interrupt status
+	 * ENT - enable timer itself
+	 * ENIT - enable interrupt
+	 * !LOAD - clear the bit to let go
+	 * ARHT - auto reload
+	 * !CAPT - no external trigger
+	 * !GENT - no external signal
+	 * UDT - set the timer as down counter
+	 * !MDT0 - generate mode
+	 */
+ write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT,
+ timer_baseaddr + TCSR0);
+}
+
+static inline void xilinx_timer0_start_oneshot(unsigned long load_val)
+{
+ if (!load_val)
+ load_val = 1;
+ /* loading value to timer reg */
+ write_fn(load_val, timer_baseaddr + TLR0);
+
+ /* load the initial value */
+ write_fn(TCSR_LOAD, timer_baseaddr + TCSR0);
+
+ write_fn(TCSR_TINT|TCSR_ENIT|TCSR_ENT|TCSR_ARHT|TCSR_UDT,
+ timer_baseaddr + TCSR0);
+}
+
+static int xilinx_timer_set_next_event(unsigned long delta,
+ struct clock_event_device *dev)
+{
+ pr_debug("%s: next event, delta %x\n", __func__, (u32)delta);
+ xilinx_timer0_start_oneshot(delta);
+ return 0;
+}
+
+static int xilinx_timer_shutdown(struct clock_event_device *evt)
+{
+ pr_info("%s\n", __func__);
+ xilinx_timer0_stop();
+ return 0;
+}
+
+static int xilinx_timer_set_periodic(struct clock_event_device *evt)
+{
+ pr_info("%s\n", __func__);
+ xilinx_timer0_start_periodic(freq_div_hz);
+ return 0;
+}
+
+static struct clock_event_device clockevent_xilinx_timer = {
+ .name = "xilinx_clockevent",
+ .features = CLOCK_EVT_FEAT_ONESHOT |
+ CLOCK_EVT_FEAT_PERIODIC,
+ .shift = 8,
+ .rating = 300,
+ .set_next_event = xilinx_timer_set_next_event,
+ .set_state_shutdown = xilinx_timer_shutdown,
+ .set_state_periodic = xilinx_timer_set_periodic,
+};
+
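+/*
+ * TCSR_TINT is write-one-to-clear: reading TCSR0 and writing the same
+ * value back acknowledges a pending timer interrupt.
+ */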
+static inline void timer_ack(void)
+{
+ write_fn(read_fn(timer_baseaddr + TCSR0), timer_baseaddr + TCSR0);
+}
+
+static irqreturn_t timer_interrupt(int irq, void *dev_id)
+{
+ struct clock_event_device *evt = &clockevent_xilinx_timer;
+ timer_ack();
+ evt->event_handler(evt);
+ return IRQ_HANDLED;
+}
+
+static __init int xilinx_clockevent_init(void)
+{
+ clockevent_xilinx_timer.mult =
+ div_sc(timer_clock_freq, NSEC_PER_SEC,
+ clockevent_xilinx_timer.shift);
+ clockevent_xilinx_timer.max_delta_ns =
+ clockevent_delta2ns((u32)~0, &clockevent_xilinx_timer);
+ clockevent_xilinx_timer.max_delta_ticks = (u32)~0;
+ clockevent_xilinx_timer.min_delta_ns =
+ clockevent_delta2ns(1, &clockevent_xilinx_timer);
+ clockevent_xilinx_timer.min_delta_ticks = 1;
+ clockevent_xilinx_timer.cpumask = cpumask_of(0);
+ clockevents_register_device(&clockevent_xilinx_timer);
+
+ return 0;
+}
+
+static u64 xilinx_clock_read(void)
+{
+ return read_fn(timer_baseaddr + TCR1);
+}
+
+static u64 xilinx_read(struct clocksource *cs)
+{
+ /* reading actual value of timer 1 */
+ return (u64)xilinx_clock_read();
+}
+
+static struct timecounter xilinx_tc = {
+ .cc = NULL,
+};
+
+static u64 xilinx_cc_read(const struct cyclecounter *cc)
+{
+ return xilinx_read(NULL);
+}
+
+static struct cyclecounter xilinx_cc = {
+ .read = xilinx_cc_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .shift = 8,
+};
+
+static int __init init_xilinx_timecounter(void)
+{
+ xilinx_cc.mult = div_sc(timer_clock_freq, NSEC_PER_SEC,
+ xilinx_cc.shift);
+
+ timecounter_init(&xilinx_tc, &xilinx_cc, sched_clock());
+
+ return 0;
+}
+
+static struct clocksource clocksource_microblaze = {
+ .name = "xilinx_clocksource",
+ .rating = 300,
+ .read = xilinx_read,
+ .mask = CLOCKSOURCE_MASK(32),
+ .flags = CLOCK_SOURCE_IS_CONTINUOUS,
+};
+
+static int __init xilinx_clocksource_init(void)
+{
+ int ret;
+
+ ret = clocksource_register_hz(&clocksource_microblaze,
+ timer_clock_freq);
+ if (ret) {
+ pr_err("failed to register clocksource");
+ return ret;
+ }
+
+ /* stop timer1 */
+ write_fn(read_fn(timer_baseaddr + TCSR1) & ~TCSR_ENT,
+ timer_baseaddr + TCSR1);
+ /* start timer1 - up counting without interrupt */
+ write_fn(TCSR_TINT|TCSR_ENT|TCSR_ARHT, timer_baseaddr + TCSR1);
+
+ /* register timecounter - for ftrace support */
+ return init_xilinx_timecounter();
+}
+
+static int __init xilinx_timer_init(struct device_node *timer)
+{
+ struct clk *clk;
+ static int initialized;
+ u32 irq;
+ u32 timer_num = 1;
+ int ret;
+
+ if (initialized)
+ return -EINVAL;
+
+ initialized = 1;
+
+ timer_baseaddr = of_iomap(timer, 0);
+ if (!timer_baseaddr) {
+ pr_err("ERROR: invalid timer base address\n");
+ return -ENXIO;
+ }
+
+ write_fn = timer_write32;
+ read_fn = timer_read32;
+
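+	/*
+	 * Endianness probe: set TCSR_MDT with the little-endian accessor
+	 * and read it back; if the bit does not stick, the timer is
+	 * big-endian, so switch to the byte-swapping accessors.
+	 */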
+ write_fn(TCSR_MDT, timer_baseaddr + TCSR0);
+ if (!(read_fn(timer_baseaddr + TCSR0) & TCSR_MDT)) {
+ write_fn = timer_write32_be;
+ read_fn = timer_read32_be;
+ }
+
+ irq = irq_of_parse_and_map(timer, 0);
+ if (irq <= 0) {
+ pr_err("Failed to parse and map irq");
+ return -EINVAL;
+ }
+
+ of_property_read_u32(timer, "xlnx,one-timer-only", &timer_num);
+ if (timer_num) {
+ pr_err("Please enable two timers in HW\n");
+ return -EINVAL;
+ }
+
+ pr_info("%pOF: irq=%d\n", timer, irq);
+
+ clk = of_clk_get(timer, 0);
+ if (IS_ERR(clk)) {
+ pr_err("ERROR: timer CCF input clock not found\n");
+		/* If there is a clock-frequency property, use it */
+ of_property_read_u32(timer, "clock-frequency",
+ &timer_clock_freq);
+ } else {
+ timer_clock_freq = clk_get_rate(clk);
+ }
+
+ if (!timer_clock_freq) {
+		pr_err("ERROR: timer clock rate unknown, using CPU clock frequency\n");
+ timer_clock_freq = cpuinfo.cpu_clock_freq;
+ }
+
+ freq_div_hz = timer_clock_freq / HZ;
+
+ ret = request_irq(irq, timer_interrupt, IRQF_TIMER, "timer",
+ &clockevent_xilinx_timer);
+ if (ret) {
+ pr_err("Failed to setup IRQ");
+ return ret;
+ }
+
+ ret = xilinx_clocksource_init();
+ if (ret)
+ return ret;
+
+ ret = xilinx_clockevent_init();
+ if (ret)
+ return ret;
+
+ sched_clock_register(xilinx_clock_read, 32, timer_clock_freq);
+
+ return 0;
+}
+
+TIMER_OF_DECLARE(xilinx_timer, "xlnx,xps-timer-1.00.a",
+ xilinx_timer_init);
diff --git a/arch/microblaze/kernel/traps.c b/arch/microblaze/kernel/traps.c
new file mode 100644
index 000000000..94b6fe931
--- /dev/null
+++ b/arch/microblaze/kernel/traps.c
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2007-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2007-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+#include <linux/export.h>
+#include <linux/kernel.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/debug_locks.h>
+
+#include <asm/exceptions.h>
+#include <asm/unwind.h>
+
+void trap_init(void)
+{
+ __enable_hw_exceptions();
+}
+
+static unsigned long kstack_depth_to_print; /* 0 == entire stack */
+
+static int __init kstack_setup(char *s)
+{
+ return !kstrtoul(s, 0, &kstack_depth_to_print);
+}
+__setup("kstack=", kstack_setup);
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ unsigned long words_to_show;
+ u32 fp = (u32) sp;
+
+ if (fp == 0) {
+ if (task) {
+ fp = ((struct thread_info *)
+ (task->stack))->cpu_context.r1;
+ } else {
+ /* Pick up caller of dump_stack() */
+ fp = (u32)&sp - 8;
+ }
+ }
+
+ words_to_show = (THREAD_SIZE - (fp & (THREAD_SIZE - 1))) >> 2;
+ if (kstack_depth_to_print && (words_to_show > kstack_depth_to_print))
+ words_to_show = kstack_depth_to_print;
+
+ printk("%sKernel Stack:\n", loglvl);
+
+ /*
+ * Make the first line an 'odd' size if necessary to get
+ * remaining lines to start at an address multiple of 0x10
+ */
+ if (fp & 0xF) {
+ unsigned long line1_words = (0x10 - (fp & 0xF)) >> 2;
+ if (line1_words < words_to_show) {
+ print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS, 32,
+ 4, (void *)fp, line1_words << 2, 0);
+ fp += line1_words << 2;
+ words_to_show -= line1_words;
+ }
+ }
+ print_hex_dump(loglvl, "", DUMP_PREFIX_ADDRESS, 32, 4, (void *)fp,
+ words_to_show << 2, 0);
+ printk("%s\n\nCall Trace:\n", loglvl);
+ microblaze_unwind(task, NULL, loglvl);
+ printk("%s\n", loglvl);
+
+ if (!task)
+ task = current;
+
+ debug_show_held_locks(task);
+}
diff --git a/arch/microblaze/kernel/unwind.c b/arch/microblaze/kernel/unwind.c
new file mode 100644
index 000000000..778a761af
--- /dev/null
+++ b/arch/microblaze/kernel/unwind.c
@@ -0,0 +1,328 @@
+/*
+ * Backtrace support for Microblaze
+ *
+ * Copyright (C) 2010 Digital Design Corporation
+ *
+ * Based on arch/sh/kernel/cpu/sh5/unwind.c code which is:
+ * Copyright (C) 2004 Paul Mundt
+ * Copyright (C) 2004 Richard Curnow
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+/* #define DEBUG 1 */
+#include <linux/export.h>
+#include <linux/kallsyms.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/io.h>
+#include <asm/sections.h>
+#include <asm/exceptions.h>
+#include <asm/unwind.h>
+#include <asm/switch_to.h>
+
+struct stack_trace;
+
+/*
+ * On Microblaze, finding the previous stack frame is a little tricky.
+ * At this writing (3/2010), Microblaze does not support CONFIG_FRAME_POINTERS,
+ * and even if it did, gcc (4.1.2) does not store the frame pointer at
+ * a consistent offset within each frame. To determine frame size, it is
+ * necessary to search for the assembly instruction that creates or reclaims
+ * the frame and extract the size from it.
+ *
+ * Microblaze stores the stack pointer in r1, and creates a frame via
+ *
+ * addik r1, r1, -FRAME_SIZE
+ *
+ * The frame is reclaimed via
+ *
+ * addik r1, r1, FRAME_SIZE
+ *
+ * Frame creation occurs at or near the top of a function.
+ * Depending on the compiler, reclaim may occur at the end, or before
+ * a mid-function return.
+ *
+ * A stack frame is usually not created in a leaf function.
+ */
+
+/**
+ * get_frame_size - Extract the stack adjustment from an
+ * "addik r1, r1, adjust" instruction
+ * @instr : Microblaze instruction
+ *
+ * Return - Number of stack bytes the instruction reserves or reclaims
+ */
+static inline long get_frame_size(unsigned long instr)
+{
+ return abs((s16)(instr & 0xFFFF));
+}
+
+/**
+ * find_frame_creation - Search backward to find the instruction that creates
+ * the stack frame (hopefully, for the same function the
+ * initial PC is in).
+ * @pc : Program counter at which to begin the search
+ *
+ * Return - PC at which stack frame creation occurs
+ * NULL if this cannot be found, i.e. a leaf function
+ */
+static unsigned long *find_frame_creation(unsigned long *pc)
+{
+ int i;
+
+ /* NOTE: Distance to search is arbitrary
+ * 250 works well for most things,
+ * 750 picks up things like tcp_recvmsg(),
+ * 1000 needed for fat_fill_super()
+ */
+ for (i = 0; i < 1000; i++, pc--) {
+ unsigned long instr;
+ s16 frame_size;
+
+ if (!kernel_text_address((unsigned long) pc))
+ return NULL;
+
+ instr = *pc;
+
+ /* addik r1, r1, foo ? */
+ if ((instr & 0xFFFF0000) != 0x30210000)
+ continue; /* No */
+
+ frame_size = get_frame_size(instr);
+ if ((frame_size < 8) || (frame_size & 3)) {
+ pr_debug(" Invalid frame size %d at 0x%p\n",
+ frame_size, pc);
+ return NULL;
+ }
+
+ pr_debug(" Found frame creation at 0x%p, size %d\n", pc,
+ frame_size);
+ return pc;
+ }
+
+ return NULL;
+}
+
+/**
+ * lookup_prev_stack_frame - Find the stack frame of the previous function.
+ * @fp : Frame (stack) pointer for current function
+ * @pc : Program counter within current function
+ * @leaf_return : r15 value within current function. If the current function
+ * is a leaf, this is the caller's return address.
+ * @pprev_fp : On exit, set to frame (stack) pointer for previous function
+ * @pprev_pc : On exit, set to current function caller's return address
+ *
+ * Return - 0 on success, -EINVAL if the previous frame cannot be found
+ */
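+/*
+ * Frame layout assumed below (standard MicroBlaze prologue,
+ * "addik r1, r1, -FRAME_SIZE; swi r15, r1, 0"):
+ *
+ *   fp + FRAME_SIZE -> caller's frame              (becomes *pprev_fp)
+ *   ...
+ *   fp + 0          -> saved r15, the caller's
+ *                      return address              (becomes *pprev_pc)
+ */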
+static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
+ unsigned long leaf_return,
+ unsigned long *pprev_fp,
+ unsigned long *pprev_pc)
+{
+ unsigned long *prologue = NULL;
+
+ /* _switch_to is a special leaf function */
+ if (pc != (unsigned long) &_switch_to)
+ prologue = find_frame_creation((unsigned long *)pc);
+
+ if (prologue) {
+ long frame_size = get_frame_size(*prologue);
+
+ *pprev_fp = fp + frame_size;
+ *pprev_pc = *(unsigned long *)fp;
+ } else {
+ if (!leaf_return)
+ return -EINVAL;
+ *pprev_pc = leaf_return;
+ *pprev_fp = fp;
+ }
+
+ /* NOTE: don't check kernel_text_address here, to allow display
+ * of userland return address
+ */
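+	/* Instructions are 32-bit aligned, so a zero PC or one with the
+	 * low two bits set cannot be a valid return address.
+	 */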
+ return (!*pprev_pc || (*pprev_pc & 3)) ? -EINVAL : 0;
+}
+
+static void microblaze_unwind_inner(struct task_struct *task,
+ unsigned long pc, unsigned long fp,
+ unsigned long leaf_return,
+ struct stack_trace *trace,
+ const char *loglvl);
+
+/**
+ * unwind_trap - Unwind through a system trap that saved the previous
+ *               state on the stack.
+ */
+#ifdef CONFIG_MMU
+static inline void unwind_trap(struct task_struct *task, unsigned long pc,
+ unsigned long fp, struct stack_trace *trace,
+ const char *loglvl)
+{
+ /* To be implemented */
+}
+#else
+static inline void unwind_trap(struct task_struct *task, unsigned long pc,
+ unsigned long fp, struct stack_trace *trace,
+ const char *loglvl)
+{
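+	/* On noMMU, the trap entry code pushed a full pt_regs at the
+	 * stack pointer, so fp points directly at the saved state.
+	 */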
+ const struct pt_regs *regs = (const struct pt_regs *) fp;
+	microblaze_unwind_inner(task, regs->pc, regs->r1,
+				regs->r15, trace, loglvl);
+}
+#endif
+
+/**
+ * microblaze_unwind_inner - Unwind the stack from the specified point
+ * @task : Task whose stack we are to unwind (may be NULL)
+ * @pc : Program counter from which we start unwinding
+ * @fp : Frame (stack) pointer from which we start unwinding
+ * @leaf_return : Value of r15 at pc. If the function is a leaf, this is
+ * the caller's return address.
+ * @trace : Where to store stack backtrace (PC values).
+ * NULL == print backtrace to kernel log
+ * @loglvl : Used for printk log level if (trace == NULL).
+ */
+static void microblaze_unwind_inner(struct task_struct *task,
+ unsigned long pc, unsigned long fp,
+ unsigned long leaf_return,
+ struct stack_trace *trace,
+ const char *loglvl)
+{
+ int ofs = 0;
+
+ pr_debug(" Unwinding with PC=%p, FP=%p\n", (void *)pc, (void *)fp);
+ if (!pc || !fp || (pc & 3) || (fp & 3)) {
+ pr_debug(" Invalid state for unwind, aborting\n");
+ return;
+ }
+ for (; pc != 0;) {
+ unsigned long next_fp, next_pc = 0;
+ unsigned long return_to = pc + 2 * sizeof(unsigned long);
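+		/*
+		 * Treat pc as the address of the call that got us here:
+		 * MicroBlaze returns via "rtsd r15, 8", two words past
+		 * the branch, so return_to approximates the address at
+		 * which the previous function resumes.
+		 */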
+ const struct trap_handler_info *handler =
+ &microblaze_trap_handlers;
+
+ /* Is previous function the HW exception handler? */
+ if ((return_to >= (unsigned long)&_hw_exception_handler)
+		    && (return_to < (unsigned long)&ex_handler_unhandled)) {
+ /*
+ * HW exception handler doesn't save all registers,
+ * so we open-code a special case of unwind_trap()
+ */
+#ifndef CONFIG_MMU
+ const struct pt_regs *regs =
+ (const struct pt_regs *) fp;
+#endif
+ printk("%sHW EXCEPTION\n", loglvl);
+#ifndef CONFIG_MMU
+ microblaze_unwind_inner(task, regs->r17 - 4,
+ fp + EX_HANDLER_STACK_SIZ,
+ regs->r15, trace, loglvl);
+#endif
+ return;
+ }
+
+ /* Is previous function a trap handler? */
+ for (; handler->start_addr; ++handler) {
+ if ((return_to >= handler->start_addr)
+ && (return_to <= handler->end_addr)) {
+ if (!trace)
+ printk("%s%s\n", loglvl, handler->trap_name);
+ unwind_trap(task, pc, fp, trace, loglvl);
+ return;
+ }
+ }
+ pc -= ofs;
+
+ if (trace) {
+#ifdef CONFIG_STACKTRACE
+ if (trace->skip > 0)
+ trace->skip--;
+ else
+ trace->entries[trace->nr_entries++] = pc;
+
+ if (trace->nr_entries >= trace->max_entries)
+ break;
+#endif
+ } else {
+ /* Have we reached userland? */
+ if (unlikely(pc == task_pt_regs(task)->pc)) {
+ printk("%s[<%p>] PID %lu [%s]\n",
+ loglvl, (void *) pc,
+ (unsigned long) task->pid,
+ task->comm);
+ break;
+			} else {
+				print_ip_sym(loglvl, pc);
+			}
+ }
+
+ /* Stop when we reach anything not part of the kernel */
+ if (!kernel_text_address(pc))
+ break;
+
+ if (lookup_prev_stack_frame(fp, pc, leaf_return, &next_fp,
+ &next_pc) == 0) {
+ ofs = sizeof(unsigned long);
+ pc = next_pc & ~3;
+ fp = next_fp;
+ leaf_return = 0;
+ } else {
+ pr_debug(" Failed to find previous stack frame\n");
+ break;
+ }
+
+ pr_debug(" Next PC=%p, next FP=%p\n",
+ (void *)next_pc, (void *)next_fp);
+ }
+}
+
+/**
+ * microblaze_unwind - Stack unwinder for Microblaze (external entry point)
+ * @task : Task whose stack we are to unwind (NULL == current)
+ * @trace : Where to store stack backtrace (PC values).
+ * NULL == print backtrace to kernel log
+ * @loglvl : Used for printk log level if (trace == NULL).
+ */
+void microblaze_unwind(struct task_struct *task, struct stack_trace *trace,
+ const char *loglvl)
+{
+ if (task) {
+ if (task == current) {
+ const struct pt_regs *regs = task_pt_regs(task);
+ microblaze_unwind_inner(task, regs->pc, regs->r1,
+ regs->r15, trace, loglvl);
+ } else {
+ struct thread_info *thread_info =
+ (struct thread_info *)(task->stack);
+ const struct cpu_context *cpu_context =
+ &thread_info->cpu_context;
+
+ microblaze_unwind_inner(task,
+ (unsigned long) &_switch_to,
+ cpu_context->r1,
+ cpu_context->r15,
+ trace, loglvl);
+ }
+ } else {
+ unsigned long pc, fp;
+
+ __asm__ __volatile__ ("or %0, r1, r0" : "=r" (fp));
+
+ __asm__ __volatile__ (
+ "brlid %0, 0f;"
+ "nop;"
+ "0:"
+ : "=r" (pc)
+ );
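+		/*
+		 * "or %0, r1, r0" copies the stack pointer (r1) into fp;
+		 * "brlid %0, 0f" is a branch-and-link to the next label,
+		 * leaving the address of the brlid itself in pc.
+		 */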
+
+ /* Since we are not a leaf function, use leaf_return = 0 */
+ microblaze_unwind_inner(current, pc, fp, 0, trace, loglvl);
+ }
+}
+
diff --git a/arch/microblaze/kernel/vmlinux.lds.S b/arch/microblaze/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..df07b3d06
--- /dev/null
+++ b/arch/microblaze/kernel/vmlinux.lds.S
@@ -0,0 +1,143 @@
+/*
+ * Copyright (C) 2008-2009 Michal Simek <monstr@monstr.eu>
+ * Copyright (C) 2008-2009 PetaLogix
+ * Copyright (C) 2006 Atmark Techno, Inc.
+ *
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ */
+
+OUTPUT_ARCH(microblaze)
+ENTRY(microblaze_start)
+
+#define RO_EXCEPTION_TABLE_ALIGN 16
+
+#include <asm/cache.h>
+#include <asm/page.h>
+#include <asm-generic/vmlinux.lds.h>
+#include <asm/thread_info.h>
+
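+/*
+ * Alias the 32-bit jiffies to the least-significant word of jiffies_64:
+ * offset 0 on little-endian (__MICROBLAZEEL__) builds, offset 4 on
+ * big-endian builds.
+ */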
+#ifdef __MICROBLAZEEL__
+jiffies = jiffies_64;
+#else
+jiffies = jiffies_64 + 4;
+#endif
+
+SECTIONS {
+ . = CONFIG_KERNEL_START;
+ microblaze_start = CONFIG_KERNEL_BASE_ADDR;
+ .text : AT(ADDR(.text) - LOAD_OFFSET) {
+ _text = . ;
+ _stext = . ;
+ HEAD_TEXT
+ TEXT_TEXT
+ *(.fixup)
+ EXIT_TEXT
+ EXIT_CALL
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ . = ALIGN (4) ;
+ _etext = . ;
+ }
+
+ . = ALIGN (4) ;
+ __fdt_blob : AT(ADDR(__fdt_blob) - LOAD_OFFSET) {
+ _fdt_start = . ; /* place for fdt blob */
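+		/*
+		 * Early boot code (head.S) copies the bootloader-provided
+		 * DTB to _fdt_start; the fixed 64 KiB pad below bounds the
+		 * largest DTB accepted this way.
+		 */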
+ *(__fdt_blob) ; /* Any link-placed DTB */
+		. = _fdt_start + 0x10000;	/* Pad up to 64 KiB */
+ _fdt_end = . ;
+ }
+
+ . = ALIGN(16);
+ RO_DATA(4096)
+
+ /*
+ * sdata2 section can go anywhere, but must be word aligned
+ * and SDA2_BASE must point to the middle of it
+ */
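+	/*
+	 * Small-data references use signed 16-bit offsets from an anchor
+	 * register (conventionally r2 for the read-only area, r13 for the
+	 * read-write one), so anchoring at the middle of the section gives
+	 * the full +/-32K reach.
+	 */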
+ .sdata2 : AT(ADDR(.sdata2) - LOAD_OFFSET) {
+ _ssrw = .;
+ . = ALIGN(PAGE_SIZE); /* page aligned when MMU used */
+ *(.sdata2)
+ . = ALIGN(8);
+ _essrw = .;
+ _ssrw_size = _essrw - _ssrw;
+ _KERNEL_SDA2_BASE_ = _ssrw + (_ssrw_size / 2);
+ }
+
+ _sdata = . ;
+ RW_DATA(32, PAGE_SIZE, THREAD_SIZE)
+ _edata = . ;
+
+ /* Under the microblaze ABI, .sdata and .sbss must be contiguous */
+ . = ALIGN(8);
+ .sdata : AT(ADDR(.sdata) - LOAD_OFFSET) {
+ _ssro = .;
+ *(.sdata)
+ }
+
+ .sbss : AT(ADDR(.sbss) - LOAD_OFFSET) {
+ _ssbss = .;
+ *(.sbss)
+ _esbss = .;
+ _essro = .;
+ _ssro_size = _essro - _ssro ;
+ _KERNEL_SDA_BASE_ = _ssro + (_ssro_size / 2) ;
+ }
+
+ PERCPU_SECTION(L1_CACHE_BYTES)
+
+ . = ALIGN(PAGE_SIZE);
+ __init_begin = .;
+
+ INIT_TEXT_SECTION(PAGE_SIZE)
+
+ .init.data : AT(ADDR(.init.data) - LOAD_OFFSET) {
+ INIT_DATA
+ }
+
+ . = ALIGN(4);
+ .init.ivt : AT(ADDR(.init.ivt) - LOAD_OFFSET) {
+ __ivt_start = .;
+ *(.init.ivt)
+ __ivt_end = .;
+ }
+
+ .init.setup : AT(ADDR(.init.setup) - LOAD_OFFSET) {
+ INIT_SETUP(0)
+ }
+
+ .initcall.init : AT(ADDR(.initcall.init) - LOAD_OFFSET ) {
+ INIT_CALLS
+ }
+
+ .con_initcall.init : AT(ADDR(.con_initcall.init) - LOAD_OFFSET) {
+ CON_INITCALL
+ }
+
+ __init_end_before_initramfs = .;
+
+ .init.ramfs : AT(ADDR(.init.ramfs) - LOAD_OFFSET) {
+ INIT_RAM_FS
+ }
+
+ __init_end = .;
+
+ .bss ALIGN (PAGE_SIZE) : AT(ADDR(.bss) - LOAD_OFFSET) {
+ /* page aligned when MMU used */
+ __bss_start = . ;
+ *(.bss*)
+ *(COMMON)
+ . = ALIGN (4) ;
+ __bss_stop = . ;
+ }
+ . = ALIGN(PAGE_SIZE);
+ _end = .;
+
+ DISCARDS
+}