authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:27:49 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-04-11 08:27:49 +0000
commitace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
treeb2d64bc10158fdd5497876388cd68142ca374ed3 /arch/riscv/kernel/entry.S
parentInitial commit. (diff)
downloadlinux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.tar.xz
linux-ace9429bb58fd418f0c81d4c2835699bddf6bde6.zip
Adding upstream version 6.6.15. (tag: upstream/6.6.15)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/kernel/entry.S')
-rw-r--r--  arch/riscv/kernel/entry.S  305
1 file changed, 305 insertions, 0 deletions
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
new file mode 100644
index 0000000000..278d01d291
--- /dev/null
+++ b/arch/riscv/kernel/entry.S
@@ -0,0 +1,305 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/page.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm/errata_list.h>
+#include <linux/sizes.h>
+
+ .section .irqentry.text, "ax"
+
+SYM_CODE_START(handle_exception)
+ /*
+ * If coming from userspace, preserve the user thread pointer and load
+ * the kernel thread pointer. If we came from the kernel, the scratch
+ * register will contain 0, and we should continue on the current TP.
+ */
+ csrrw tp, CSR_SCRATCH, tp
+ bnez tp, _save_context
+
+_restore_kernel_tpsp:
+ csrr tp, CSR_SCRATCH
+ REG_S sp, TASK_TI_KERNEL_SP(tp)
+
+#ifdef CONFIG_VMAP_STACK
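+ /*
+  * Overflow check: with CONFIG_VMAP_STACK, kernel stacks are aligned to
+  * 2 * THREAD_SIZE (THREAD_ALIGN in asm/thread_info.h), so bit
+  * THREAD_SHIFT of any in-bounds stack address is clear. If allocating
+  * pt_regs underflows the stack, that bit reads as one.
+  */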
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ srli sp, sp, THREAD_SHIFT
+ andi sp, sp, 0x1
+ bnez sp, handle_kernel_stack_overflow
+ REG_L sp, TASK_TI_KERNEL_SP(tp)
+#endif
+
+_save_context:
+ REG_S sp, TASK_TI_USER_SP(tp)
+ REG_L sp, TASK_TI_KERNEL_SP(tp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ REG_S x1, PT_RA(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x5, PT_T0(sp)
+ save_from_x6_to_x31
+
+ /*
+  * Disable user-mode memory access (SUM), as it should only be
+  * enabled in the actual user copy routines.
+  *
+  * Disable the FPU/Vector unit to detect illegal usage of floating
+  * point or vector registers in kernel space.
+  */
+ li t0, SR_SUM | SR_FS_VS
+
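+ /* the csrrc below clears the t0 bits in CSR_STATUS, returning the old value in s1 */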
+ REG_L s0, TASK_TI_USER_SP(tp)
+ csrrc s1, CSR_STATUS, t0
+ csrr s2, CSR_EPC
+ csrr s3, CSR_TVAL
+ csrr s4, CSR_CAUSE
+ csrr s5, CSR_SCRATCH
+ REG_S s0, PT_SP(sp)
+ REG_S s1, PT_STATUS(sp)
+ REG_S s2, PT_EPC(sp)
+ REG_S s3, PT_BADADDR(sp)
+ REG_S s4, PT_CAUSE(sp)
+ REG_S s5, PT_TP(sp)
+
+ /*
+  * Set the scratch register to 0, so that if a recursive exception
+  * occurs, the exception vector knows it came from the kernel.
+  */
+ csrw CSR_SCRATCH, x0
+
+ /* Load the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+ move a0, sp /* pt_regs */
+ la ra, ret_from_exception
+
+ /*
+  * The MSB of cause differentiates between interrupts and exceptions:
+  * scause is negative for interrupts, so the signed bge branches to
+  * the exception path only for non-negative (exception) causes.
+  */
+ bge s4, zero, 1f
+
+ /* Handle interrupts */
+ tail do_irq
+1:
+ /* Handle other exceptions */
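+ /* scale the cause by pointer size (slli by RISCV_LGPTR) to index the handler table */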
+ slli t0, s4, RISCV_LGPTR
+ la t1, excp_vect_table
+ la t2, excp_vect_table_end
+ add t0, t1, t0
+ /* Check if exception code lies within bounds */
+ bgeu t0, t2, 1f
+ REG_L t0, 0(t0)
+ jr t0
+1:
+ tail do_trap_unknown
+SYM_CODE_END(handle_exception)
+
+/*
+ * ret_from_exception must be called with interrupts disabled. Its
+ * callers are:
+ * - handle_exception
+ * - ret_from_fork
+ */
+SYM_CODE_START_NOALIGN(ret_from_exception)
+ REG_L s0, PT_STATUS(sp)
+#ifdef CONFIG_RISCV_M_MODE
+ /* the MPP value is too large to be used as an immediate arg for andi */
+ li t0, SR_MPP
+ and s0, s0, t0
+#else
+ andi s0, s0, SR_SPP
+#endif
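+ /* a nonzero SPP/MPP field means the trap came from kernel mode; skip the user-return bookkeeping */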
+ bnez s0, 1f
+
+ /* Save unwound kernel stack pointer in thread_info */
+ addi s0, sp, PT_SIZE_ON_STACK
+ REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+ /*
+  * Save TP into the scratch register, so we can find the kernel data
+  * structures again.
+  */
+ csrw CSR_SCRATCH, tp
+1:
+ REG_L a0, PT_STATUS(sp)
+ /*
+ * The current load reservation is effectively part of the processor's
+ * state, in the sense that load reservations cannot be shared between
+ * different hart contexts. We can't actually save and restore a load
+ * reservation, so instead here we clear any existing reservation --
+ * it's always legal for implementations to clear load reservations at
+ * any point (as long as the forward progress guarantee is kept, but
+ * we'll ignore that here).
+ *
+ * Dangling load reservations can be the result of taking a trap in the
+ * middle of an LR/SC sequence, but can also be the result of a taken
+ * forward branch around an SC -- which is how we implement CAS. As a
+ * result we need to clear reservations between the last CAS and the
+ * jump back to the new context. While it is unlikely the store
+ * completes, implementations are allowed to expand reservations to be
+ * arbitrarily large.
+ */
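+ /*
+  * The sc below either fails (leaving memory untouched) or stores back
+  * the epc value just loaded; either way the reservation is cleared.
+  */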
+ REG_L a2, PT_EPC(sp)
+ REG_SC x0, a2, PT_EPC(sp)
+
+ csrw CSR_STATUS, a0
+ csrw CSR_EPC, a2
+
+ REG_L x1, PT_RA(sp)
+ REG_L x3, PT_GP(sp)
+ REG_L x4, PT_TP(sp)
+ REG_L x5, PT_T0(sp)
+ restore_from_x6_to_x31
+
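+ /* restore sp itself last; every other pt_regs field was addressed through it */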
+ REG_L x2, PT_SP(sp)
+
+#ifdef CONFIG_RISCV_M_MODE
+ mret
+#else
+ sret
+#endif
+SYM_CODE_END(ret_from_exception)
+
+#ifdef CONFIG_VMAP_STACK
+SYM_CODE_START_LOCAL(handle_kernel_stack_overflow)
+ /* we reach here from kernel context, so sscratch must contain 0 */
+ csrrw x31, CSR_SCRATCH, x31
+ asm_per_cpu sp, overflow_stack, x31
+ li x31, OVERFLOW_STACK_SIZE
+ add sp, sp, x31
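+ /* sp now points at the top of this CPU's overflow stack (stacks grow down) */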
+ /* zero x31, then swap it back: CSR_SCRATCH becomes 0 and x31 is restored */
+ xor x31, x31, x31
+ csrrw x31, CSR_SCRATCH, x31
+
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+
+ /* save the context to the overflow stack */
+ REG_S x1, PT_RA(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x5, PT_T0(sp)
+ save_from_x6_to_x31
+
+ REG_L s0, TASK_TI_KERNEL_SP(tp)
+ csrr s1, CSR_STATUS
+ csrr s2, CSR_EPC
+ csrr s3, CSR_TVAL
+ csrr s4, CSR_CAUSE
+ csrr s5, CSR_SCRATCH
+ REG_S s0, PT_SP(sp)
+ REG_S s1, PT_STATUS(sp)
+ REG_S s2, PT_EPC(sp)
+ REG_S s3, PT_BADADDR(sp)
+ REG_S s4, PT_CAUSE(sp)
+ REG_S s5, PT_TP(sp)
+ move a0, sp
+ tail handle_bad_stack
+SYM_CODE_END(handle_kernel_stack_overflow)
+#endif
+
+SYM_CODE_START(ret_from_fork)
+ call schedule_tail
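+ /* copy_thread() stashed fn in s0 and fn_arg in s1; s0 is zero for user tasks */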
+ beqz s0, 1f /* not from kernel thread */
+ /* Call fn(arg) */
+ move a0, s1
+ jalr s0
+1:
+ move a0, sp /* pt_regs */
+ la ra, ret_from_exception
+ tail syscall_exit_to_user_mode
+SYM_CODE_END(ret_from_fork)
+
+/*
+ * Integer register context switch
+ * The callee-saved registers must be saved and restored.
+ *
+ * a0: previous task_struct (must be preserved across the switch)
+ * a1: next task_struct
+ *
+ * The values of a0 and a1 must be preserved by this function, as that's how
+ * arguments are passed to schedule_tail.
+ */
+SYM_FUNC_START(__switch_to)
+ /* Save context into prev->thread */
+ li a4, TASK_THREAD_RA
+ add a3, a0, a4
+ add a4, a1, a4
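+ /*
+  * a3/a4 now point at prev->thread.ra and next->thread.ra; the
+  * TASK_THREAD_*_RA offsets below are relative to thread.ra.
+  */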
+ REG_S ra, TASK_THREAD_RA_RA(a3)
+ REG_S sp, TASK_THREAD_SP_RA(a3)
+ REG_S s0, TASK_THREAD_S0_RA(a3)
+ REG_S s1, TASK_THREAD_S1_RA(a3)
+ REG_S s2, TASK_THREAD_S2_RA(a3)
+ REG_S s3, TASK_THREAD_S3_RA(a3)
+ REG_S s4, TASK_THREAD_S4_RA(a3)
+ REG_S s5, TASK_THREAD_S5_RA(a3)
+ REG_S s6, TASK_THREAD_S6_RA(a3)
+ REG_S s7, TASK_THREAD_S7_RA(a3)
+ REG_S s8, TASK_THREAD_S8_RA(a3)
+ REG_S s9, TASK_THREAD_S9_RA(a3)
+ REG_S s10, TASK_THREAD_S10_RA(a3)
+ REG_S s11, TASK_THREAD_S11_RA(a3)
+ /* Restore context from next->thread */
+ REG_L ra, TASK_THREAD_RA_RA(a4)
+ REG_L sp, TASK_THREAD_SP_RA(a4)
+ REG_L s0, TASK_THREAD_S0_RA(a4)
+ REG_L s1, TASK_THREAD_S1_RA(a4)
+ REG_L s2, TASK_THREAD_S2_RA(a4)
+ REG_L s3, TASK_THREAD_S3_RA(a4)
+ REG_L s4, TASK_THREAD_S4_RA(a4)
+ REG_L s5, TASK_THREAD_S5_RA(a4)
+ REG_L s6, TASK_THREAD_S6_RA(a4)
+ REG_L s7, TASK_THREAD_S7_RA(a4)
+ REG_L s8, TASK_THREAD_S8_RA(a4)
+ REG_L s9, TASK_THREAD_S9_RA(a4)
+ REG_L s10, TASK_THREAD_S10_RA(a4)
+ REG_L s11, TASK_THREAD_S11_RA(a4)
+ /* The offset of thread_info in task_struct is zero. */
+ move tp, a1
+ ret
+SYM_FUNC_END(__switch_to)
+
+#ifndef CONFIG_MMU
+#define do_page_fault do_trap_unknown
+#endif
+
+ .section ".rodata"
+ .align LGREG
+ /* Exception vector table */
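+ /* the ALT_* entries may be patched via alternatives for vendor errata (see asm/errata_list.h) */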
+SYM_CODE_START(excp_vect_table)
+ RISCV_PTR do_trap_insn_misaligned
+ ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
+ RISCV_PTR do_trap_insn_illegal
+ RISCV_PTR do_trap_break
+ RISCV_PTR do_trap_load_misaligned
+ RISCV_PTR do_trap_load_fault
+ RISCV_PTR do_trap_store_misaligned
+ RISCV_PTR do_trap_store_fault
+ RISCV_PTR do_trap_ecall_u /* system call */
+ RISCV_PTR do_trap_ecall_s
+ RISCV_PTR do_trap_unknown
+ RISCV_PTR do_trap_ecall_m
+ /* instruction page fault */
+ ALT_PAGE_FAULT(RISCV_PTR do_page_fault)
+ RISCV_PTR do_page_fault /* load page fault */
+ RISCV_PTR do_trap_unknown
+ RISCV_PTR do_page_fault /* store page fault */
+excp_vect_table_end:
+SYM_CODE_END(excp_vect_table)
+
+#ifndef CONFIG_MMU
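+/* NOMMU signal-return trampoline: invokes the rt_sigreturn syscall */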
+SYM_CODE_START(__user_rt_sigreturn)
+ li a7, __NR_rt_sigreturn
+ ecall
+SYM_CODE_END(__user_rt_sigreturn)
+#endif