authorDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-06 01:02:30 +0000
committerDaniel Baumann <daniel.baumann@progress-linux.org>2024-05-06 01:02:30 +0000
commit 76cb841cb886eef6b3bee341a2266c76578724ad (patch)
tree f5892e5ba6cc11949952a6ce4ecbe6d516d6ce58 /arch/sparc/kernel/etrap_32.S
parentInitial commit. (diff)
Adding upstream version 4.19.249. (tag: upstream/4.19.249)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat
-rw-r--r-- arch/sparc/kernel/etrap_32.S | 281
1 file changed, 281 insertions(+), 0 deletions(-)
diff --git a/arch/sparc/kernel/etrap_32.S b/arch/sparc/kernel/etrap_32.S
new file mode 100644
index 000000000..9f243f918
--- /dev/null
+++ b/arch/sparc/kernel/etrap_32.S
@@ -0,0 +1,281 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * etrap.S: Sparc trap window preparation for entry into the
+ * Linux kernel.
+ *
+ * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
+ */
+
+#include <asm/head.h>
+#include <asm/asi.h>
+#include <asm/contregs.h>
+#include <asm/page.h>
+#include <asm/psr.h>
+#include <asm/ptrace.h>
+#include <asm/winmacro.h>
+#include <asm/asmmacro.h>
+#include <asm/thread_info.h>
+
+/* Registers to not touch at all. */
+#define t_psr l0 /* Set by caller */
+#define t_pc l1 /* Set by caller */
+#define t_npc l2 /* Set by caller */
+#define t_wim l3 /* Set by caller */
+#define t_twinmask l4 /* Set at beginning of this entry routine. */
+#define t_kstack l5 /* Set right before pt_regs frame is built */
+#define t_retpc l6 /* If you change this, change winmacro.h header file */
+#define t_systable l7 /* Never touch this, could be the syscall table ptr. */
+#define curptr g6 /* Set after pt_regs frame is built */
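+
+/* These names are cpp defines, so writing, say, %t_psr in the code
+ * below assembles as %l0. The locals are safe to use because the
+ * trap handler runs in its own register window.
+ */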
+
+ .text
+ .align 4
+
+ /* SEVEN WINDOW PATCH INSTRUCTIONS */
+ .globl tsetup_7win_patch1, tsetup_7win_patch2
+ .globl tsetup_7win_patch3, tsetup_7win_patch4
+ .globl tsetup_7win_patch5, tsetup_7win_patch6
+tsetup_7win_patch1: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch2: and %g2, 0x7f, %g2
+tsetup_7win_patch3: and %g2, 0x7f, %g2
+tsetup_7win_patch4: and %g1, 0x7f, %g1
+tsetup_7win_patch5: sll %t_wim, 0x6, %t_wim
+tsetup_7win_patch6: and %g2, 0x7f, %g2
+ /* END OF PATCH INSTRUCTIONS */
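+
+ /* On CPUs with only seven register windows, the kernel patches
+  * the tsetup_patch1-6 sites below with the instructions above:
+  * the rotate amount drops from 7 to 6 and the window mask from
+  * 0xff to 0x7f. In rough C (illustrative only, NWINDOWS being
+  * the probed window count):
+  *
+  *   new_wim = ((wim >> 1) | (wim << (NWINDOWS - 1)))
+  *             & ((1 << NWINDOWS) - 1);
+  *
+  * i.e. rotate the invalid-window mask right by one.
+  */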
+
+ /* At trap time, interrupts and all generic traps do the
+ * following:
+ *
+ * rd %psr, %l0
+ * b some_handler
+ * rd %wim, %l3
+ * nop
+ *
+ * Then, if 'some_handler' needs a trap frame (i.e. it has to
+ * call C code and the trap cannot be handled in-window), it
+ * invokes the SAVE_ALL macro in entry.S, which does
+ *
+ * sethi %hi(trap_setup), %l4
+ * jmpl %l4 + %lo(trap_setup), %l6
+ * nop
+ */
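+
+ /* Note that the second 'rd' above sits in the delay slot of the
+  * branch, so it has already executed when 'some_handler' runs;
+  * together with the pc/npc the trap hardware wrote into %l1/%l2,
+  * this is what guarantees the register state trap_setup expects.
+  */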
+
+ /* 2 3 4 window number
+ * -----
+ * O T S mnemonic
+ *
+ * O == Current window before trap
+ * T == Window entered when trap occurred
+ * S == Window we will need to save if (1<<T) == %wim
+ *
+ * Before execution gets here, it must be guaranteed that
+ * %l0 contains trap time %psr, %l1 and %l2 contain the
+ * trap pc and npc, and %l3 contains the trap time %wim.
+ */
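+
+ /* For example, if the trap entered window 3, trap_setup computes
+  * t_twinmask = 1 << 3 = 0x08, and the trap window is the invalid
+  * one exactly when (%wim & 0x08) != 0.
+  */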
+
+ .globl trap_setup, tsetup_patch1, tsetup_patch2
+ .globl tsetup_patch3, tsetup_patch4
+ .globl tsetup_patch5, tsetup_patch6
+trap_setup:
+ /* Calculate mask of trap window. See if from user
+ * or kernel and branch conditionally.
+ */
+ mov 1, %t_twinmask
+ andcc %t_psr, PSR_PS, %g0 ! fromsupv_p = (psr & PSR_PS)
+ be trap_setup_from_user ! nope, from user mode
+ sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
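+
+ /* The sll above relies on the shift count being taken from the
+  * low five bits of %t_psr, which hold the CWP field of %psr; so
+  * t_twinmask ends up as 1 << (current window number).
+  */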
+
+ /* From kernel, allocate more kernel stack and
+ * build a pt_regs trap frame.
+ */
+ sub %fp, (STACKFRAME_SZ + TRACEREG_SZ), %t_kstack
+ STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ bne trap_setup_kernel_spill ! in trap window, clean up
+ nop
+
+ /* Trap from kernel with a window available.
+ * Just do it...
+ */
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! jump onto new stack
+
+trap_setup_kernel_spill:
+ ld [%curptr + TI_UWINMASK], %g1
+ orcc %g0, %g1, %g0
+ bne trap_setup_user_spill ! there are some user windows, yuck
+ /* Spill from kernel, but only kernel windows, adjust
+ * %wim and go.
+ */
+ srl %t_wim, 0x1, %g2 ! begin computation of new %wim
+tsetup_patch1:
+ sll %t_wim, 0x7, %t_wim ! patched on 7 window Sparcs
+ or %t_wim, %g2, %g2
+tsetup_patch2:
+ and %g2, 0xff, %g2 ! patched on 7 window Sparcs
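+
+ /* The srl/sll/or/and sequence above rotates %wim right by one.
+  * For example, on an 8-window CPU with %t_wim = 0x08: srl gives
+  * %g2 = 0x04, sll gives %t_wim = 0x400, or gives %g2 = 0x404,
+  * and the final and leaves %g2 = 0x04. The save below then steps
+  * around the ring into the window the new mask marks invalid,
+  * STORE_WINDOW dumps its locals and ins, and restore steps back.
+  */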
+
+ save %g0, %g0, %g0
+
+ /* Set new %wim value */
+ wr %g2, 0x0, %wim
+
+ /* Save the kernel window onto the corresponding stack. */
+ STORE_WINDOW(sp)
+
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! and onto new kernel stack
+
+#define STACK_OFFSET (THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ)
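+
+/* thread_info lives at the bottom of the THREAD_SIZE-aligned kernel
+ * stack block and the first pt_regs frame at its top, so (roughly,
+ * in C):
+ *
+ *   kstack = (char *)ti + THREAD_SIZE - TRACEREG_SZ - STACKFRAME_SZ;
+ *
+ * which is what the three instructions after LOAD_CURRENT below
+ * compute.
+ */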
+
+trap_setup_from_user:
+ /* We can't use %curptr yet. */
+ LOAD_CURRENT(t_kstack, t_twinmask)
+
+ sethi %hi(STACK_OFFSET), %t_twinmask
+ or %t_twinmask, %lo(STACK_OFFSET), %t_twinmask
+ add %t_kstack, %t_twinmask, %t_kstack
+
+ mov 1, %t_twinmask
+ sll %t_twinmask, %t_psr, %t_twinmask ! t_twinmask = (1 << psr)
+
+ /* Build pt_regs frame. */
+ STORE_PT_ALL(t_kstack, t_psr, t_pc, t_npc, g2)
+
+#if 0
+ /* If we're sure every task_struct is THREAD_SIZE aligned,
+ we can speed this up. */
+ sethi %hi(STACK_OFFSET), %curptr
+ or %curptr, %lo(STACK_OFFSET), %curptr
+ sub %t_kstack, %curptr, %curptr
+#else
+ sethi %hi(~(THREAD_SIZE - 1)), %curptr
+ and %t_kstack, %curptr, %curptr
+#endif
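+
+ /* The active branch recovers thread_info by rounding the stack
+  * pointer down to a THREAD_SIZE boundary, i.e.:
+  *
+  *   curptr = t_kstack & ~(THREAD_SIZE - 1);
+  *
+  * which works because kernel stacks are THREAD_SIZE aligned.
+  */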
+
+ /* Clear current_thread_info->w_saved */
+ st %g0, [%curptr + TI_W_SAVED]
+
+ /* See if we are in the trap window. */
+ andcc %t_twinmask, %t_wim, %g0
+ bne trap_setup_user_spill ! yep we are
+ orn %g0, %t_twinmask, %g1 ! negate trap win mask into %g1
+
+ /* Trap from user, but not into the invalid window.
+ * Calculate new umask. The way this works is,
+ * any window from the %wim at trap time until
+ * the window right before the one we are in now,
+ * is a user window. A diagram:
+ *
+ * 7 6 5 4 3 2 1 0    window number
+ * ---------------
+ *   I     L T        mnemonic
+ *
+ * Window 'I' is the invalid window in our example,
+ * window 'L' is the window the user was in when
+ * the trap occurred, window T is the trap window
+ * we are in now. So therefore, windows 5, 4 and
+ * 3 are user windows. The following sequence
+ * computes the user winmask to represent this.
+ */
+ subcc %t_wim, %t_twinmask, %g2
+ bneg,a 1f
+ sub %g2, 0x1, %g2
+1:
+ andn %g2, %t_twinmask, %g2
+tsetup_patch3:
+ and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ st %g2, [%curptr + TI_UWINMASK] ! store new umask
+
+ jmpl %t_retpc + 0x8, %g0 ! return to caller
+ mov %t_kstack, %sp ! and onto kernel stack
+
+trap_setup_user_spill:
+ /* A spill occurred from either kernel or user mode
+ * and there exist some user windows to deal with.
+ * A mask of the currently valid user windows
+ * is in %g1 upon entry to here.
+ */
+
+tsetup_patch4:
+ and %g1, 0xff, %g1 ! patched on 7win Sparcs, mask
+ srl %t_wim, 0x1, %g2 ! compute new %wim
+tsetup_patch5:
+ sll %t_wim, 0x7, %t_wim ! patched on 7win Sparcs
+ or %t_wim, %g2, %g2 ! %g2 is new %wim
+tsetup_patch6:
+ and %g2, 0xff, %g2 ! patched on 7win Sparcs
+ andn %g1, %g2, %g1 ! clear this bit in %g1
+ st %g1, [%curptr + TI_UWINMASK]
+
+ save %g0, %g0, %g0
+
+ wr %g2, 0x0, %wim
+
+ /* Call MMU-architecture dependent stack checking
+ * routine.
+ */
+ b tsetup_srmmu_stackchk
+ andcc %sp, 0x7, %g0
+
+ /* Architecture specific stack checking routines. When either
+ * of these routines are called, the globals are free to use
+ * as they have been safely stashed on the new kernel stack
+ * pointer. Thus the definition below for simplicity.
+ */
+#define glob_tmp g1
+
+ .globl tsetup_srmmu_stackchk
+tsetup_srmmu_stackchk:
+ /* Check results of caller's andcc %sp, 0x7, %g0 */
+ bne trap_setup_user_stack_is_bolixed
+ sethi %hi(PAGE_OFFSET), %glob_tmp
+
+ cmp %glob_tmp, %sp
+ bleu,a 1f
+LEON_PI( lda [%g0] ASI_LEON_MMUREGS, %glob_tmp) ! read MMU control
+SUN_PI_( lda [%g0] ASI_M_MMUREGS, %glob_tmp) ! read MMU control
+
+trap_setup_user_stack_is_bolixed:
+ /* From user/kernel into invalid window w/bad user
+ * stack. Save bad user stack, and return to caller.
+ */
+ SAVE_BOLIXED_USER_STACK(curptr, g3)
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0
+ mov %t_kstack, %sp
+
+1:
+ /* Clear the fault status and turn on the no_fault bit. */
+ or %glob_tmp, 0x2, %glob_tmp ! or in no_fault bit
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS) ! set it
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS) ! set it
+
+ /* Dump the registers and cross fingers. */
+ STORE_WINDOW(sp)
+
+ /* Clear the no_fault bit and check the status. */
+ andn %glob_tmp, 0x2, %glob_tmp
+LEON_PI(sta %glob_tmp, [%g0] ASI_LEON_MMUREGS)
+SUN_PI_(sta %glob_tmp, [%g0] ASI_M_MMUREGS)
+
+ mov AC_M_SFAR, %glob_tmp
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %g0)
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %g0)
+
+ mov AC_M_SFSR, %glob_tmp
+LEON_PI(lda [%glob_tmp] ASI_LEON_MMUREGS, %glob_tmp) ! save away status of winstore
+SUN_PI_(lda [%glob_tmp] ASI_M_MMUREGS, %glob_tmp) ! save away status of winstore
+
+ andcc %glob_tmp, 0x2, %g0 ! did we fault?
+ bne trap_setup_user_stack_is_bolixed ! failure
+ nop
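+
+ /* In rough C, the no-fault dance above is (illustrative only):
+  *
+  *   mmu_ctl |= 0x2;                  set no_fault: stores won't trap
+  *   store_window(sp);                may silently fail instead
+  *   mmu_ctl &= ~0x2;                 faults trap normally again
+  *   (void) mmu[AC_M_SFAR];           read and discard fault address
+  *   if (mmu[AC_M_SFSR] & 0x2)        fault recorded?
+  *           goto trap_setup_user_stack_is_bolixed;
+  */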
+
+ restore %g0, %g0, %g0
+
+ jmpl %t_retpc + 0x8, %g0
+ mov %t_kstack, %sp
+