From 2c3c1048746a4622d8c89a29670120dc8fab93c4 Mon Sep 17 00:00:00 2001
From: Daniel Baumann
Date: Sun, 7 Apr 2024 20:49:45 +0200
Subject: Adding upstream version 6.1.76.

Signed-off-by: Daniel Baumann
---
 arch/mips/lib/uncached.c | 82 ++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 82 insertions(+)
 create mode 100644 arch/mips/lib/uncached.c

diff --git a/arch/mips/lib/uncached.c b/arch/mips/lib/uncached.c
new file mode 100644
index 000000000..f80a67c09
--- /dev/null
+++ b/arch/mips/lib/uncached.c
@@ -0,0 +1,82 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2005 Thiemo Seufer
+ * Copyright (C) 2005 MIPS Technologies, Inc. All rights reserved.
+ *	Author: Maciej W. Rozycki
+ */
+
+
+#include <asm/addrspace.h>
+#include <asm/bug.h>
+#include <asm/cacheflush.h>
+
+#ifndef CKSEG2
+#define CKSEG2 CKSSEG
+#endif
+#ifndef TO_PHYS_MASK
+#define TO_PHYS_MASK -1
+#endif
+
+/*
+ * FUNC is executed in one of the uncached segments, depending on its
+ * original address as follows:
+ *
+ * 1. If the original address is in CKSEG0 or CKSEG1, then the uncached
+ *    segment used is CKSEG1.
+ * 2. If the original address is in XKPHYS, then the uncached segment
+ *    used is XKPHYS(2).
+ * 3. Otherwise it's a bug.
+ *
+ * The same remapping is done with the stack pointer. Stack handling
+ * works because we don't handle stack arguments or more complex return
+ * values, so we can avoid sharing the same stack area between a cached
+ * and the uncached mode.
+ */
+unsigned long run_uncached(void *func)
+{
+	register long ret __asm__("$2");
+	long lfunc = (long)func, ufunc;
+	long usp;
+	long sp;
+
+	__asm__("move %0, $sp" : "=r" (sp));
+
+	if (sp >= (long)CKSEG0 && sp < (long)CKSEG2)
+		usp = CKSEG1ADDR(sp);
+#ifdef CONFIG_64BIT
+	else if ((long long)sp >= (long long)PHYS_TO_XKPHYS(0, 0) &&
+		 (long long)sp < (long long)PHYS_TO_XKPHYS(8, 0))
+		usp = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
+				     XKPHYS_TO_PHYS((long long)sp));
+#endif
+	else {
+		BUG();
+		usp = sp;
+	}
+	if (lfunc >= (long)CKSEG0 && lfunc < (long)CKSEG2)
+		ufunc = CKSEG1ADDR(lfunc);
+#ifdef CONFIG_64BIT
+	else if ((long long)lfunc >= (long long)PHYS_TO_XKPHYS(0, 0) &&
+		 (long long)lfunc < (long long)PHYS_TO_XKPHYS(8, 0))
+		ufunc = PHYS_TO_XKPHYS(K_CALG_UNCACHED,
+				       XKPHYS_TO_PHYS((long long)lfunc));
+#endif
+	else {
+		BUG();
+		ufunc = lfunc;
+	}
+
+	__asm__ __volatile__ (
+		"	move	$16, $sp\n"
+		"	move	$sp, %1\n"
+		"	jalr	%2\n"
+		"	move	$sp, $16"
+		: "=r" (ret)
+		: "r" (usp), "r" (ufunc)
+		: "$16", "$31");
+
+	return ret;
+}
--
cgit v1.2.3
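
As context for the file added above: run_uncached() exists so that code which reconfigures a cache never fetches its own instructions, or touches its stack, through the cache it is changing. The sketch below is a minimal, hypothetical caller and is not part of the patch; enable_secondary_cache() and the Config bit it sets are assumed names and values, and run_uncached() is declared extern here only to keep the example self-contained.

/* Hypothetical example only -- not part of the patch above. */
#include <asm/mipsregs.h>

extern unsigned long run_uncached(void *func);	/* added by the patch */

/* Assumed routine: sets an implementation-specific S-cache enable bit. */
static void enable_secondary_cache(void)
{
	set_c0_config(1 << 3);		/* bit position is an assumption */
}

void cache_setup(void)
{
	/*
	 * run_uncached() remaps both the function address and the stack
	 * pointer into CKSEG1 (or uncached XKPHYS on 64-bit), so nothing
	 * goes through the cache while its configuration changes.
	 */
	run_uncached(enable_secondary_cache);
}

The value run_uncached() returns is whatever the called routine leaves in $2 (v0); callers that only need the side effect, as here, can simply ignore it.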