author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:11:22 +0000
commit    b20732900e4636a467c0183a47f7396700f5f743 (patch)
tree      42f079ff82e701ebcb76829974b4caca3e5b6798 /arch/riscv/include/asm
parent    Adding upstream version 6.8.12. (diff)
Adding upstream version 6.9.7. (tag: upstream/6.9.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r--  arch/riscv/include/asm/asm.h                 10
-rw-r--r--  arch/riscv/include/asm/atomic.h              17
-rw-r--r--  arch/riscv/include/asm/barrier.h             21
-rw-r--r--  arch/riscv/include/asm/bitops.h             138
-rw-r--r--  arch/riscv/include/asm/cfi.h                 17
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h              5
-rw-r--r--  arch/riscv/include/asm/compat.h              19
-rw-r--r--  arch/riscv/include/asm/cpufeature.h          31
-rw-r--r--  arch/riscv/include/asm/crash_reserve.h (renamed from arch/riscv/include/asm/crash_core.h)  4
-rw-r--r--  arch/riscv/include/asm/csr.h                  2
-rw-r--r--  arch/riscv/include/asm/elf.h                 11
-rw-r--r--  arch/riscv/include/asm/errata_list.h         13
-rw-r--r--  arch/riscv/include/asm/fence.h               10
-rw-r--r--  arch/riscv/include/asm/ftrace.h              14
-rw-r--r--  arch/riscv/include/asm/hwcap.h                1
-rw-r--r--  arch/riscv/include/asm/io.h                   8
-rw-r--r--  arch/riscv/include/asm/membarrier.h          50
-rw-r--r--  arch/riscv/include/asm/mmio.h                 5
-rw-r--r--  arch/riscv/include/asm/mmiowb.h               2
-rw-r--r--  arch/riscv/include/asm/page.h                 2
-rw-r--r--  arch/riscv/include/asm/pgalloc.h             67
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h           2
-rw-r--r--  arch/riscv/include/asm/pgtable.h             40
-rw-r--r--  arch/riscv/include/asm/processor.h           31
-rw-r--r--  arch/riscv/include/asm/ptdump.h              22
-rw-r--r--  arch/riscv/include/asm/sbi.h                  2
-rw-r--r--  arch/riscv/include/asm/simd.h                 4
-rw-r--r--  arch/riscv/include/asm/suspend.h              3
-rw-r--r--  arch/riscv/include/asm/sync_core.h           29
-rw-r--r--  arch/riscv/include/asm/syscall_wrapper.h     54
-rw-r--r--  arch/riscv/include/asm/tlb.h                 18
-rw-r--r--  arch/riscv/include/asm/vector.h              11
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h        2
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h       3
34 files changed, 379 insertions(+), 289 deletions(-)
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index b0487b39e6..776354895b 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -183,6 +183,16 @@
REG_L x31, PT_T6(sp)
.endm
+/* Annotate a function as being unsuitable for kprobes. */
+#ifdef CONFIG_KPROBES
+#define ASM_NOKPROBE(name) \
+ .pushsection "_kprobe_blacklist", "aw"; \
+ RISCV_PTR name; \
+ .popsection
+#else
+#define ASM_NOKPROBE(name)
+#endif
+
#endif /* __ASSEMBLY__ */
#endif /* _ASM_RISCV_ASM_H */
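
The new macro matches the ASM_NOKPROBE() helper other architectures provide: it records a symbol in the _kprobe_blacklist section so the kprobes core refuses to plant probes there. A minimal usage sketch for an assembly file (the handler name is illustrative, not from this patch):

    SYM_CODE_START(my_trap_handler)
            /* entry code that must never be probed by kprobes */
    SYM_CODE_END(my_trap_handler)
    ASM_NOKPROBE(my_trap_handler)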
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
index f5dfef6c21..0e0522e588 100644
--- a/arch/riscv/include/asm/atomic.h
+++ b/arch/riscv/include/asm/atomic.h
@@ -17,7 +17,6 @@
#endif
#include <asm/cmpxchg.h>
-#include <asm/barrier.h>
#define __atomic_acquire_fence() \
__asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
@@ -207,7 +206,7 @@ static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int
" add %[rc], %[p], %[a]\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -228,7 +227,7 @@ static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a,
" add %[rc], %[p], %[a]\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
: [a]"r" (a), [u]"r" (u)
@@ -248,7 +247,7 @@ static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
" addi %[rc], %[p], 1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -268,7 +267,7 @@ static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
" addi %[rc], %[p], -1\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -288,7 +287,7 @@ static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
" bltz %[rc], 1f\n"
" sc.w.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -310,7 +309,7 @@ static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
" addi %[rc], %[p], 1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -331,7 +330,7 @@ static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
" addi %[rc], %[p], -1\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
@@ -352,7 +351,7 @@ static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
" bltz %[rc], 1f\n"
" sc.d.rl %[rc], %[rc], %[c]\n"
" bnez %[rc], 0b\n"
- " fence rw, rw\n"
+ RISCV_FULL_BARRIER
"1:\n"
: [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
:
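
All eight hunks above are the same mechanical substitution. Together with the fence.h changes later in this patch, RISCV_FULL_BARRIER emits the identical instruction the old string literal did on SMP builds, and expands to nothing on UP builds where the trailing fence is unnecessary; a sketch of the expansion:

    /* CONFIG_SMP=y: RISCV_FULL_BARRIER -> RISCV_FENCE_ASM(rw, rw)
     *                                  -> "\tfence rw,rw\n"
     * CONFIG_SMP=n: RISCV_FULL_BARRIER -> (empty), no fence emitted
     */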
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
index 1107525942..880b56d848 100644
--- a/arch/riscv/include/asm/barrier.h
+++ b/arch/riscv/include/asm/barrier.h
@@ -11,28 +11,27 @@
#define _ASM_RISCV_BARRIER_H
#ifndef __ASSEMBLY__
+#include <asm/fence.h>
#define nop() __asm__ __volatile__ ("nop")
#define __nops(n) ".rept " #n "\nnop\n.endr\n"
#define nops(n) __asm__ __volatile__ (__nops(n))
-#define RISCV_FENCE(p, s) \
- __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
/* These barriers need to enforce ordering on both devices or memory. */
-#define mb() RISCV_FENCE(iorw,iorw)
-#define rmb() RISCV_FENCE(ir,ir)
-#define wmb() RISCV_FENCE(ow,ow)
+#define __mb() RISCV_FENCE(iorw, iorw)
+#define __rmb() RISCV_FENCE(ir, ir)
+#define __wmb() RISCV_FENCE(ow, ow)
/* These barriers do not need to enforce ordering on devices, just memory. */
-#define __smp_mb() RISCV_FENCE(rw,rw)
-#define __smp_rmb() RISCV_FENCE(r,r)
-#define __smp_wmb() RISCV_FENCE(w,w)
+#define __smp_mb() RISCV_FENCE(rw, rw)
+#define __smp_rmb() RISCV_FENCE(r, r)
+#define __smp_wmb() RISCV_FENCE(w, w)
#define __smp_store_release(p, v) \
do { \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(rw,w); \
+ RISCV_FENCE(rw, w); \
WRITE_ONCE(*p, v); \
} while (0)
@@ -40,7 +39,7 @@ do { \
({ \
typeof(*p) ___p1 = READ_ONCE(*p); \
compiletime_assert_atomic_type(*p); \
- RISCV_FENCE(r,rw); \
+ RISCV_FENCE(r, rw); \
___p1; \
})
@@ -69,7 +68,7 @@ do { \
* instances the scheduler pairs this with an mb(), so nothing is necessary on
* the new hart.
*/
-#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw, iorw)
#include <asm-generic/barrier.h>
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 329d8244a9..880606b046 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -22,6 +22,16 @@
#include <asm-generic/bitops/fls.h>
#else
+#define __HAVE_ARCH___FFS
+#define __HAVE_ARCH___FLS
+#define __HAVE_ARCH_FFS
+#define __HAVE_ARCH_FLS
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
#include <asm/alternative-macros.h>
#include <asm/hwcap.h>
@@ -37,8 +47,6 @@
static __always_inline unsigned long variable__ffs(unsigned long word)
{
- int num;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -52,32 +60,7 @@ static __always_inline unsigned long variable__ffs(unsigned long word)
return word;
legacy:
- num = 0;
-#if BITS_PER_LONG == 64
- if ((word & 0xffffffff) == 0) {
- num += 32;
- word >>= 32;
- }
-#endif
- if ((word & 0xffff) == 0) {
- num += 16;
- word >>= 16;
- }
- if ((word & 0xff) == 0) {
- num += 8;
- word >>= 8;
- }
- if ((word & 0xf) == 0) {
- num += 4;
- word >>= 4;
- }
- if ((word & 0x3) == 0) {
- num += 2;
- word >>= 2;
- }
- if ((word & 0x1) == 0)
- num += 1;
- return num;
+ return generic___ffs(word);
}
/**
@@ -93,8 +76,6 @@ legacy:
static __always_inline unsigned long variable__fls(unsigned long word)
{
- int num;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
@@ -108,32 +89,7 @@ static __always_inline unsigned long variable__fls(unsigned long word)
return BITS_PER_LONG - 1 - word;
legacy:
- num = BITS_PER_LONG - 1;
-#if BITS_PER_LONG == 64
- if (!(word & (~0ul << 32))) {
- num -= 32;
- word <<= 32;
- }
-#endif
- if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
- num -= 16;
- word <<= 16;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
- num -= 8;
- word <<= 8;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
- num -= 4;
- word <<= 4;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
- num -= 2;
- word <<= 2;
- }
- if (!(word & (~0ul << (BITS_PER_LONG - 1))))
- num -= 1;
- return num;
+ return generic___fls(word);
}
/**
@@ -149,46 +105,23 @@ legacy:
static __always_inline int variable_ffs(int x)
{
- int r;
-
- if (!x)
- return 0;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
+ if (!x)
+ return 0;
+
asm volatile (".option push\n"
".option arch,+zbb\n"
CTZW "%0, %1\n"
".option pop\n"
- : "=r" (r) : "r" (x) :);
+ : "=r" (x) : "r" (x) :);
- return r + 1;
+ return x + 1;
legacy:
- r = 1;
- if (!(x & 0xffff)) {
- x >>= 16;
- r += 16;
- }
- if (!(x & 0xff)) {
- x >>= 8;
- r += 8;
- }
- if (!(x & 0xf)) {
- x >>= 4;
- r += 4;
- }
- if (!(x & 3)) {
- x >>= 2;
- r += 2;
- }
- if (!(x & 1)) {
- x >>= 1;
- r += 1;
- }
- return r;
+ return generic_ffs(x);
}
/**
@@ -204,46 +137,23 @@ legacy:
static __always_inline int variable_fls(unsigned int x)
{
- int r;
-
- if (!x)
- return 0;
-
asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
RISCV_ISA_EXT_ZBB, 1)
: : : : legacy);
+ if (!x)
+ return 0;
+
asm volatile (".option push\n"
".option arch,+zbb\n"
CLZW "%0, %1\n"
".option pop\n"
- : "=r" (r) : "r" (x) :);
+ : "=r" (x) : "r" (x) :);
- return 32 - r;
+ return 32 - x;
legacy:
- r = 32;
- if (!(x & 0xffff0000u)) {
- x <<= 16;
- r -= 16;
- }
- if (!(x & 0xff000000u)) {
- x <<= 8;
- r -= 8;
- }
- if (!(x & 0xf0000000u)) {
- x <<= 4;
- r -= 4;
- }
- if (!(x & 0xc0000000u)) {
- x <<= 2;
- r -= 2;
- }
- if (!(x & 0x80000000u)) {
- x <<= 1;
- r -= 1;
- }
- return r;
+ return generic_fls(x);
}
/**
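
The deleted fallbacks and the asm-generic helpers implement the same binary-search algorithm, so observable results are unchanged. Worked examples for reference (0x18 == 0b11000):

    /* 1-based helpers, zero input is defined:
     *   ffs(0x18) == 4      lowest set bit is bit 3
     *   fls(0x18) == 5      highest set bit is bit 4
     *   ffs(0) == fls(0) == 0
     * 0-based helpers, zero input is undefined:
     *   __ffs(0x18) == 3
     *   __fls(0x18) == 4
     */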
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
index 8f7a622570..fb9696d7a3 100644
--- a/arch/riscv/include/asm/cfi.h
+++ b/arch/riscv/include/asm/cfi.h
@@ -13,11 +13,28 @@ struct pt_regs;
#ifdef CONFIG_CFI_CLANG
enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#define __bpfcall
+static inline int cfi_get_offset(void)
+{
+ return 4;
+}
+
+#define cfi_get_offset cfi_get_offset
+extern u32 cfi_bpf_hash;
+extern u32 cfi_bpf_subprog_hash;
+extern u32 cfi_get_func_hash(void *func);
#else
static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
{
return BUG_TRAP_TYPE_NONE;
}
+
+#define cfi_bpf_hash 0U
+#define cfi_bpf_subprog_hash 0U
+static inline u32 cfi_get_func_hash(void *func)
+{
+ return 0;
+}
#endif /* CONFIG_CFI_CLANG */
#endif /* _ASM_RISCV_CFI_H */
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
index 2f4726d3cf..2fee65cc84 100644
--- a/arch/riscv/include/asm/cmpxchg.h
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -8,7 +8,6 @@
#include <linux/bug.h>
-#include <asm/barrier.h>
#include <asm/fence.h>
#define __xchg_relaxed(ptr, new, size) \
@@ -313,7 +312,7 @@
" bne %0, %z3, 1f\n" \
" sc.w.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" ((long)__old), "rJ" (__new) \
@@ -325,7 +324,7 @@
" bne %0, %z3, 1f\n" \
" sc.d.rl %1, %z4, %2\n" \
" bnez %1, 0b\n" \
- " fence rw, rw\n" \
+ RISCV_FULL_BARRIER \
"1:\n" \
: "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
: "rJ" (__old), "rJ" (__new) \
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
index 2ac955b511..aa103530a5 100644
--- a/arch/riscv/include/asm/compat.h
+++ b/arch/riscv/include/asm/compat.h
@@ -14,9 +14,28 @@
static inline int is_compat_task(void)
{
+ if (!IS_ENABLED(CONFIG_COMPAT))
+ return 0;
+
return test_thread_flag(TIF_32BIT);
}
+static inline int is_compat_thread(struct thread_info *thread)
+{
+ if (!IS_ENABLED(CONFIG_COMPAT))
+ return 0;
+
+ return test_ti_thread_flag(thread, TIF_32BIT);
+}
+
+static inline void set_compat_task(bool is_compat)
+{
+ if (is_compat)
+ set_thread_flag(TIF_32BIT);
+ else
+ clear_thread_flag(TIF_32BIT);
+}
+
struct compat_user_regs_struct {
compat_ulong_t pc;
compat_ulong_t ra;
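
Since IS_ENABLED(CONFIG_COMPAT) is a compile-time constant, these helpers fold to constants on kernels built without compat support, which is what lets later hunks (elf.h, pgtable.h, processor.h) drop their own #ifdef CONFIG_COMPAT blocks. A sketch of what the compiler effectively sees with CONFIG_COMPAT=n:

    static inline int is_compat_task(void)
    {
            return 0;       /* IS_ENABLED() == 0, the flag test is dead code */
    }
    /* ...so is_compat_task() ? compat_val : native_val folds at compile time. */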
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index 0bd11862b7..3478054461 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * Copyright 2022-2023 Rivos, Inc
+ * Copyright 2022-2024 Rivos, Inc
*/
#ifndef _ASM_CPUFEATURE_H
@@ -28,29 +28,38 @@ struct riscv_isainfo {
DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
-DECLARE_PER_CPU(long, misaligned_access_speed);
-
/* Per-cpu ISA extensions. */
extern struct riscv_isainfo hart_isa[NR_CPUS];
void riscv_user_isa_enable(void);
-#ifdef CONFIG_RISCV_MISALIGNED
-bool unaligned_ctl_available(void);
-bool check_unaligned_access_emulated(int cpu);
+#if defined(CONFIG_RISCV_MISALIGNED)
+bool check_unaligned_access_emulated_all_cpus(void);
void unaligned_emulation_finish(void);
+bool unaligned_ctl_available(void);
+DECLARE_PER_CPU(long, misaligned_access_speed);
#else
static inline bool unaligned_ctl_available(void)
{
return false;
}
+#endif
+
+#if defined(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS)
+DECLARE_STATIC_KEY_FALSE(fast_unaligned_access_speed_key);
-static inline bool check_unaligned_access_emulated(int cpu)
+static __always_inline bool has_fast_unaligned_accesses(void)
{
- return false;
+ return static_branch_likely(&fast_unaligned_access_speed_key);
+}
+#else
+static __always_inline bool has_fast_unaligned_accesses(void)
+{
+ if (IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS))
+ return true;
+ else
+ return false;
}
-
-static inline void unaligned_emulation_finish(void) {}
#endif
unsigned long riscv_get_elf_hwcap(void);
@@ -135,6 +144,4 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi
return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
}
-DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key);
-
#endif
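
has_fast_unaligned_accesses() gives callers a cheap capability check: under CONFIG_RISCV_PROBE_UNALIGNED_ACCESS it is a static branch patched once boot-time probing finishes, otherwise it collapses to a compile-time constant. A hypothetical caller, as a sketch (copy_aligned_fallback() is illustrative, not part of this patch):

    if (has_fast_unaligned_accesses())
            memcpy(dst, src, len);                  /* misaligned access is cheap */
    else
            copy_aligned_fallback(dst, src, len);   /* hypothetical slow path */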
diff --git a/arch/riscv/include/asm/crash_core.h b/arch/riscv/include/asm/crash_reserve.h
index e1874b23fe..013962e635 100644
--- a/arch/riscv/include/asm/crash_core.h
+++ b/arch/riscv/include/asm/crash_reserve.h
@@ -1,6 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0-only */
-#ifndef _RISCV_CRASH_CORE_H
-#define _RISCV_CRASH_CORE_H
+#ifndef _RISCV_CRASH_RESERVE_H
+#define _RISCV_CRASH_RESERVE_H
#define CRASH_ALIGN PMD_SIZE
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 2468c55933..9d1b079327 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -281,7 +281,7 @@
#define CSR_HPMCOUNTER30H 0xc9e
#define CSR_HPMCOUNTER31H 0xc9f
-#define CSR_SSCOUNTOVF 0xda0
+#define CSR_SCOUNTOVF 0xda0
#define CSR_SSTATUS 0x100
#define CSR_SIE 0x104
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index 06c236bfab..c7aea7886d 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -53,13 +53,9 @@ extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
#ifdef CONFIG_64BIT
-#ifdef CONFIG_COMPAT
-#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+#define STACK_RND_MASK (is_compat_task() ? \
0x7ff >> (PAGE_SHIFT - 12) : \
0x3ffff >> (PAGE_SHIFT - 12))
-#else
-#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
-#endif
#endif
/*
@@ -139,10 +135,7 @@ do { \
#ifdef CONFIG_COMPAT
#define SET_PERSONALITY(ex) \
-do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
- set_thread_flag(TIF_32BIT); \
- else \
- clear_thread_flag(TIF_32BIT); \
+do { set_compat_task((ex).e_ident[EI_CLASS] == ELFCLASS32); \
if (personality(current->personality) != PER_LINUX32) \
set_personality(PER_LINUX | \
(current->personality & (~PER_MASK))); \
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index 9bad9dfa2f..efd851e1b4 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -12,8 +12,8 @@
#include <asm/vendorid_list.h>
#ifdef CONFIG_ERRATA_ANDES
-#define ERRATA_ANDESTECH_NO_IOCP 0
-#define ERRATA_ANDESTECH_NUMBER 1
+#define ERRATA_ANDES_NO_IOCP 0
+#define ERRATA_ANDES_NUMBER 1
#endif
#ifdef CONFIG_ERRATA_SIFIVE
@@ -112,15 +112,6 @@ asm volatile(ALTERNATIVE( \
#define THEAD_C9XX_RV_IRQ_PMU 17
#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
-#define ALT_SBI_PMU_OVERFLOW(__ovl) \
-asm volatile(ALTERNATIVE( \
- "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
- "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
- THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
- CONFIG_ERRATA_THEAD_PMU) \
- : "=r" (__ovl) : \
- : "memory")
-
#endif /* __ASSEMBLY__ */
#endif
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
index 2b443a3a48..6bcd80325d 100644
--- a/arch/riscv/include/asm/fence.h
+++ b/arch/riscv/include/asm/fence.h
@@ -1,12 +1,18 @@
#ifndef _ASM_RISCV_FENCE_H
#define _ASM_RISCV_FENCE_H
+#define RISCV_FENCE_ASM(p, s) "\tfence " #p "," #s "\n"
+#define RISCV_FENCE(p, s) \
+ ({ __asm__ __volatile__ (RISCV_FENCE_ASM(p, s) : : : "memory"); })
+
#ifdef CONFIG_SMP
-#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
-#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#define RISCV_ACQUIRE_BARRIER RISCV_FENCE_ASM(r, rw)
+#define RISCV_RELEASE_BARRIER RISCV_FENCE_ASM(rw, w)
+#define RISCV_FULL_BARRIER RISCV_FENCE_ASM(rw, rw)
#else
#define RISCV_ACQUIRE_BARRIER
#define RISCV_RELEASE_BARRIER
+#define RISCV_FULL_BARRIER
#endif
#endif /* _ASM_RISCV_FENCE_H */
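
RISCV_FENCE_ASM() yields a bare instruction string that can be pasted into larger asm templates (as the atomic.h and cmpxchg.h hunks above do), while RISCV_FENCE() wraps it in a standalone asm statement. For example, the reworked mb() in barrier.h preprocesses to exactly what the old open-coded macro emitted:

    RISCV_FENCE(iorw, iorw)
    /* expands to: */
    __asm__ __volatile__ ("\tfence iorw,iorw\n" : : : "memory");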
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
index 15055f9df4..1276d7d9ca 100644
--- a/arch/riscv/include/asm/ftrace.h
+++ b/arch/riscv/include/asm/ftrace.h
@@ -13,16 +13,6 @@
#endif
#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
-/*
- * Clang prior to 13 had "mcount" instead of "_mcount":
- * https://reviews.llvm.org/D98881
- */
-#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
-#define MCOUNT_NAME _mcount
-#else
-#define MCOUNT_NAME mcount
-#endif
-
#define ARCH_SUPPORTS_FTRACE_OPS 1
#ifndef __ASSEMBLY__
@@ -30,7 +20,7 @@ extern void *return_address(unsigned int level);
#define ftrace_return_address(n) return_address(n)
-void MCOUNT_NAME(void);
+void _mcount(void);
static inline unsigned long ftrace_call_adjust(unsigned long addr)
{
return addr;
@@ -80,7 +70,7 @@ struct dyn_arch_ftrace {
* both auipc and jalr at the same time.
*/
-#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
+#define MCOUNT_ADDR ((unsigned long)_mcount)
#define JALR_SIGN_MASK (0x00000800)
#define JALR_OFFSET_MASK (0x00000fff)
#define AUIPC_OFFSET_MASK (0xfffff000)
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index 1f2d2599c6..e17d0078a6 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -80,6 +80,7 @@
#define RISCV_ISA_EXT_ZFA 71
#define RISCV_ISA_EXT_ZTSO 72
#define RISCV_ISA_EXT_ZACAS 73
+#define RISCV_ISA_EXT_XANDESPMU 74
#define RISCV_ISA_EXT_XLINUXENVCFG 127
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
index 42497d487a..1c5c641075 100644
--- a/arch/riscv/include/asm/io.h
+++ b/arch/riscv/include/asm/io.h
@@ -47,10 +47,10 @@
* sufficient to ensure this works sanely on controllers that support I/O
* writes.
*/
-#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
-#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
-#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
-#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+#define __io_pbr() RISCV_FENCE(io, i)
+#define __io_par(v) RISCV_FENCE(i, ior)
+#define __io_pbw() RISCV_FENCE(iow, o)
+#define __io_paw() RISCV_FENCE(o, io)
/*
* Accesses from a single hart to a single I/O address must be ordered. This
diff --git a/arch/riscv/include/asm/membarrier.h b/arch/riscv/include/asm/membarrier.h
new file mode 100644
index 0000000000..47b240d0d5
--- /dev/null
+++ b/arch/riscv/include/asm/membarrier.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _ASM_RISCV_MEMBARRIER_H
+#define _ASM_RISCV_MEMBARRIER_H
+
+static inline void membarrier_arch_switch_mm(struct mm_struct *prev,
+ struct mm_struct *next,
+ struct task_struct *tsk)
+{
+ /*
+ * Only need the full barrier when switching between processes.
+ * Barrier when switching from kernel to userspace is not
+ * required here, given that it is implied by mmdrop(). Barrier
+ * when switching from userspace to kernel is not needed after
+ * store to rq->curr.
+ */
+ if (IS_ENABLED(CONFIG_SMP) &&
+ likely(!(atomic_read(&next->membarrier_state) &
+ (MEMBARRIER_STATE_PRIVATE_EXPEDITED |
+ MEMBARRIER_STATE_GLOBAL_EXPEDITED)) || !prev))
+ return;
+
+ /*
+ * The membarrier system call requires a full memory barrier
+ * after storing to rq->curr, before going back to user-space.
+ *
+ * This barrier is also needed for the SYNC_CORE command when
+ * switching between processes; in particular, on a transition
+ * from a thread belonging to another mm to a thread belonging
+ * to the mm for which a membarrier SYNC_CORE is done on CPU0:
+ *
+ * - [CPU0] sets all bits in the mm icache_stale_mask (in
+ * prepare_sync_core_cmd());
+ *
+ * - [CPU1] stores to rq->curr (by the scheduler);
+ *
+ * - [CPU0] loads rq->curr within membarrier and observes
+ * cpu_rq(1)->curr->mm != mm, so the IPI is skipped on
+ * CPU1; this means membarrier relies on switch_mm() to
+ * issue the sync-core;
+ *
+ * - [CPU1] switch_mm() loads icache_stale_mask; if the bit
+ * is zero, switch_mm() may incorrectly skip the sync-core.
+ *
+ * Matches a full barrier in the proximity of the membarrier
+ * system call entry.
+ */
+ smp_mb();
+}
+
+#endif /* _ASM_RISCV_MEMBARRIER_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
index 4c58ee7f95..06cadfd7a2 100644
--- a/arch/riscv/include/asm/mmio.h
+++ b/arch/riscv/include/asm/mmio.h
@@ -12,6 +12,7 @@
#define _ASM_RISCV_MMIO_H
#include <linux/types.h>
+#include <asm/fence.h>
#include <asm/mmiowb.h>
/* Generic IO read/write. These perform native-endian accesses. */
@@ -131,8 +132,8 @@ static inline u64 __raw_readq(const volatile void __iomem *addr)
* doesn't define any ordering between the memory space and the I/O space.
*/
#define __io_br() do {} while (0)
-#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
-#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_ar(v) RISCV_FENCE(i, ir)
+#define __io_bw() RISCV_FENCE(w, o)
#define __io_aw() mmiowb_set_pending()
#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
index 0b2333e71f..52ce4a399d 100644
--- a/arch/riscv/include/asm/mmiowb.h
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -7,7 +7,7 @@
* "o,w" is sufficient to ensure that all writes to the device have completed
* before the write to the spinlock is allowed to commit.
*/
-#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+#define mmiowb() RISCV_FENCE(o, w)
#include <linux/smp.h>
#include <asm-generic/mmiowb.h>
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
index 94b3d6930f..115ac98b8d 100644
--- a/arch/riscv/include/asm/page.h
+++ b/arch/riscv/include/asm/page.h
@@ -12,7 +12,7 @@
#include <linux/pfn.h>
#include <linux/const.h>
-#define PAGE_SHIFT (12)
+#define PAGE_SHIFT CONFIG_PAGE_SHIFT
#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
#define PAGE_MASK (~(PAGE_SIZE - 1))
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
index c80bb9990d..deaf971253 100644
--- a/arch/riscv/include/asm/pgalloc.h
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -95,13 +95,19 @@ static inline void pud_free(struct mm_struct *mm, pud_t *pud)
__pud_free(mm, pud);
}
-#define __pud_free_tlb(tlb, pud, addr) \
-do { \
- if (pgtable_l4_enabled) { \
- pagetable_pud_dtor(virt_to_ptdesc(pud)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pud)); \
- } \
-} while (0)
+static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
+ unsigned long addr)
+{
+ if (pgtable_l4_enabled) {
+ struct ptdesc *ptdesc = virt_to_ptdesc(pud);
+
+ pagetable_pud_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+ }
+}
#define p4d_alloc_one p4d_alloc_one
static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
@@ -130,11 +136,16 @@ static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
__p4d_free(mm, p4d);
}
-#define __p4d_free_tlb(tlb, p4d, addr) \
-do { \
- if (pgtable_l5_enabled) \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(p4d)); \
-} while (0)
+static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
+ unsigned long addr)
+{
+ if (pgtable_l5_enabled) {
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, virt_to_ptdesc(p4d));
+ else
+ tlb_remove_ptdesc(tlb, virt_to_ptdesc(p4d));
+ }
+}
#endif /* __PAGETABLE_PMD_FOLDED */
static inline void sync_kernel_mappings(pgd_t *pgd)
@@ -159,19 +170,31 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
#ifndef __PAGETABLE_PMD_FOLDED
-#define __pmd_free_tlb(tlb, pmd, addr) \
-do { \
- pagetable_pmd_dtor(virt_to_ptdesc(pmd)); \
- tlb_remove_page_ptdesc((tlb), virt_to_ptdesc(pmd)); \
-} while (0)
+static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = virt_to_ptdesc(pmd);
+
+ pagetable_pmd_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
#endif /* __PAGETABLE_PMD_FOLDED */
-#define __pte_free_tlb(tlb, pte, buf) \
-do { \
- pagetable_pte_dtor(page_ptdesc(pte)); \
- tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\
-} while (0)
+static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t pte,
+ unsigned long addr)
+{
+ struct ptdesc *ptdesc = page_ptdesc(pte);
+
+ pagetable_pte_dtor(ptdesc);
+ if (riscv_use_ipi_for_rfence())
+ tlb_remove_page_ptdesc(tlb, ptdesc);
+ else
+ tlb_remove_ptdesc(tlb, ptdesc);
+}
#endif /* CONFIG_MMU */
#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index b99bd66107..221a5c1ee2 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -190,7 +190,7 @@ static inline int pud_bad(pud_t pud)
}
#define pud_leaf pud_leaf
-static inline int pud_leaf(pud_t pud)
+static inline bool pud_leaf(pud_t pud)
{
return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
}
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 39c6bb8254..6afd6bb488 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -127,17 +127,11 @@
#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
-#ifdef CONFIG_COMPAT
#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
#else
-#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
-#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
-#endif /* CONFIG_COMPAT */
-
-#else
#include <asm/pgtable-32.h>
#endif /* CONFIG_64BIT */
@@ -241,7 +235,7 @@ static inline int pmd_bad(pmd_t pmd)
}
#define pmd_leaf pmd_leaf
-static inline int pmd_leaf(pmd_t pmd)
+static inline bool pmd_leaf(pmd_t pmd)
{
return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
}
@@ -519,23 +513,25 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
WRITE_ONCE(*ptep, pteval);
}
-void flush_icache_pte(pte_t pte);
+void flush_icache_pte(struct mm_struct *mm, pte_t pte);
-static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+static inline void __set_pte_at(struct mm_struct *mm, pte_t *ptep, pte_t pteval)
{
if (pte_present(pteval) && pte_exec(pteval))
- flush_icache_pte(pteval);
+ flush_icache_pte(mm, pteval);
set_pte(ptep, pteval);
}
+#define PFN_PTE_SHIFT _PAGE_PFN_SHIFT
+
static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
pte_t *ptep, pte_t pteval, unsigned int nr)
{
page_table_check_ptes_set(mm, ptep, pteval, nr);
for (;;) {
- __set_pte_at(ptep, pteval);
+ __set_pte_at(mm, ptep, pteval);
if (--nr == 0)
break;
ptep++;
@@ -547,7 +543,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
static inline void pte_clear(struct mm_struct *mm,
unsigned long addr, pte_t *ptep)
{
- __set_pte_at(ptep, __pte(0));
+ __set_pte_at(mm, ptep, __pte(0));
}
#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */
@@ -597,6 +593,12 @@ static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
return ptep_test_and_clear_young(vma, address, ptep);
}
+#define pgprot_nx pgprot_nx
+static inline pgprot_t pgprot_nx(pgprot_t _prot)
+{
+ return __pgprot(pgprot_val(_prot) & ~_PAGE_EXEC);
+}
+
#define pgprot_noncached pgprot_noncached
static inline pgprot_t pgprot_noncached(pgprot_t _prot)
{
@@ -662,6 +664,12 @@ static inline int pmd_write(pmd_t pmd)
return pte_write(pmd_pte(pmd));
}
+#define pud_write pud_write
+static inline int pud_write(pud_t pud)
+{
+ return pte_write(pud_pte(pud));
+}
+
#define pmd_dirty pmd_dirty
static inline int pmd_dirty(pmd_t pmd)
{
@@ -713,14 +721,14 @@ static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
pmd_t *pmdp, pmd_t pmd)
{
page_table_check_pmd_set(mm, pmdp, pmd);
- return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+ return __set_pte_at(mm, (pte_t *)pmdp, pmd_pte(pmd));
}
static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
pud_t *pudp, pud_t pud)
{
page_table_check_pud_set(mm, pudp, pud);
- return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+ return __set_pte_at(mm, (pte_t *)pudp, pud_pte(pud));
}
#ifdef CONFIG_PAGE_TABLE_CHECK
@@ -871,8 +879,8 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
#ifdef CONFIG_COMPAT
-#define TASK_SIZE_32 (_AC(0x80000000, UL))
-#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE)
+#define TASK_SIZE (is_compat_task() ? \
TASK_SIZE_32 : TASK_SIZE_64)
#else
#define TASK_SIZE TASK_SIZE_64
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index a8509cc31a..0faf5f161f 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -14,22 +14,21 @@
#include <asm/ptrace.h>
-#ifdef CONFIG_64BIT
-#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
-#define STACK_TOP_MAX TASK_SIZE
-
+/*
+ * addr is a hint to the maximum userspace address that mmap should provide, so
+ * this macro needs to return the largest address space available so that
+ * mmap_end < addr, where mmap_end is the top of that address space.
+ * See Documentation/arch/riscv/vm-layout.rst for more details.
+ */
#define arch_get_mmap_end(addr, len, flags) \
({ \
unsigned long mmap_end; \
typeof(addr) _addr = (addr); \
- if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
- mmap_end = STACK_TOP_MAX; \
- else if ((_addr) >= VA_USER_SV57) \
+ if ((_addr) == 0 || is_compat_task() || \
+ ((_addr + len) > BIT(VA_BITS - 1))) \
mmap_end = STACK_TOP_MAX; \
- else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
- mmap_end = VA_USER_SV48; \
else \
- mmap_end = VA_USER_SV39; \
+ mmap_end = (_addr + len); \
mmap_end; \
})
@@ -39,17 +38,17 @@
typeof(addr) _addr = (addr); \
typeof(base) _base = (base); \
unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
- if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ if ((_addr) == 0 || is_compat_task() || \
+ ((_addr + len) > BIT(VA_BITS - 1))) \
mmap_base = (_base); \
- else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
- mmap_base = VA_USER_SV57 - rnd_gap; \
- else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
- mmap_base = VA_USER_SV48 - rnd_gap; \
else \
- mmap_base = VA_USER_SV39 - rnd_gap; \
+ mmap_base = (_addr + len) - rnd_gap; \
mmap_base; \
})
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX TASK_SIZE_64
#else
#define DEFAULT_MAP_WINDOW TASK_SIZE
#define STACK_TOP_MAX TASK_SIZE
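
The rewritten macros honor the hint directly instead of bucketing it into sv39/sv48/sv57 windows. A worked example on a VA_BITS == 57 kernel, as a sketch:

    /* hint == 0, or a compat task   -> mmap_end = STACK_TOP_MAX (full window)
     * hint + len <= BIT(56)         -> mmap_end = hint + len, so a caller
     *                                  hinting a low address (e.g. a JIT that
     *                                  wants 39-bit pointers) never receives
     *                                  a mapping above its hint
     * hint + len >  BIT(56)         -> mmap_end = STACK_TOP_MAX again
     */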
diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h
deleted file mode 100644
index 3c9ea6dd5a..0000000000
--- a/arch/riscv/include/asm/ptdump.h
+++ /dev/null
@@ -1,22 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * Copyright (C) 2019 SiFive
- */
-
-#ifndef _ASM_RISCV_PTDUMP_H
-#define _ASM_RISCV_PTDUMP_H
-
-void ptdump_check_wx(void);
-
-#ifdef CONFIG_DEBUG_WX
-static inline void debug_checkwx(void)
-{
- ptdump_check_wx();
-}
-#else
-static inline void debug_checkwx(void)
-{
-}
-#endif
-
-#endif /* _ASM_RISCV_PTDUMP_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 6e68f8dff7..0fab508a65 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -370,6 +370,8 @@ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1
static inline void sbi_init(void) {}
#endif /* CONFIG_RISCV_SBI */
+unsigned long riscv_get_mvendorid(void);
+unsigned long riscv_get_marchid(void);
unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
unsigned long riscv_cached_marchid(unsigned int cpu_id);
unsigned long riscv_cached_mimpid(unsigned int cpu_id);
diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h
index 54efbf523d..adb50f3ec2 100644
--- a/arch/riscv/include/asm/simd.h
+++ b/arch/riscv/include/asm/simd.h
@@ -34,9 +34,9 @@ static __must_check inline bool may_use_simd(void)
return false;
/*
- * Nesting is acheived in preempt_v by spreading the control for
+ * Nesting is achieved in preempt_v by spreading the control for
* preemptible and non-preemptible kernel-mode Vector into two fields.
- * Always try to match with prempt_v if kernel V-context exists. Then,
+ * Always try to match with preempt_v if kernel V-context exists. Then,
* fallback to check non preempt_v if nesting happens, or if the config
* is not set.
*/
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
index 491296a335..4718096fa5 100644
--- a/arch/riscv/include/asm/suspend.h
+++ b/arch/riscv/include/asm/suspend.h
@@ -56,4 +56,7 @@ int hibernate_resume_nonboot_cpu_disable(void);
asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
unsigned long cpu_resume);
asmlinkage int hibernate_core_restore_code(void);
+bool riscv_sbi_hsm_is_supported(void);
+bool riscv_sbi_suspend_state_is_valid(u32 state);
+int riscv_sbi_hart_suspend(u32 state);
#endif
diff --git a/arch/riscv/include/asm/sync_core.h b/arch/riscv/include/asm/sync_core.h
new file mode 100644
index 0000000000..9153016da8
--- /dev/null
+++ b/arch/riscv/include/asm/sync_core.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_SYNC_CORE_H
+#define _ASM_RISCV_SYNC_CORE_H
+
+/*
+ * RISC-V implements return to user-space through an xRET instruction,
+ * which is not core serializing.
+ */
+static inline void sync_core_before_usermode(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#ifdef CONFIG_SMP
+/*
+ * Ensure the next switch_mm() on every CPU issues a core serializing
+ * instruction for the given @mm.
+ */
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+ cpumask_setall(&mm->context.icache_stale_mask);
+}
+#else
+static inline void prepare_sync_core_cmd(struct mm_struct *mm)
+{
+}
+#endif /* CONFIG_SMP */
+
+#endif /* _ASM_RISCV_SYNC_CORE_H */
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
index eeec04b7da..ac80216549 100644
--- a/arch/riscv/include/asm/syscall_wrapper.h
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -12,25 +12,52 @@
asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
-#define SC_RISCV_REGS_TO_ARGS(x, ...) \
- __MAP(x,__SC_ARGS \
- ,,regs->orig_a0,,regs->a1,,regs->a2 \
+#ifdef CONFIG_64BIT
+
+#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \
+ static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static long __se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->orig_a0,,regs->a1,,regs->a2 \
,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+#else
+/*
+ * Use type aliasing to ensure registers a0-a6 are correctly passed to the syscall
+ * implementation when >word-size arguments are used.
+ */
+#define __SYSCALL_SE_DEFINEx(x, prefix, name, ...) \
+ __diag_push(); \
+ __diag_ignore(GCC, 8, "-Wattribute-alias", \
+ "Type aliasing is used to sanitize syscall arguments"); \
+ static long __se_##prefix##name(ulong, ulong, ulong, ulong, ulong, ulong, \
+ ulong) \
+ __attribute__((alias(__stringify(___se_##prefix##name)))); \
+ __diag_pop(); \
+ static long noinline ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ __used; \
+ static long ___se_##prefix##name(__MAP(x,__SC_LONG,__VA_ARGS__))
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ regs->orig_a0,regs->a1,regs->a2,regs->a3,regs->a4,regs->a5,regs->a6
+
+#endif /* CONFIG_64BIT */
+
#ifdef CONFIG_COMPAT
#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \
- static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
+ __SYSCALL_SE_DEFINEx(x, compat_sys, name, __VA_ARGS__) \
{ \
- return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
} \
- static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
{ \
- return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
} \
static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
@@ -51,19 +78,18 @@ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
#define __SYSCALL_DEFINEx(x, name, ...) \
asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \
ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
- asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
- { \
- return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
- } \
- static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ __SYSCALL_SE_DEFINEx(x, sys, name, __VA_ARGS__) \
{ \
long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
__MAP(x,__SC_TEST,__VA_ARGS__); \
__PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
return ret; \
} \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
#define SYSCALL_DEFINE0(sname) \
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 50b63b5c15..1f6c38420d 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -10,6 +10,24 @@ struct mmu_gather;
static void tlb_flush(struct mmu_gather *tlb);
+#ifdef CONFIG_MMU
+#include <linux/swap.h>
+
+/*
+ * While riscv platforms with riscv_ipi_for_rfence as true require an IPI to
+ * perform TLB shootdown, some platforms with riscv_ipi_for_rfence as false use
+ * SBI to perform TLB shootdown. To keep software pagetable walkers safe in this
+ * case we switch to RCU based table free (MMU_GATHER_RCU_TABLE_FREE). See the
+ * comment below 'ifdef CONFIG_MMU_GATHER_RCU_TABLE_FREE' in include/asm-generic/tlb.h
+ * for more details.
+ */
+static inline void __tlb_remove_table(void *table)
+{
+ free_page_and_swap_cache(table);
+}
+
+#endif /* CONFIG_MMU */
+
#define tlb_flush tlb_flush
#include <asm-generic/tlb.h>
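
This pairs with the pgalloc.h hunks earlier in the patch: page-table pages take the RCU-deferred path only when shootdown goes through SBI, because an IPI-based shootdown already synchronizes with lockless page-table walkers. Every free site follows the same shape, sketched here:

    if (riscv_use_ipi_for_rfence())
            tlb_remove_page_ptdesc(tlb, ptdesc);    /* IPI flush: free directly */
    else
            tlb_remove_ptdesc(tlb, ptdesc);         /* SBI flush: RCU-deferred,
                                                       lands in __tlb_remove_table() */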
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index 0cd6f0a027..731dcd0ed4 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -284,4 +284,15 @@ static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
#endif /* CONFIG_RISCV_ISA_V */
+/*
+ * Return the implementation's vlen value.
+ *
+ * riscv_v_vsize contains the value of "32 vector registers with vlenb length"
+ * so rebuild the vlen value in bits from it.
+ */
+static inline int riscv_vector_vlen(void)
+{
+ return riscv_v_vsize / 32 * 8;
+}
+
#endif /* ! __ASM_RISCV_VECTOR_H */
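
A worked example of the arithmetic, as a sketch: on a hart with VLEN = 128 bits, vlenb is 16 bytes, so riscv_v_vsize holds 32 * 16 = 512 bytes of register state and the helper recovers 512 / 32 * 8 = 128 bits.

    /* VLEN = 128 bits     =>  vlenb = 16 bytes
     * riscv_v_vsize       =   32 regs * 16 bytes = 512
     * riscv_vector_vlen() =   512 / 32 * 8 = 128
     */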
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
index e55407ace0..2f2bb0c84f 100644
--- a/arch/riscv/include/asm/vendorid_list.h
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -5,7 +5,7 @@
#ifndef ASM_VENDOR_LIST_H
#define ASM_VENDOR_LIST_H
-#define ANDESTECH_VENDOR_ID 0x31e
+#define ANDES_VENDOR_ID 0x31e
#define SIFIVE_VENDOR_ID 0x489
#define THEAD_VENDOR_ID 0x5b7
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
index f3f031e341..3802cda71a 100644
--- a/arch/riscv/include/asm/word-at-a-time.h
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -10,7 +10,8 @@
#include <asm/asm-extable.h>
-#include <linux/kernel.h>
+#include <linux/bitops.h>
+#include <linux/wordpart.h>
struct word_at_a_time {
const unsigned long one_bits, high_bits;