Diffstat: 124 files changed, 9986 insertions(+), 0 deletions(-)
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild new file mode 100644 index 000000000..504f8b7e7 --- /dev/null +++ b/arch/riscv/include/asm/Kbuild @@ -0,0 +1,11 @@ +# SPDX-License-Identifier: GPL-2.0 +generic-y += early_ioremap.h +generic-y += flat.h +generic-y += kvm_para.h +generic-y += parport.h +generic-y += spinlock.h +generic-y += spinlock_types.h +generic-y += qrwlock.h +generic-y += qrwlock_types.h +generic-y += user.h +generic-y += vmlinux.lds.h diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h new file mode 100644 index 000000000..ec2f3f1b8 --- /dev/null +++ b/arch/riscv/include/asm/alternative-macros.h @@ -0,0 +1,205 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_ALTERNATIVE_MACROS_H +#define __ASM_ALTERNATIVE_MACROS_H + +#ifdef CONFIG_RISCV_ALTERNATIVE + +#ifdef __ASSEMBLY__ + +.macro ALT_ENTRY oldptr newptr vendor_id errata_id new_len + RISCV_PTR \oldptr + RISCV_PTR \newptr + REG_ASM \vendor_id + REG_ASM \new_len + .word \errata_id +.endm + +.macro ALT_NEW_CONTENT vendor_id, errata_id, enable = 1, new_c : vararg + .if \enable + .pushsection .alternative, "a" + ALT_ENTRY 886b, 888f, \vendor_id, \errata_id, 889f - 888f + .popsection + .subsection 1 +888 : + .option push + .option norvc + .option norelax + \new_c + .option pop +889 : + .org . - (889b - 888b) + (887b - 886b) + .org . - (887b - 886b) + (889b - 888b) + .previous + .endif +.endm + +.macro __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, enable +886 : + .option push + .option norvc + .option norelax + \old_c + .option pop +887 : + ALT_NEW_CONTENT \vendor_id, \errata_id, \enable, \new_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k) + +.macro __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, enable_1, \ + new_c_2, vendor_id_2, errata_id_2, enable_2 +886 : + .option push + .option norvc + .option norelax + \old_c + .option pop +887 : + ALT_NEW_CONTENT \vendor_id_1, \errata_id_1, \enable_1, \new_c_1 + ALT_NEW_CONTENT \vendor_id_2, \errata_id_2, \enable_2, \new_c_2 +.endm + +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ + CONFIG_k_1, \ + new_c_2, vendor_id_2, errata_id_2, \ + CONFIG_k_2) \ + __ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, errata_id_1, \ + IS_ENABLED(CONFIG_k_1), \ + new_c_2, vendor_id_2, errata_id_2, \ + IS_ENABLED(CONFIG_k_2) + +#else /* !__ASSEMBLY__ */ + +#include <asm/asm.h> +#include <linux/stringify.h> + +#define ALT_ENTRY(oldptr, newptr, vendor_id, errata_id, newlen) \ + RISCV_PTR " " oldptr "\n" \ + RISCV_PTR " " newptr "\n" \ + REG_ASM " " vendor_id "\n" \ + REG_ASM " " newlen "\n" \ + ".word " errata_id "\n" + +#define ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) \ + ".if " __stringify(enable) " == 1\n" \ + ".pushsection .alternative, \"a\"\n" \ + ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(errata_id), "889f - 888f") \ + ".popsection\n" \ + ".subsection 1\n" \ + "888 :\n" \ + ".option push\n" \ + ".option norvc\n" \ + ".option norelax\n" \ + new_c "\n" \ + ".option pop\n" \ + "889 :\n" \ + ".org . - (887b - 886b) + (889b - 888b)\n" \ + ".org . 
- (889b - 888b) + (887b - 886b)\n" \ ".previous\n" \ ".endif\n" + +#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, enable) \ + "886 :\n" \ + ".option push\n" \ + ".option norvc\n" \ + ".option norelax\n" \ + old_c "\n" \ + ".option pop\n" \ + "887 :\n" \ + ALT_NEW_CONTENT(vendor_id, errata_id, enable, new_c) + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, IS_ENABLED(CONFIG_k)) + +#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ + enable_1, \ + new_c_2, vendor_id_2, errata_id_2, \ + enable_2) \ + "886 :\n" \ + ".option push\n" \ + ".option norvc\n" \ + ".option norelax\n" \ + old_c "\n" \ + ".option pop\n" \ + "887 :\n" \ + ALT_NEW_CONTENT(vendor_id_1, errata_id_1, enable_1, new_c_1) \ + ALT_NEW_CONTENT(vendor_id_2, errata_id_2, enable_2, new_c_2) + +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ + CONFIG_k_1, \ + new_c_2, vendor_id_2, errata_id_2, \ + CONFIG_k_2) \ + __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ + IS_ENABLED(CONFIG_k_1), \ + new_c_2, vendor_id_2, errata_id_2, \ + IS_ENABLED(CONFIG_k_2)) + +#endif /* __ASSEMBLY__ */ + +#else /* CONFIG_RISCV_ALTERNATIVE */ +#ifdef __ASSEMBLY__ + +.macro __ALTERNATIVE_CFG old_c + \old_c +.endm + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG old_c + +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ + CONFIG_k_1, \ + new_c_2, vendor_id_2, errata_id_2, \ + CONFIG_k_2) \ + __ALTERNATIVE_CFG old_c + +#else /* !__ASSEMBLY__ */ + +#define __ALTERNATIVE_CFG(old_c) \ + old_c "\n" + +#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, errata_id, CONFIG_k) \ + __ALTERNATIVE_CFG(old_c) + +#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, errata_id_1, \ + CONFIG_k_1, \ + new_c_2, vendor_id_2, errata_id_2, \ + CONFIG_k_2) \ + __ALTERNATIVE_CFG(old_c) + +#endif /* __ASSEMBLY__ */ +#endif /* CONFIG_RISCV_ALTERNATIVE */ + +/* + * Usage: + * ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) + * in the assembly code. Otherwise, + * asm(ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k)); + * + * old_content: The old content, which may be replaced with the new content. + * new_content: The new content. + * vendor_id: The CPU vendor ID. + * errata_id: The errata ID. + * CONFIG_k: The Kconfig option for this erratum. When the Kconfig option is + * disabled, the old content will always be executed. + */ +#define ALTERNATIVE(old_content, new_content, vendor_id, errata_id, CONFIG_k) \ + _ALTERNATIVE_CFG(old_content, new_content, vendor_id, errata_id, CONFIG_k) + +/* + * If a vendor wants to replace an old_content, but another vendor has already + * used ALTERNATIVE() to patch its customized content at the same location, + * that vendor can create a new macro ALTERNATIVE_2() based on the following + * sample code and then replace ALTERNATIVE() with ALTERNATIVE_2() to append + * its own customized content.
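 *
 * A hedged editorial sketch (the vendor and errata identifiers below are
 * hypothetical, not taken from this patch) of what such a call site could
 * look like from C code:
 *
 *	asm(ALTERNATIVE_2("nop", "fence.i", VENDOR_A, ERRATUM_A, CONFIG_ERRATA_A,
 *			  "fence rw, rw", VENDOR_B, ERRATUM_B, CONFIG_ERRATA_B));
 *
 * At patch time at most one of the two replacements is applied, selected by
 * matching the running CPU's vendor ID and the per-vendor errata check.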
+ */ +#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, \ + errata_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, \ + errata_id_2, CONFIG_k_2) \ + _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, \ + errata_id_1, CONFIG_k_1, \ + new_content_2, vendor_id_2, \ + errata_id_2, CONFIG_k_2) + +#endif diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h new file mode 100644 index 000000000..6511dd73e --- /dev/null +++ b/arch/riscv/include/asm/alternative.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive. + */ + +#ifndef __ASM_ALTERNATIVE_H +#define __ASM_ALTERNATIVE_H + +#define ERRATA_STRING_LENGTH_MAX 32 + +#include <asm/alternative-macros.h> + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_RISCV_ALTERNATIVE + +#include <linux/init.h> +#include <linux/types.h> +#include <linux/stddef.h> +#include <asm/hwcap.h> + +#define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */ +#define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */ +#define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */ +void __init apply_boot_alternatives(void); +void __init apply_early_boot_alternatives(void); +void apply_module_alternatives(void *start, size_t length); + +struct alt_entry { + void *old_ptr; /* address of original instruction or data */ + void *alt_ptr; /* address of replacement instruction or data */ + unsigned long vendor_id; /* cpu vendor id */ + unsigned long alt_len; /* The replacement size */ + unsigned int errata_id; /* The errata id */ +} __packed; + +struct errata_checkfunc_id { + unsigned long vendor_id; + bool (*func)(struct alt_entry *alt); +}; + +void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid, + unsigned int stage); +void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned long archid, unsigned long impid, + unsigned int stage); + +void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end, + unsigned int stage); + +#else /* CONFIG_RISCV_ALTERNATIVE */ + +static inline void apply_boot_alternatives(void) { } +static inline void apply_early_boot_alternatives(void) { } +static inline void apply_module_alternatives(void *start, size_t length) { } + +#endif /* CONFIG_RISCV_ALTERNATIVE */ + +#endif +#endif diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h new file mode 100644 index 000000000..14be0673f --- /dev/null +++ b/arch/riscv/include/asm/asm-extable.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_ASM_EXTABLE_H +#define __ASM_ASM_EXTABLE_H + +#define EX_TYPE_NONE 0 +#define EX_TYPE_FIXUP 1 +#define EX_TYPE_BPF 2 +#define EX_TYPE_UACCESS_ERR_ZERO 3 + +#ifdef __ASSEMBLY__ + +#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ + .pushsection __ex_table, "a"; \ + .balign 4; \ + .long ((insn) - .); \ + .long ((fixup) - .); \ + .short (type); \ + .short (data); \ + .popsection; + + .macro _asm_extable, insn, fixup + __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0) + .endm + +#else /* __ASSEMBLY__ */ + +#include <linux/bits.h> +#include <linux/stringify.h> +#include <asm/gpr-num.h> + +#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \ + ".pushsection __ex_table, \"a\"\n" \ + ".balign 4\n" \ + ".long ((" insn ") - .)\n" \ + ".long ((" fixup ") - .)\n" \ + ".short (" type ")\n" \ + ".short (" data ")\n" \ + ".popsection\n" + +#define
_ASM_EXTABLE(insn, fixup) \ + __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0") + +#define EX_DATA_REG_ERR_SHIFT 0 +#define EX_DATA_REG_ERR GENMASK(4, 0) +#define EX_DATA_REG_ZERO_SHIFT 5 +#define EX_DATA_REG_ZERO GENMASK(9, 5) + +#define EX_DATA_REG(reg, gpr) \ + "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" + +#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \ + __DEFINE_ASM_GPR_NUMS \ + __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_UACCESS_ERR_ZERO), \ + "(" \ + EX_DATA_REG(ERR, err) " | " \ + EX_DATA_REG(ZERO, zero) \ + ")") + +#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_ASM_EXTABLE_H */ diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h new file mode 100644 index 000000000..d370ee36a --- /dev/null +++ b/arch/riscv/include/asm/asm-offsets.h @@ -0,0 +1 @@ +#include <generated/asm-offsets.h> diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h new file mode 100644 index 000000000..ef386fcf3 --- /dev/null +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_PROTOTYPES_H +#define _ASM_RISCV_PROTOTYPES_H + +#include <linux/ftrace.h> +#include <asm-generic/asm-prototypes.h> + +long long __lshrti3(long long a, int b); +long long __ashrti3(long long a, int b); +long long __ashlti3(long long a, int b); + + +#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs) + +DECLARE_DO_ERROR_INFO(do_trap_unknown); +DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned); +DECLARE_DO_ERROR_INFO(do_trap_insn_fault); +DECLARE_DO_ERROR_INFO(do_trap_insn_illegal); +DECLARE_DO_ERROR_INFO(do_trap_load_fault); +DECLARE_DO_ERROR_INFO(do_trap_load_misaligned); +DECLARE_DO_ERROR_INFO(do_trap_store_misaligned); +DECLARE_DO_ERROR_INFO(do_trap_store_fault); +DECLARE_DO_ERROR_INFO(do_trap_ecall_u); +DECLARE_DO_ERROR_INFO(do_trap_ecall_s); +DECLARE_DO_ERROR_INFO(do_trap_ecall_m); +DECLARE_DO_ERROR_INFO(do_trap_break); + +asmlinkage unsigned long get_overflow_stack(void); +asmlinkage void handle_bad_stack(struct pt_regs *regs); + +#endif /* _ASM_RISCV_PROTOTYPES_H */ diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h new file mode 100644 index 000000000..816e753de --- /dev/null +++ b/arch/riscv/include/asm/asm.h @@ -0,0 +1,86 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _ASM_RISCV_ASM_H +#define _ASM_RISCV_ASM_H + +#ifdef __ASSEMBLY__ +#define __ASM_STR(x) x +#else +#define __ASM_STR(x) #x +#endif + +#if __riscv_xlen == 64 +#define __REG_SEL(a, b) __ASM_STR(a) +#elif __riscv_xlen == 32 +#define __REG_SEL(a, b) __ASM_STR(b) +#else +#error "Unexpected __riscv_xlen" +#endif + +#define REG_L __REG_SEL(ld, lw) +#define REG_S __REG_SEL(sd, sw) +#define REG_SC __REG_SEL(sc.d, sc.w) +#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq) +#define REG_ASM __REG_SEL(.dword, .word) +#define SZREG __REG_SEL(8, 4) +#define LGREG __REG_SEL(3, 2) + +#if __SIZEOF_POINTER__ == 8 +#ifdef __ASSEMBLY__ +#define RISCV_PTR .dword +#define RISCV_SZPTR 8 +#define RISCV_LGPTR 3 +#else +#define RISCV_PTR ".dword" +#define RISCV_SZPTR "8" +#define RISCV_LGPTR "3" +#endif +#elif __SIZEOF_POINTER__ == 4 +#ifdef __ASSEMBLY__ +#define RISCV_PTR .word +#define RISCV_SZPTR 4 +#define RISCV_LGPTR 2 +#else 
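/*
 * Editorial aside, not part of the original patch: __REG_SEL() is what makes
 * the REG_L/REG_S/SZREG helpers above width-agnostic. On a 64-bit build,
 * assembly such as
 *
 *	REG_L	t0, 0(a0)
 *	addi	a0, a0, SZREG
 *
 * assembles to "ld t0, 0(a0)" and "addi a0, a0, 8"; the same source becomes
 * "lw" with a stride of 4 on a 32-bit build.
 */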
+#define RISCV_PTR ".word" +#define RISCV_SZPTR "4" +#define RISCV_LGPTR "2" +#endif +#else +#error "Unexpected __SIZEOF_POINTER__" +#endif + +#if (__SIZEOF_INT__ == 4) +#define RISCV_INT __ASM_STR(.word) +#define RISCV_SZINT __ASM_STR(4) +#define RISCV_LGINT __ASM_STR(2) +#else +#error "Unexpected __SIZEOF_INT__" +#endif + +#if (__SIZEOF_SHORT__ == 2) +#define RISCV_SHORT __ASM_STR(.half) +#define RISCV_SZSHORT __ASM_STR(2) +#define RISCV_LGSHORT __ASM_STR(1) +#else +#error "Unexpected __SIZEOF_SHORT__" +#endif + +#ifdef __ASSEMBLY__ + +/* Common assembly source macros */ + +/* + * NOP sequence + */ +.macro nops, num + .rept \num + nop + .endr +.endm + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_ASM_H */ diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h new file mode 100644 index 000000000..0dfe9d857 --- /dev/null +++ b/arch/riscv/include/asm/atomic.h @@ -0,0 +1,438 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved. + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + */ + +#ifndef _ASM_RISCV_ATOMIC_H +#define _ASM_RISCV_ATOMIC_H + +#ifdef CONFIG_GENERIC_ATOMIC64 +# include <asm-generic/atomic64.h> +#else +# if (__riscv_xlen < 64) +# error "64-bit atomics require XLEN to be at least 64" +# endif +#endif + +#include <asm/cmpxchg.h> +#include <asm/barrier.h> + +#define __atomic_acquire_fence() \ + __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory") + +#define __atomic_release_fence() \ + __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory"); + +static __always_inline int arch_atomic_read(const atomic_t *v) +{ + return READ_ONCE(v->counter); +} +static __always_inline void arch_atomic_set(atomic_t *v, int i) +{ + WRITE_ONCE(v->counter, i); +} + +#ifndef CONFIG_GENERIC_ATOMIC64 +#define ATOMIC64_INIT(i) { (i) } +static __always_inline s64 arch_atomic64_read(const atomic64_t *v) +{ + return READ_ONCE(v->counter); +} +static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i) +{ + WRITE_ONCE(v->counter, i); +} +#endif + +/* + * First, the atomic ops that have no ordering constraints and therefor don't + * have the AQ or RL bits set. These don't return anything, so there's only + * one version to worry about. + */ +#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \ +static __always_inline \ +void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \ +{ \ + __asm__ __volatile__ ( \ + " amo" #asm_op "." #asm_type " zero, %1, %0" \ + : "+A" (v->counter) \ + : "r" (I) \ + : "memory"); \ +} \ + +#ifdef CONFIG_GENERIC_ATOMIC64 +#define ATOMIC_OPS(op, asm_op, I) \ + ATOMIC_OP (op, asm_op, I, w, int, ) +#else +#define ATOMIC_OPS(op, asm_op, I) \ + ATOMIC_OP (op, asm_op, I, w, int, ) \ + ATOMIC_OP (op, asm_op, I, d, s64, 64) +#endif + +ATOMIC_OPS(add, add, i) +ATOMIC_OPS(sub, add, -i) +ATOMIC_OPS(and, and, i) +ATOMIC_OPS( or, or, i) +ATOMIC_OPS(xor, xor, i) + +#undef ATOMIC_OP +#undef ATOMIC_OPS + +/* + * Atomic ops that have ordered, relaxed, acquire, and release variants. + * There's two flavors of these: the arithmatic ops have both fetch and return + * versions, while the logical ops only have fetch versions. + */ +#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \ +static __always_inline \ +c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \ + atomic##prefix##_t *v) \ +{ \ + register c_type ret; \ + __asm__ __volatile__ ( \ + " amo" #asm_op "." 
#asm_type " %1, %2, %0" \ + : "+A" (v->counter), "=r" (ret) \ + : "r" (I) \ + : "memory"); \ + return ret; \ +} \ +static __always_inline \ +c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \ +{ \ + register c_type ret; \ + __asm__ __volatile__ ( \ + " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \ + : "+A" (v->counter), "=r" (ret) \ + : "r" (I) \ + : "memory"); \ + return ret; \ +} + +#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \ +static __always_inline \ +c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \ + atomic##prefix##_t *v) \ +{ \ + return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \ +} \ +static __always_inline \ +c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \ +{ \ + return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \ +} + +#ifdef CONFIG_GENERIC_ATOMIC64 +#define ATOMIC_OPS(op, asm_op, c_op, I) \ + ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \ + ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) +#else +#define ATOMIC_OPS(op, asm_op, c_op, I) \ + ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \ + ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \ + ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \ + ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64) +#endif + +ATOMIC_OPS(add, add, +, i) +ATOMIC_OPS(sub, add, +, -i) + +#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed +#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed +#define arch_atomic_add_return arch_atomic_add_return +#define arch_atomic_sub_return arch_atomic_sub_return + +#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed +#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed +#define arch_atomic_fetch_add arch_atomic_fetch_add +#define arch_atomic_fetch_sub arch_atomic_fetch_sub + +#ifndef CONFIG_GENERIC_ATOMIC64 +#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed +#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed +#define arch_atomic64_add_return arch_atomic64_add_return +#define arch_atomic64_sub_return arch_atomic64_sub_return + +#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed +#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed +#define arch_atomic64_fetch_add arch_atomic64_fetch_add +#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub +#endif + +#undef ATOMIC_OPS + +#ifdef CONFIG_GENERIC_ATOMIC64 +#define ATOMIC_OPS(op, asm_op, I) \ + ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) +#else +#define ATOMIC_OPS(op, asm_op, I) \ + ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \ + ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64) +#endif + +ATOMIC_OPS(and, and, i) +ATOMIC_OPS( or, or, i) +ATOMIC_OPS(xor, xor, i) + +#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed +#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed +#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed +#define arch_atomic_fetch_and arch_atomic_fetch_and +#define arch_atomic_fetch_or arch_atomic_fetch_or +#define arch_atomic_fetch_xor arch_atomic_fetch_xor + +#ifndef CONFIG_GENERIC_ATOMIC64 +#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed +#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed +#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed +#define arch_atomic64_fetch_and arch_atomic64_fetch_and +#define arch_atomic64_fetch_or arch_atomic64_fetch_or +#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor 
+#endif + +#undef ATOMIC_OPS + +#undef ATOMIC_FETCH_OP +#undef ATOMIC_OP_RETURN + +/* This is required to provide a full barrier on success. */ +static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u) +{ + int prev, rc; + + __asm__ __volatile__ ( + "0: lr.w %[p], %[c]\n" + " beq %[p], %[u], 1f\n" + " add %[rc], %[p], %[a]\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) + : "memory"); + return prev; +} +#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless + +#ifndef CONFIG_GENERIC_ATOMIC64 +static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u) +{ + s64 prev; + long rc; + + __asm__ __volatile__ ( + "0: lr.d %[p], %[c]\n" + " beq %[p], %[u], 1f\n" + " add %[rc], %[p], %[a]\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : [a]"r" (a), [u]"r" (u) + : "memory"); + return prev; +} +#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless +#endif + +/* + * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as + * {cmp,}xchg and the operations that return, so they need a full barrier. + */ +#define ATOMIC_OP(c_t, prefix, size) \ +static __always_inline \ +c_t arch_atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \ +{ \ + return __xchg_relaxed(&(v->counter), n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \ +{ \ + return __xchg_acquire(&(v->counter), n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \ +{ \ + return __xchg_release(&(v->counter), n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \ +{ \ + return __xchg(&(v->counter), n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \ + c_t o, c_t n) \ +{ \ + return __cmpxchg_relaxed(&(v->counter), o, n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \ + c_t o, c_t n) \ +{ \ + return __cmpxchg_acquire(&(v->counter), o, n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \ + c_t o, c_t n) \ +{ \ + return __cmpxchg_release(&(v->counter), o, n, size); \ +} \ +static __always_inline \ +c_t arch_atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \ +{ \ + return __cmpxchg(&(v->counter), o, n, size); \ +} + +#ifdef CONFIG_GENERIC_ATOMIC64 +#define ATOMIC_OPS() \ + ATOMIC_OP(int, , 4) +#else +#define ATOMIC_OPS() \ + ATOMIC_OP(int, , 4) \ + ATOMIC_OP(s64, 64, 8) +#endif + +ATOMIC_OPS() + +#define arch_atomic_xchg_relaxed arch_atomic_xchg_relaxed +#define arch_atomic_xchg_acquire arch_atomic_xchg_acquire +#define arch_atomic_xchg_release arch_atomic_xchg_release +#define arch_atomic_xchg arch_atomic_xchg +#define arch_atomic_cmpxchg_relaxed arch_atomic_cmpxchg_relaxed +#define arch_atomic_cmpxchg_acquire arch_atomic_cmpxchg_acquire +#define arch_atomic_cmpxchg_release arch_atomic_cmpxchg_release +#define arch_atomic_cmpxchg arch_atomic_cmpxchg + +#undef ATOMIC_OPS +#undef ATOMIC_OP + +static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v) +{ + int prev, rc; + + __asm__ __volatile__ ( + "0: lr.w %[p], %[c]\n" + " bltz %[p], 1f\n" + " addi 
%[rc], %[p], 1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : + : "memory"); + return !(prev < 0); +} + +#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative + +static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v) +{ + int prev, rc; + + __asm__ __volatile__ ( + "0: lr.w %[p], %[c]\n" + " bgtz %[p], 1f\n" + " addi %[rc], %[p], -1\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : + : "memory"); + return !(prev > 0); +} + +#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive + +static __always_inline int arch_atomic_dec_if_positive(atomic_t *v) +{ + int prev, rc; + + __asm__ __volatile__ ( + "0: lr.w %[p], %[c]\n" + " addi %[rc], %[p], -1\n" + " bltz %[rc], 1f\n" + " sc.w.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : + : "memory"); + return prev - 1; +} + +#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive + +#ifndef CONFIG_GENERIC_ATOMIC64 +static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v) +{ + s64 prev; + long rc; + + __asm__ __volatile__ ( + "0: lr.d %[p], %[c]\n" + " bltz %[p], 1f\n" + " addi %[rc], %[p], 1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : + : "memory"); + return !(prev < 0); +} + +#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative + +static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v) +{ + s64 prev; + long rc; + + __asm__ __volatile__ ( + "0: lr.d %[p], %[c]\n" + " bgtz %[p], 1f\n" + " addi %[rc], %[p], -1\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : + : "memory"); + return !(prev > 0); +} + +#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive + +static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v) +{ + s64 prev; + long rc; + + __asm__ __volatile__ ( + "0: lr.d %[p], %[c]\n" + " addi %[rc], %[p], -1\n" + " bltz %[rc], 1f\n" + " sc.d.rl %[rc], %[rc], %[c]\n" + " bnez %[rc], 0b\n" + " fence rw, rw\n" + "1:\n" + : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter) + : + : "memory"); + return prev - 1; +} + +#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive +#endif + +#endif /* _ASM_RISCV_ATOMIC_H */ diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h new file mode 100644 index 000000000..110752594 --- /dev/null +++ b/arch/riscv/include/asm/barrier.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Based on arch/arm/include/asm/barrier.h + * + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2013 Regents of the University of California + * Copyright (C) 2017 SiFive + */ + +#ifndef _ASM_RISCV_BARRIER_H +#define _ASM_RISCV_BARRIER_H + +#ifndef __ASSEMBLY__ + +#define nop() __asm__ __volatile__ ("nop") +#define __nops(n) ".rept " #n "\nnop\n.endr\n" +#define nops(n) __asm__ __volatile__ (__nops(n)) + +#define RISCV_FENCE(p, s) \ + __asm__ __volatile__ ("fence " #p "," #s : : : "memory") + +/* These barriers need to enforce ordering on both devices or memory. 
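 * As a hedged illustration (editorial note): mb() below expands to
 * "fence iorw,iorw", where the "i" and "o" letters order device input and
 * output and the "r" and "w" letters order normal memory reads and writes,
 * on both sides of the fence.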
*/ +#define mb() RISCV_FENCE(iorw,iorw) +#define rmb() RISCV_FENCE(ir,ir) +#define wmb() RISCV_FENCE(ow,ow) + +/* These barriers do not need to enforce ordering on devices, just memory. */ +#define __smp_mb() RISCV_FENCE(rw,rw) +#define __smp_rmb() RISCV_FENCE(r,r) +#define __smp_wmb() RISCV_FENCE(w,w) + +#define __smp_store_release(p, v) \ +do { \ + compiletime_assert_atomic_type(*p); \ + RISCV_FENCE(rw,w); \ + WRITE_ONCE(*p, v); \ +} while (0) + +#define __smp_load_acquire(p) \ +({ \ + typeof(*p) ___p1 = READ_ONCE(*p); \ + compiletime_assert_atomic_type(*p); \ + RISCV_FENCE(r,rw); \ + ___p1; \ +}) + +/* + * This is a very specific barrier: it's currently only used in two places in + * the kernel, both in the scheduler. See include/linux/spinlock.h for the two + * orderings it guarantees, but the "critical section is RCsc" guarantee + * mandates a barrier on RISC-V. The sequence looks like: + * + * lr.aq lock + * sc lock <= LOCKED + * smp_mb__after_spinlock() + * // critical section + * lr lock + * sc.rl lock <= UNLOCKED + * + * The AQ/RL pair provides a RCpc critical section, but there's not really any + * way we can take advantage of that here because the ordering is only enforced + * on that one lock. Thus, we're just doing a full fence. + * + * Since we allow writeX to be called from preemptive regions we need at least + * an "o" in the predecessor set to ensure device writes are visible before the + * task is marked as available for scheduling on a new hart. While I don't see + * any concrete reason we need a full IO fence, it seems safer to just upgrade + * this in order to avoid any IO crossing a scheduling boundary. In both + * instances the scheduler pairs this with an mb(), so nothing is necessary on + * the new hart. + */ +#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw) + +#include <asm-generic/barrier.h> + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_BARRIER_H */ diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h new file mode 100644 index 000000000..3540b6909 --- /dev/null +++ b/arch/riscv/include/asm/bitops.h @@ -0,0 +1,204 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_BITOPS_H +#define _ASM_RISCV_BITOPS_H + +#ifndef _LINUX_BITOPS_H +#error "Only <linux/bitops.h> can be included directly" +#endif /* _LINUX_BITOPS_H */ + +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <asm/barrier.h> +#include <asm/bitsperlong.h> + +#include <asm-generic/bitops/__ffs.h> +#include <asm-generic/bitops/ffz.h> +#include <asm-generic/bitops/fls.h> +#include <asm-generic/bitops/__fls.h> +#include <asm-generic/bitops/fls64.h> +#include <asm-generic/bitops/sched.h> +#include <asm-generic/bitops/ffs.h> + +#include <asm-generic/bitops/hweight.h> + +#if (BITS_PER_LONG == 64) +#define __AMO(op) "amo" #op ".d" +#elif (BITS_PER_LONG == 32) +#define __AMO(op) "amo" #op ".w" +#else +#error "Unexpected BITS_PER_LONG" +#endif + +#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \ +({ \ + unsigned long __res, __mask; \ + __mask = BIT_MASK(nr); \ + __asm__ __volatile__ ( \ + __AMO(op) #ord " %0, %2, %1" \ + : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \ + : "r" (mod(__mask)) \ + : "memory"); \ + ((__res & __mask) != 0); \ +}) + +#define __op_bit_ord(op, mod, nr, addr, ord) \ + __asm__ __volatile__ ( \ + __AMO(op) #ord " zero, %1, %0" \ + : "+A" (addr[BIT_WORD(nr)]) \ + : "r" (mod(BIT_MASK(nr))) \ + : "memory"); + +#define __test_and_op_bit(op, mod, nr, 
addr) \ + __test_and_op_bit_ord(op, mod, nr, addr, .aqrl) +#define __op_bit(op, mod, nr, addr) \ + __op_bit_ord(op, mod, nr, addr, ) + +/* Bitmask modifiers */ +#define __NOP(x) (x) +#define __NOT(x) (~(x)) + +/** + * test_and_set_bit - Set a bit and return its old value + * @nr: Bit to set + * @addr: Address to count from + * + * This operation may be reordered on architectures other than x86. + */ +static inline int test_and_set_bit(int nr, volatile unsigned long *addr) +{ + return __test_and_op_bit(or, __NOP, nr, addr); +} + +/** + * test_and_clear_bit - Clear a bit and return its old value + * @nr: Bit to clear + * @addr: Address to count from + * + * This operation may be reordered on architectures other than x86. + */ +static inline int test_and_clear_bit(int nr, volatile unsigned long *addr) +{ + return __test_and_op_bit(and, __NOT, nr, addr); +} + +/** + * test_and_change_bit - Change a bit and return its old value + * @nr: Bit to change + * @addr: Address to count from + * + * This operation is atomic and cannot be reordered. + * It also implies a memory barrier. + */ +static inline int test_and_change_bit(int nr, volatile unsigned long *addr) +{ + return __test_and_op_bit(xor, __NOP, nr, addr); +} + +/** + * set_bit - Atomically set a bit in memory + * @nr: the bit to set + * @addr: the address to start counting from + * + * Note: there are no guarantees that this function will not be reordered + * on non-x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + * + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void set_bit(int nr, volatile unsigned long *addr) +{ + __op_bit(or, __NOP, nr, addr); +} + +/** + * clear_bit - Clears a bit in memory + * @nr: Bit to clear + * @addr: Address to start counting from + * + * Note: there are no guarantees that this function will not be reordered + * on non-x86 architectures, so if you are writing portable code, + * make sure not to rely on its reordering guarantees. + */ +static inline void clear_bit(int nr, volatile unsigned long *addr) +{ + __op_bit(and, __NOT, nr, addr); +} + +/** + * change_bit - Toggle a bit in memory + * @nr: Bit to change + * @addr: Address to start counting from + * + * change_bit() may be reordered on architectures other than x86. + * Note that @nr may be almost arbitrarily large; this function is not + * restricted to acting on a single-word quantity. + */ +static inline void change_bit(int nr, volatile unsigned long *addr) +{ + __op_bit(xor, __NOP, nr, addr); +} + +/** + * test_and_set_bit_lock - Set a bit and return its old value, for lock + * @nr: Bit to set + * @addr: Address to count from + * + * This operation is atomic and provides acquire barrier semantics. + * It can be used to implement bit locks. + */ +static inline int test_and_set_bit_lock( + unsigned long nr, volatile unsigned long *addr) +{ + return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq); +} + +/** + * clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is atomic and provides release barrier semantics.
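 *
 * A small editorial sketch, not from this patch, of the intended pairing
 * with test_and_set_bit_lock() above:
 *
 *	while (test_and_set_bit_lock(0, &word))
 *		cpu_relax();			// spin until we own bit 0
 *	...critical section...
 *	clear_bit_unlock(0, &word);		// release the bit lock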
+ */ +static inline void clear_bit_unlock( + unsigned long nr, volatile unsigned long *addr) +{ + __op_bit_ord(and, __NOT, nr, addr, .rl); +} + +/** + * __clear_bit_unlock - Clear a bit in memory, for unlock + * @nr: the bit to set + * @addr: the address to start counting from + * + * This operation is like clear_bit_unlock, however it is not atomic. + * It does provide release barrier semantics so it can be used to unlock + * a bit lock, however it would only be used if no other CPU can modify + * any bits in the memory until the lock is released (a good example is + * if the bit lock itself protects access to the other bits in the word). + * + * On RISC-V systems there seems to be no benefit to taking advantage of the + * non-atomic property here: it's a lot more instructions and we still have to + * provide release semantics anyway. + */ +static inline void __clear_bit_unlock( + unsigned long nr, volatile unsigned long *addr) +{ + clear_bit_unlock(nr, addr); +} + +#undef __test_and_op_bit +#undef __op_bit +#undef __NOP +#undef __NOT +#undef __AMO + +#include <asm-generic/bitops/non-atomic.h> +#include <asm-generic/bitops/le.h> +#include <asm-generic/bitops/ext2-atomic.h> + +#endif /* _ASM_RISCV_BITOPS_H */ diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h new file mode 100644 index 000000000..1aaea81fb --- /dev/null +++ b/arch/riscv/include/asm/bug.h @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_BUG_H +#define _ASM_RISCV_BUG_H + +#include <linux/compiler.h> +#include <linux/const.h> +#include <linux/types.h> + +#include <asm/asm.h> + +#define __INSN_LENGTH_MASK _UL(0x3) +#define __INSN_LENGTH_32 _UL(0x3) +#define __COMPRESSED_INSN_MASK _UL(0xffff) + +#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */ +#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */ + +#define GET_INSN_LENGTH(insn) \ +({ \ + unsigned long __len; \ + __len = ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? \ + 4UL : 2UL; \ + __len; \ +}) + +typedef u32 bug_insn_t; + +#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS +#define __BUG_ENTRY_ADDR RISCV_INT " 1b - ." +#define __BUG_ENTRY_FILE RISCV_INT " %0 - ." 
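/*
 * Editorial worked example, not part of the original patch: the low two bits
 * of an instruction encode its length. __BUG_INSN_32 (0x00100073) has low
 * bits 0b11, so GET_INSN_LENGTH() yields 4; __BUG_INSN_16 (0x9002) has low
 * bits 0b10, so it yields 2.
 */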
+#else +#define __BUG_ENTRY_ADDR RISCV_PTR " 1b" +#define __BUG_ENTRY_FILE RISCV_PTR " %0" +#endif + +#ifdef CONFIG_DEBUG_BUGVERBOSE +#define __BUG_ENTRY \ + __BUG_ENTRY_ADDR "\n\t" \ + __BUG_ENTRY_FILE "\n\t" \ + RISCV_SHORT " %1\n\t" \ + RISCV_SHORT " %2" +#else +#define __BUG_ENTRY \ + __BUG_ENTRY_ADDR "\n\t" \ + RISCV_SHORT " %2" +#endif + +#ifdef CONFIG_GENERIC_BUG +#define __BUG_FLAGS(flags) \ +do { \ + __asm__ __volatile__ ( \ + "1:\n\t" \ + "ebreak\n" \ + ".pushsection __bug_table,\"aw\"\n\t" \ + "2:\n\t" \ + __BUG_ENTRY "\n\t" \ + ".org 2b + %3\n\t" \ + ".popsection" \ + : \ + : "i" (__FILE__), "i" (__LINE__), \ + "i" (flags), \ + "i" (sizeof(struct bug_entry))); \ +} while (0) +#else /* CONFIG_GENERIC_BUG */ +#define __BUG_FLAGS(flags) do { \ + __asm__ __volatile__ ("ebreak\n"); \ +} while (0) +#endif /* CONFIG_GENERIC_BUG */ + +#define BUG() do { \ + __BUG_FLAGS(0); \ + unreachable(); \ +} while (0) + +#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags)) + +#define HAVE_ARCH_BUG + +#include <asm-generic/bug.h> + +struct pt_regs; +struct task_struct; + +void __show_regs(struct pt_regs *regs); +void die(struct pt_regs *regs, const char *str); +void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr); + +#endif /* _ASM_RISCV_BUG_H */ diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h new file mode 100644 index 000000000..d3036df23 --- /dev/null +++ b/arch/riscv/include/asm/cache.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com> + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_CACHE_H +#define _ASM_RISCV_CACHE_H + +#define L1_CACHE_SHIFT 6 + +#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT) + +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +#define ARCH_DMA_MINALIGN L1_CACHE_BYTES +#endif + +/* + * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that + * the flat loader aligns it accordingly. + */ +#ifndef CONFIG_MMU +#define ARCH_SLAB_MINALIGN 16 +#endif + +#endif /* _ASM_RISCV_CACHE_H */ diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h new file mode 100644 index 000000000..f6fbe7042 --- /dev/null +++ b/arch/riscv/include/asm/cacheflush.h @@ -0,0 +1,62 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _ASM_RISCV_CACHEFLUSH_H +#define _ASM_RISCV_CACHEFLUSH_H + +#include <linux/mm.h> + +static inline void local_flush_icache_all(void) +{ + asm volatile ("fence.i" ::: "memory"); +} + +#define PG_dcache_clean PG_arch_1 + +static inline void flush_dcache_page(struct page *page) +{ + if (test_bit(PG_dcache_clean, &page->flags)) + clear_bit(PG_dcache_clean, &page->flags); +} +#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1 + +/* + * RISC-V doesn't have an instruction to flush parts of the instruction cache, + * so instead we just flush the whole thing. 
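 * Note, too (editorial addition), that fence.i only orders instruction
 * fetches on the hart that executes it, which is why the CONFIG_SMP
 * definitions below must reach remote harts via flush_icache_mm() and
 * flush_icache_all() rather than relying on local_flush_icache_all() alone.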
+ */ +#define flush_icache_range(start, end) flush_icache_all() +#define flush_icache_user_page(vma, pg, addr, len) \ + flush_icache_mm(vma->vm_mm, 0) + +#ifndef CONFIG_SMP + +#define flush_icache_all() local_flush_icache_all() +#define flush_icache_mm(mm, local) flush_icache_all() + +#else /* CONFIG_SMP */ + +void flush_icache_all(void); +void flush_icache_mm(struct mm_struct *mm, bool local); + +#endif /* CONFIG_SMP */ + +extern unsigned int riscv_cbom_block_size; +void riscv_init_cbom_blocksize(void); + +#ifdef CONFIG_RISCV_DMA_NONCOHERENT +void riscv_noncoherent_supported(void); +#else +static inline void riscv_noncoherent_supported(void) {} +#endif + +/* + * Bits in sys_riscv_flush_icache()'s flags argument. + */ +#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL +#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL) + +#include <asm-generic/cacheflush.h> + +#endif /* _ASM_RISCV_CACHEFLUSH_H */ diff --git a/arch/riscv/include/asm/cacheinfo.h b/arch/riscv/include/asm/cacheinfo.h new file mode 100644 index 000000000..d1a365215 --- /dev/null +++ b/arch/riscv/include/asm/cacheinfo.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 SiFive + */ + +#ifndef _ASM_RISCV_CACHEINFO_H +#define _ASM_RISCV_CACHEINFO_H + +#include <linux/cacheinfo.h> + +struct riscv_cacheinfo_ops { + const struct attribute_group * (*get_priv_group)(struct cacheinfo + *this_leaf); +}; + +void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops); +uintptr_t get_cache_size(u32 level, enum cache_type type); +uintptr_t get_cache_geometry(u32 level, enum cache_type type); + +#endif /* _ASM_RISCV_CACHEINFO_H */ diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h new file mode 100644 index 000000000..0789fd37b --- /dev/null +++ b/arch/riscv/include/asm/clint.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Google, Inc + */ + +#ifndef _ASM_RISCV_CLINT_H +#define _ASM_RISCV_CLINT_H + +#include <linux/types.h> +#include <asm/mmio.h> + +#ifdef CONFIG_RISCV_M_MODE +/* + * This lives in the CLINT driver, but is accessed directly by timex.h to avoid + * any overhead when accessing the MMIO timer. + * + * The ISA defines mtime as a 64-bit memory-mapped register that increments at + * a constant frequency, but it doesn't define some other constraints we depend + * on (most notably ordering constraints, but also some simpler stuff like the + * memory layout). Thus, this is called "clint_time_val" instead of something + * like "riscv_mtime", to signify that these non-ISA assumptions must hold. 
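 *
 * As a hedged sketch (editorial; see the timex code for the authoritative
 * consumer), a 64-bit reader of this register would do roughly:
 *
 *	u64 cycles = readq_relaxed(clint_time_val);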
+ */ +extern u64 __iomem *clint_time_val; +#endif + +#endif diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h new file mode 100644 index 000000000..482185566 --- /dev/null +++ b/arch/riscv/include/asm/clocksource.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_CLOCKSOURCE_H +#define _ASM_CLOCKSOURCE_H + +#include <asm/vdso/clocksource.h> + +#endif diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h new file mode 100644 index 000000000..12debce23 --- /dev/null +++ b/arch/riscv/include/asm/cmpxchg.h @@ -0,0 +1,363 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2014 Regents of the University of California + */ + +#ifndef _ASM_RISCV_CMPXCHG_H +#define _ASM_RISCV_CMPXCHG_H + +#include <linux/bug.h> + +#include <asm/barrier.h> +#include <asm/fence.h> + +#define __xchg_relaxed(ptr, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + " amoswap.w %0, %2, %1\n" \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + " amoswap.d %0, %2, %1\n" \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_xchg_relaxed(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_relaxed((ptr), \ + _x_, sizeof(*(ptr))); \ +}) + +#define __xchg_acquire(ptr, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + " amoswap.w %0, %2, %1\n" \ + RISCV_ACQUIRE_BARRIER \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + " amoswap.d %0, %2, %1\n" \ + RISCV_ACQUIRE_BARRIER \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_xchg_acquire(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_acquire((ptr), \ + _x_, sizeof(*(ptr))); \ +}) + +#define __xchg_release(ptr, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + RISCV_RELEASE_BARRIER \ + " amoswap.w %0, %2, %1\n" \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + RISCV_RELEASE_BARRIER \ + " amoswap.d %0, %2, %1\n" \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_xchg_release(ptr, x) \ +({ \ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg_release((ptr), \ + _x_, sizeof(*(ptr))); \ +}) + +#define __xchg(ptr, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(new) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + " amoswap.w.aqrl %0, %2, %1\n" \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + " amoswap.d.aqrl %0, %2, %1\n" \ + : "=r" (__ret), "+A" (*__ptr) \ + : "r" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_xchg(ptr, x) \ +({ 
\ + __typeof__(*(ptr)) _x_ = (x); \ + (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \ +}) + +#define xchg32(ptr, x) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 4); \ + arch_xchg((ptr), (x)); \ +}) + +#define xchg64(ptr, x) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_xchg((ptr), (x)); \ +}) + +/* + * Atomic compare and exchange. Compare OLD with MEM, if identical, + * store NEW in MEM. Return the initial value in MEM. Success is + * indicated by comparing RETURN with OLD. + */ +#define __cmpxchg_relaxed(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + register unsigned int __rc; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + "0: lr.w %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.w %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" ((long)__old), "rJ" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + "0: lr.d %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.d %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" (__old), "rJ" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg_relaxed(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \ + _o_, _n_, sizeof(*(ptr))); \ +}) + +#define __cmpxchg_acquire(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + register unsigned int __rc; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + "0: lr.w %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.w %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + RISCV_ACQUIRE_BARRIER \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" ((long)__old), "rJ" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + "0: lr.d %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.d %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + RISCV_ACQUIRE_BARRIER \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" (__old), "rJ" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg_acquire(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \ + _o_, _n_, sizeof(*(ptr))); \ +}) + +#define __cmpxchg_release(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + register unsigned int __rc; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + RISCV_RELEASE_BARRIER \ + "0: lr.w %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.w %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" ((long)__old), "rJ" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + RISCV_RELEASE_BARRIER \ + "0: lr.d %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.d %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" (__old), "rJ" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg_release(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + 
__typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg_release((ptr), \ + _o_, _n_, sizeof(*(ptr))); \ +}) + +#define __cmpxchg(ptr, old, new, size) \ +({ \ + __typeof__(ptr) __ptr = (ptr); \ + __typeof__(*(ptr)) __old = (old); \ + __typeof__(*(ptr)) __new = (new); \ + __typeof__(*(ptr)) __ret; \ + register unsigned int __rc; \ + switch (size) { \ + case 4: \ + __asm__ __volatile__ ( \ + "0: lr.w %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.w.rl %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" ((long)__old), "rJ" (__new) \ + : "memory"); \ + break; \ + case 8: \ + __asm__ __volatile__ ( \ + "0: lr.d %0, %2\n" \ + " bne %0, %z3, 1f\n" \ + " sc.d.rl %1, %z4, %2\n" \ + " bnez %1, 0b\n" \ + " fence rw, rw\n" \ + "1:\n" \ + : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \ + : "rJ" (__old), "rJ" (__new) \ + : "memory"); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ + __ret; \ +}) + +#define arch_cmpxchg(ptr, o, n) \ +({ \ + __typeof__(*(ptr)) _o_ = (o); \ + __typeof__(*(ptr)) _n_ = (n); \ + (__typeof__(*(ptr))) __cmpxchg((ptr), \ + _o_, _n_, sizeof(*(ptr))); \ +}) + +#define arch_cmpxchg_local(ptr, o, n) \ + (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr)))) + +#define arch_cmpxchg64(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg((ptr), (o), (n)); \ +}) + +#define arch_cmpxchg64_local(ptr, o, n) \ +({ \ + BUILD_BUG_ON(sizeof(*(ptr)) != 8); \ + arch_cmpxchg_relaxed((ptr), (o), (n)); \ +}) + +#endif /* _ASM_RISCV_CMPXCHG_H */ diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h new file mode 100644 index 000000000..2ac955b51 --- /dev/null +++ b/arch/riscv/include/asm/compat.h @@ -0,0 +1,129 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_COMPAT_H +#define __ASM_COMPAT_H + +#define COMPAT_UTS_MACHINE "riscv\0\0" + +/* + * Architecture specific compatibility types + */ +#include <linux/types.h> +#include <linux/sched.h> +#include <linux/sched/task_stack.h> +#include <asm-generic/compat.h> + +static inline int is_compat_task(void) +{ + return test_thread_flag(TIF_32BIT); +} + +struct compat_user_regs_struct { + compat_ulong_t pc; + compat_ulong_t ra; + compat_ulong_t sp; + compat_ulong_t gp; + compat_ulong_t tp; + compat_ulong_t t0; + compat_ulong_t t1; + compat_ulong_t t2; + compat_ulong_t s0; + compat_ulong_t s1; + compat_ulong_t a0; + compat_ulong_t a1; + compat_ulong_t a2; + compat_ulong_t a3; + compat_ulong_t a4; + compat_ulong_t a5; + compat_ulong_t a6; + compat_ulong_t a7; + compat_ulong_t s2; + compat_ulong_t s3; + compat_ulong_t s4; + compat_ulong_t s5; + compat_ulong_t s6; + compat_ulong_t s7; + compat_ulong_t s8; + compat_ulong_t s9; + compat_ulong_t s10; + compat_ulong_t s11; + compat_ulong_t t3; + compat_ulong_t t4; + compat_ulong_t t5; + compat_ulong_t t6; +}; + +static inline void regs_to_cregs(struct compat_user_regs_struct *cregs, + struct pt_regs *regs) +{ + cregs->pc = (compat_ulong_t) regs->epc; + cregs->ra = (compat_ulong_t) regs->ra; + cregs->sp = (compat_ulong_t) regs->sp; + cregs->gp = (compat_ulong_t) regs->gp; + cregs->tp = (compat_ulong_t) regs->tp; + cregs->t0 = (compat_ulong_t) regs->t0; + cregs->t1 = (compat_ulong_t) regs->t1; + cregs->t2 = (compat_ulong_t) regs->t2; + cregs->s0 = (compat_ulong_t) regs->s0; + cregs->s1 = (compat_ulong_t) regs->s1; + cregs->a0 = (compat_ulong_t) regs->a0; + cregs->a1 = (compat_ulong_t) regs->a1; + cregs->a2 = (compat_ulong_t) regs->a2; + cregs->a3 = (compat_ulong_t) regs->a3; + 
cregs->a4 = (compat_ulong_t) regs->a4; + cregs->a5 = (compat_ulong_t) regs->a5; + cregs->a6 = (compat_ulong_t) regs->a6; + cregs->a7 = (compat_ulong_t) regs->a7; + cregs->s2 = (compat_ulong_t) regs->s2; + cregs->s3 = (compat_ulong_t) regs->s3; + cregs->s4 = (compat_ulong_t) regs->s4; + cregs->s5 = (compat_ulong_t) regs->s5; + cregs->s6 = (compat_ulong_t) regs->s6; + cregs->s7 = (compat_ulong_t) regs->s7; + cregs->s8 = (compat_ulong_t) regs->s8; + cregs->s9 = (compat_ulong_t) regs->s9; + cregs->s10 = (compat_ulong_t) regs->s10; + cregs->s11 = (compat_ulong_t) regs->s11; + cregs->t3 = (compat_ulong_t) regs->t3; + cregs->t4 = (compat_ulong_t) regs->t4; + cregs->t5 = (compat_ulong_t) regs->t5; + cregs->t6 = (compat_ulong_t) regs->t6; +}; + +static inline void cregs_to_regs(struct compat_user_regs_struct *cregs, + struct pt_regs *regs) +{ + regs->epc = (unsigned long) cregs->pc; + regs->ra = (unsigned long) cregs->ra; + regs->sp = (unsigned long) cregs->sp; + regs->gp = (unsigned long) cregs->gp; + regs->tp = (unsigned long) cregs->tp; + regs->t0 = (unsigned long) cregs->t0; + regs->t1 = (unsigned long) cregs->t1; + regs->t2 = (unsigned long) cregs->t2; + regs->s0 = (unsigned long) cregs->s0; + regs->s1 = (unsigned long) cregs->s1; + regs->a0 = (unsigned long) cregs->a0; + regs->a1 = (unsigned long) cregs->a1; + regs->a2 = (unsigned long) cregs->a2; + regs->a3 = (unsigned long) cregs->a3; + regs->a4 = (unsigned long) cregs->a4; + regs->a5 = (unsigned long) cregs->a5; + regs->a6 = (unsigned long) cregs->a6; + regs->a7 = (unsigned long) cregs->a7; + regs->s2 = (unsigned long) cregs->s2; + regs->s3 = (unsigned long) cregs->s3; + regs->s4 = (unsigned long) cregs->s4; + regs->s5 = (unsigned long) cregs->s5; + regs->s6 = (unsigned long) cregs->s6; + regs->s7 = (unsigned long) cregs->s7; + regs->s8 = (unsigned long) cregs->s8; + regs->s9 = (unsigned long) cregs->s9; + regs->s10 = (unsigned long) cregs->s10; + regs->s11 = (unsigned long) cregs->s11; + regs->t3 = (unsigned long) cregs->t3; + regs->t4 = (unsigned long) cregs->t4; + regs->t5 = (unsigned long) cregs->t5; + regs->t6 = (unsigned long) cregs->t6; +}; + +#endif /* __ASM_COMPAT_H */ diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h new file mode 100644 index 000000000..aa128466c --- /dev/null +++ b/arch/riscv/include/asm/cpu_ops.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2020 Western Digital Corporation or its affiliates. + * Based on arch/arm64/include/asm/cpu_ops.h + */ +#ifndef __ASM_CPU_OPS_H +#define __ASM_CPU_OPS_H + +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/threads.h> + +/** + * struct cpu_operations - Callback operations for hotplugging CPUs. + * + * @name: Name of the boot protocol. + * @cpu_prepare: Early one-time preparation step for a cpu. If there + * is a mechanism for doing so, tests whether it is + * possible to boot the given HART. + * @cpu_start: Boots a cpu into the kernel. + * @cpu_disable: Prepares a cpu to die. May fail for some + * mechanism-specific reason, which will cause the hot + * unplug to be aborted. Called from the cpu to be killed. + * @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from + * the cpu being stopped. + * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another + * cpu. 
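 *
 * A hedged editorial sketch (field values assumed, not taken from this
 * patch) of how a boot protocol fills this in; the spinwait variant
 * declared below would look roughly like:
 *
 *	const struct cpu_operations cpu_ops_spinwait = {
 *		.name		= "spinwait",
 *		.cpu_prepare	= spinwait_cpu_prepare,
 *		.cpu_start	= spinwait_cpu_start,
 *	};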
+ */ +struct cpu_operations { + const char *name; + int (*cpu_prepare)(unsigned int cpu); + int (*cpu_start)(unsigned int cpu, + struct task_struct *tidle); +#ifdef CONFIG_HOTPLUG_CPU + int (*cpu_disable)(unsigned int cpu); + void (*cpu_stop)(void); + int (*cpu_is_stopped)(unsigned int cpu); +#endif +}; + +extern const struct cpu_operations cpu_ops_spinwait; +extern const struct cpu_operations *cpu_ops[NR_CPUS]; +void __init cpu_set_ops(int cpu); + +#endif /* ifndef __ASM_CPU_OPS_H */ diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h new file mode 100644 index 000000000..d6e4665b3 --- /dev/null +++ b/arch/riscv/include/asm/cpu_ops_sbi.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 by Rivos Inc. + */ +#ifndef __ASM_CPU_OPS_SBI_H +#define __ASM_CPU_OPS_SBI_H + +#ifndef __ASSEMBLY__ +#include <linux/init.h> +#include <linux/sched.h> +#include <linux/threads.h> + +extern const struct cpu_operations cpu_ops_sbi; + +/** + * struct sbi_hart_boot_data - Hart-specific boot data used during booting and + * cpu hotplug. + * @task_ptr: A pointer to the hart-specific tp + * @stack_ptr: A pointer to the hart-specific sp + */ +struct sbi_hart_boot_data { + void *task_ptr; + void *stack_ptr; +}; +#endif + +#endif /* ifndef __ASM_CPU_OPS_SBI_H */ diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h new file mode 100644 index 000000000..71fdc607d --- /dev/null +++ b/arch/riscv/include/asm/cpuidle.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2021 Allwinner Ltd + * Copyright (C) 2021 Western Digital Corporation or its affiliates. + */ + +#ifndef _ASM_RISCV_CPUIDLE_H +#define _ASM_RISCV_CPUIDLE_H + +#include <asm/barrier.h> +#include <asm/processor.h> + +static inline void cpu_do_idle(void) +{ + /* + * Add mb() here to ensure that all + * IO/MEM accesses are completed prior + * to entering WFI.
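 * (Editorial note: on RISC-V, mb() is RISCV_FENCE(iorw,iorw), and
 * wait_for_interrupt() below is a thin wrapper around the wfi
 * instruction.)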
+ */ + mb(); + wait_for_interrupt(); +} + +#endif diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h new file mode 100644 index 000000000..0e571f648 --- /dev/null +++ b/arch/riscv/include/asm/csr.h @@ -0,0 +1,408 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _ASM_RISCV_CSR_H +#define _ASM_RISCV_CSR_H + +#include <asm/asm.h> +#include <linux/const.h> + +/* Status register flags */ +#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */ +#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */ +#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */ +#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */ +#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */ +#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */ +#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */ + +#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */ +#define SR_FS_OFF _AC(0x00000000, UL) +#define SR_FS_INITIAL _AC(0x00002000, UL) +#define SR_FS_CLEAN _AC(0x00004000, UL) +#define SR_FS_DIRTY _AC(0x00006000, UL) + +#define SR_XS _AC(0x00018000, UL) /* Extension Status */ +#define SR_XS_OFF _AC(0x00000000, UL) +#define SR_XS_INITIAL _AC(0x00008000, UL) +#define SR_XS_CLEAN _AC(0x00010000, UL) +#define SR_XS_DIRTY _AC(0x00018000, UL) + +#ifndef CONFIG_64BIT +#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */ +#else +#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */ +#endif + +#ifdef CONFIG_64BIT +#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */ +#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */ +#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */ +#define SR_UXL_SHIFT 32 +#endif + +/* SATP flags */ +#ifndef CONFIG_64BIT +#define SATP_PPN _AC(0x003FFFFF, UL) +#define SATP_MODE_32 _AC(0x80000000, UL) +#define SATP_ASID_BITS 9 +#define SATP_ASID_SHIFT 22 +#define SATP_ASID_MASK _AC(0x1FF, UL) +#else +#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL) +#define SATP_MODE_39 _AC(0x8000000000000000, UL) +#define SATP_MODE_48 _AC(0x9000000000000000, UL) +#define SATP_MODE_57 _AC(0xa000000000000000, UL) +#define SATP_ASID_BITS 16 +#define SATP_ASID_SHIFT 44 +#define SATP_ASID_MASK _AC(0xFFFF, UL) +#endif + +/* Exception cause high bit - is an interrupt if set */ +#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1)) + +/* Interrupt causes (minus the high bit) */ +#define IRQ_S_SOFT 1 +#define IRQ_VS_SOFT 2 +#define IRQ_M_SOFT 3 +#define IRQ_S_TIMER 5 +#define IRQ_VS_TIMER 6 +#define IRQ_M_TIMER 7 +#define IRQ_S_EXT 9 +#define IRQ_VS_EXT 10 +#define IRQ_M_EXT 11 +#define IRQ_PMU_OVF 13 + +/* Exception causes */ +#define EXC_INST_MISALIGNED 0 +#define EXC_INST_ACCESS 1 +#define EXC_INST_ILLEGAL 2 +#define EXC_BREAKPOINT 3 +#define EXC_LOAD_ACCESS 5 +#define EXC_STORE_ACCESS 7 +#define EXC_SYSCALL 8 +#define EXC_HYPERVISOR_SYSCALL 9 +#define EXC_SUPERVISOR_SYSCALL 10 +#define EXC_INST_PAGE_FAULT 12 +#define EXC_LOAD_PAGE_FAULT 13 +#define EXC_STORE_PAGE_FAULT 15 +#define EXC_INST_GUEST_PAGE_FAULT 20 +#define EXC_LOAD_GUEST_PAGE_FAULT 21 +#define EXC_VIRTUAL_INST_FAULT 22 +#define EXC_STORE_GUEST_PAGE_FAULT 23 + +/* PMP configuration */ +#define PMP_R 0x01 +#define PMP_W 0x02 +#define PMP_X 0x04 +#define PMP_A 0x18 +#define PMP_A_TOR 0x08 +#define PMP_A_NA4 0x10 +#define PMP_A_NAPOT 0x18 +#define PMP_L 0x80 + +/* HSTATUS flags */ +#ifdef CONFIG_64BIT +#define HSTATUS_VSXL _AC(0x300000000, UL) +#define 
HSTATUS_VSXL_SHIFT 32 +#endif +#define HSTATUS_VTSR _AC(0x00400000, UL) +#define HSTATUS_VTW _AC(0x00200000, UL) +#define HSTATUS_VTVM _AC(0x00100000, UL) +#define HSTATUS_VGEIN _AC(0x0003f000, UL) +#define HSTATUS_VGEIN_SHIFT 12 +#define HSTATUS_HU _AC(0x00000200, UL) +#define HSTATUS_SPVP _AC(0x00000100, UL) +#define HSTATUS_SPV _AC(0x00000080, UL) +#define HSTATUS_GVA _AC(0x00000040, UL) +#define HSTATUS_VSBE _AC(0x00000020, UL) + +/* HGATP flags */ +#define HGATP_MODE_OFF _AC(0, UL) +#define HGATP_MODE_SV32X4 _AC(1, UL) +#define HGATP_MODE_SV39X4 _AC(8, UL) +#define HGATP_MODE_SV48X4 _AC(9, UL) +#define HGATP_MODE_SV57X4 _AC(10, UL) + +#define HGATP32_MODE_SHIFT 31 +#define HGATP32_VMID_SHIFT 22 +#define HGATP32_VMID_MASK _AC(0x1FC00000, UL) +#define HGATP32_PPN _AC(0x003FFFFF, UL) + +#define HGATP64_MODE_SHIFT 60 +#define HGATP64_VMID_SHIFT 44 +#define HGATP64_VMID_MASK _AC(0x03FFF00000000000, UL) +#define HGATP64_PPN _AC(0x00000FFFFFFFFFFF, UL) + +#define HGATP_PAGE_SHIFT 12 + +#ifdef CONFIG_64BIT +#define HGATP_PPN HGATP64_PPN +#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT +#define HGATP_VMID_MASK HGATP64_VMID_MASK +#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT +#else +#define HGATP_PPN HGATP32_PPN +#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT +#define HGATP_VMID_MASK HGATP32_VMID_MASK +#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT +#endif + +/* VSIP & HVIP relation */ +#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT) +#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \ + (_AC(1, UL) << IRQ_S_TIMER) | \ + (_AC(1, UL) << IRQ_S_EXT)) + +/* xENVCFG flags */ +#define ENVCFG_STCE (_AC(1, ULL) << 63) +#define ENVCFG_PBMTE (_AC(1, ULL) << 62) +#define ENVCFG_CBZE (_AC(1, UL) << 7) +#define ENVCFG_CBCFE (_AC(1, UL) << 6) +#define ENVCFG_CBIE_SHIFT 4 +#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT) +#define ENVCFG_CBIE_ILL _AC(0x0, UL) +#define ENVCFG_CBIE_FLUSH _AC(0x1, UL) +#define ENVCFG_CBIE_INV _AC(0x3, UL) +#define ENVCFG_FIOM _AC(0x1, UL) + +/* symbolic CSR names: */ +#define CSR_CYCLE 0xc00 +#define CSR_TIME 0xc01 +#define CSR_INSTRET 0xc02 +#define CSR_HPMCOUNTER3 0xc03 +#define CSR_HPMCOUNTER4 0xc04 +#define CSR_HPMCOUNTER5 0xc05 +#define CSR_HPMCOUNTER6 0xc06 +#define CSR_HPMCOUNTER7 0xc07 +#define CSR_HPMCOUNTER8 0xc08 +#define CSR_HPMCOUNTER9 0xc09 +#define CSR_HPMCOUNTER10 0xc0a +#define CSR_HPMCOUNTER11 0xc0b +#define CSR_HPMCOUNTER12 0xc0c +#define CSR_HPMCOUNTER13 0xc0d +#define CSR_HPMCOUNTER14 0xc0e +#define CSR_HPMCOUNTER15 0xc0f +#define CSR_HPMCOUNTER16 0xc10 +#define CSR_HPMCOUNTER17 0xc11 +#define CSR_HPMCOUNTER18 0xc12 +#define CSR_HPMCOUNTER19 0xc13 +#define CSR_HPMCOUNTER20 0xc14 +#define CSR_HPMCOUNTER21 0xc15 +#define CSR_HPMCOUNTER22 0xc16 +#define CSR_HPMCOUNTER23 0xc17 +#define CSR_HPMCOUNTER24 0xc18 +#define CSR_HPMCOUNTER25 0xc19 +#define CSR_HPMCOUNTER26 0xc1a +#define CSR_HPMCOUNTER27 0xc1b +#define CSR_HPMCOUNTER28 0xc1c +#define CSR_HPMCOUNTER29 0xc1d +#define CSR_HPMCOUNTER30 0xc1e +#define CSR_HPMCOUNTER31 0xc1f +#define CSR_CYCLEH 0xc80 +#define CSR_TIMEH 0xc81 +#define CSR_INSTRETH 0xc82 +#define CSR_HPMCOUNTER3H 0xc83 +#define CSR_HPMCOUNTER4H 0xc84 +#define CSR_HPMCOUNTER5H 0xc85 +#define CSR_HPMCOUNTER6H 0xc86 +#define CSR_HPMCOUNTER7H 0xc87 +#define CSR_HPMCOUNTER8H 0xc88 +#define CSR_HPMCOUNTER9H 0xc89 +#define CSR_HPMCOUNTER10H 0xc8a +#define CSR_HPMCOUNTER11H 0xc8b +#define CSR_HPMCOUNTER12H 0xc8c +#define CSR_HPMCOUNTER13H 0xc8d +#define CSR_HPMCOUNTER14H 0xc8e +#define CSR_HPMCOUNTER15H 0xc8f +#define CSR_HPMCOUNTER16H 0xc90 
+#define CSR_HPMCOUNTER17H 0xc91 +#define CSR_HPMCOUNTER18H 0xc92 +#define CSR_HPMCOUNTER19H 0xc93 +#define CSR_HPMCOUNTER20H 0xc94 +#define CSR_HPMCOUNTER21H 0xc95 +#define CSR_HPMCOUNTER22H 0xc96 +#define CSR_HPMCOUNTER23H 0xc97 +#define CSR_HPMCOUNTER24H 0xc98 +#define CSR_HPMCOUNTER25H 0xc99 +#define CSR_HPMCOUNTER26H 0xc9a +#define CSR_HPMCOUNTER27H 0xc9b +#define CSR_HPMCOUNTER28H 0xc9c +#define CSR_HPMCOUNTER29H 0xc9d +#define CSR_HPMCOUNTER30H 0xc9e +#define CSR_HPMCOUNTER31H 0xc9f + +#define CSR_SSCOUNTOVF 0xda0 + +#define CSR_SSTATUS 0x100 +#define CSR_SIE 0x104 +#define CSR_STVEC 0x105 +#define CSR_SCOUNTEREN 0x106 +#define CSR_SSCRATCH 0x140 +#define CSR_SEPC 0x141 +#define CSR_SCAUSE 0x142 +#define CSR_STVAL 0x143 +#define CSR_SIP 0x144 +#define CSR_SATP 0x180 + +#define CSR_STIMECMP 0x14D +#define CSR_STIMECMPH 0x15D + +#define CSR_VSSTATUS 0x200 +#define CSR_VSIE 0x204 +#define CSR_VSTVEC 0x205 +#define CSR_VSSCRATCH 0x240 +#define CSR_VSEPC 0x241 +#define CSR_VSCAUSE 0x242 +#define CSR_VSTVAL 0x243 +#define CSR_VSIP 0x244 +#define CSR_VSATP 0x280 +#define CSR_VSTIMECMP 0x24D +#define CSR_VSTIMECMPH 0x25D + +#define CSR_HSTATUS 0x600 +#define CSR_HEDELEG 0x602 +#define CSR_HIDELEG 0x603 +#define CSR_HIE 0x604 +#define CSR_HTIMEDELTA 0x605 +#define CSR_HCOUNTEREN 0x606 +#define CSR_HGEIE 0x607 +#define CSR_HENVCFG 0x60a +#define CSR_HTIMEDELTAH 0x615 +#define CSR_HENVCFGH 0x61a +#define CSR_HTVAL 0x643 +#define CSR_HIP 0x644 +#define CSR_HVIP 0x645 +#define CSR_HTINST 0x64a +#define CSR_HGATP 0x680 +#define CSR_HGEIP 0xe12 + +#define CSR_MSTATUS 0x300 +#define CSR_MISA 0x301 +#define CSR_MIE 0x304 +#define CSR_MTVEC 0x305 +#define CSR_MENVCFG 0x30a +#define CSR_MENVCFGH 0x31a +#define CSR_MSCRATCH 0x340 +#define CSR_MEPC 0x341 +#define CSR_MCAUSE 0x342 +#define CSR_MTVAL 0x343 +#define CSR_MIP 0x344 +#define CSR_PMPCFG0 0x3a0 +#define CSR_PMPADDR0 0x3b0 +#define CSR_MVENDORID 0xf11 +#define CSR_MARCHID 0xf12 +#define CSR_MIMPID 0xf13 +#define CSR_MHARTID 0xf14 + +#ifdef CONFIG_RISCV_M_MODE +# define CSR_STATUS CSR_MSTATUS +# define CSR_IE CSR_MIE +# define CSR_TVEC CSR_MTVEC +# define CSR_SCRATCH CSR_MSCRATCH +# define CSR_EPC CSR_MEPC +# define CSR_CAUSE CSR_MCAUSE +# define CSR_TVAL CSR_MTVAL +# define CSR_IP CSR_MIP + +# define SR_IE SR_MIE +# define SR_PIE SR_MPIE +# define SR_PP SR_MPP + +# define RV_IRQ_SOFT IRQ_M_SOFT +# define RV_IRQ_TIMER IRQ_M_TIMER +# define RV_IRQ_EXT IRQ_M_EXT +#else /* CONFIG_RISCV_M_MODE */ +# define CSR_STATUS CSR_SSTATUS +# define CSR_IE CSR_SIE +# define CSR_TVEC CSR_STVEC +# define CSR_SCRATCH CSR_SSCRATCH +# define CSR_EPC CSR_SEPC +# define CSR_CAUSE CSR_SCAUSE +# define CSR_TVAL CSR_STVAL +# define CSR_IP CSR_SIP + +# define SR_IE SR_SIE +# define SR_PIE SR_SPIE +# define SR_PP SR_SPP + +# define RV_IRQ_SOFT IRQ_S_SOFT +# define RV_IRQ_TIMER IRQ_S_TIMER +# define RV_IRQ_EXT IRQ_S_EXT +# define RV_IRQ_PMU IRQ_PMU_OVF +# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF) + +#endif /* !CONFIG_RISCV_M_MODE */ + +/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */ +#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT) +#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER) +#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT) + +#ifndef __ASSEMBLY__ + +#define csr_swap(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\ + : "=r" (__v) : "rK" (__v) \ + : "memory"); \ + __v; \ +}) + +#define csr_read(csr) \ +({ \ + register unsigned long __v; \ + __asm__ __volatile__ ("csrr %0, " 
__ASM_STR(csr) \ + : "=r" (__v) : \ + : "memory"); \ + __v; \ +}) + +#define csr_write(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#define csr_read_set(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\ + : "=r" (__v) : "rK" (__v) \ + : "memory"); \ + __v; \ +}) + +#define csr_set(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#define csr_read_clear(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\ + : "=r" (__v) : "rK" (__v) \ + : "memory"); \ + __v; \ +}) + +#define csr_clear(csr, val) \ +({ \ + unsigned long __v = (unsigned long)(val); \ + __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \ + : : "rK" (__v) \ + : "memory"); \ +}) + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_CSR_H */ diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h new file mode 100644 index 000000000..21774d868 --- /dev/null +++ b/arch/riscv/include/asm/current.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Based on arm/arm64/include/asm/current.h + * + * Copyright (C) 2016 ARM + * Copyright (C) 2017 SiFive + */ + + +#ifndef _ASM_RISCV_CURRENT_H +#define _ASM_RISCV_CURRENT_H + +#include <linux/bug.h> +#include <linux/compiler.h> + +#ifndef __ASSEMBLY__ + +struct task_struct; + +register struct task_struct *riscv_current_is_tp __asm__("tp"); + +/* + * This only works because "struct thread_info" is at offset 0 from "struct + * task_struct". This constraint seems to be necessary on other architectures + * as well, but __switch_to enforces it. We can't check TASK_TI here because + * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct + * task_struct" here due to some header ordering problems. + */ +static __always_inline struct task_struct *get_current(void) +{ + return riscv_current_is_tp; +} + +#define current get_current() + +register unsigned long current_stack_pointer __asm__("sp"); + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_CURRENT_H */ diff --git a/arch/riscv/include/asm/delay.h b/arch/riscv/include/asm/delay.h new file mode 100644 index 000000000..524f8ef7c --- /dev/null +++ b/arch/riscv/include/asm/delay.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com> + * Copyright (C) 2016 Regents of the University of California + */ + +#ifndef _ASM_RISCV_DELAY_H +#define _ASM_RISCV_DELAY_H + +extern unsigned long riscv_timebase; + +#define udelay udelay +extern void udelay(unsigned long usecs); + +#define ndelay ndelay +extern void ndelay(unsigned long nsecs); + +extern void __delay(unsigned long cycles); + +#endif /* _ASM_RISCV_DELAY_H */ diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h new file mode 100644 index 000000000..e229d7be4 --- /dev/null +++ b/arch/riscv/include/asm/efi.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
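+ * + * Arch glue for early EFI init and for mapping and calling EFI runtime + * services on RISC-V.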
+ */ +#ifndef _ASM_EFI_H +#define _ASM_EFI_H + +#include <asm/csr.h> +#include <asm/io.h> +#include <asm/mmu_context.h> +#include <asm/ptrace.h> +#include <asm/tlbflush.h> +#include <asm/pgalloc.h> + +#ifdef CONFIG_EFI +extern void efi_init(void); +#else +#define efi_init() +#endif + +int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md); +int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md); + +#define arch_efi_call_virt_setup() ({ \ + sync_kernel_mappings(efi_mm.pgd); \ + efi_virtmap_load(); \ + }) +#define arch_efi_call_virt_teardown() efi_virtmap_unload() + +#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE) + +/* Load initrd anywhere in system RAM */ +static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr) +{ + return ULONG_MAX; +} + +#define alloc_screen_info(x...) (&screen_info) + +static inline void free_screen_info(struct screen_info *si) +{ +} + +void efi_virtmap_load(void); +void efi_virtmap_unload(void); + +#endif /* _ASM_EFI_H */ diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h new file mode 100644 index 000000000..e7acffdf2 --- /dev/null +++ b/arch/riscv/include/asm/elf.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_ELF_H +#define _ASM_RISCV_ELF_H + +#include <uapi/linux/elf.h> +#include <linux/compat.h> +#include <uapi/asm/elf.h> +#include <asm/auxvec.h> +#include <asm/byteorder.h> +#include <asm/cacheinfo.h> + +/* + * These are used to set parameters in the core dumps. + */ +#define ELF_ARCH EM_RISCV + +#ifndef ELF_CLASS +#ifdef CONFIG_64BIT +#define ELF_CLASS ELFCLASS64 +#else +#define ELF_CLASS ELFCLASS32 +#endif +#endif + +#define ELF_DATA ELFDATA2LSB + +/* + * This is used to ensure we don't load something for the wrong architecture. + */ +#define elf_check_arch(x) (((x)->e_machine == EM_RISCV) && \ + ((x)->e_ident[EI_CLASS] == ELF_CLASS)) + +extern bool compat_elf_check_arch(Elf32_Ehdr *hdr); +#define compat_elf_check_arch compat_elf_check_arch + +#define CORE_DUMP_USE_REGSET +#define ELF_EXEC_PAGESIZE (PAGE_SIZE) + +/* + * This is the location that an ET_DYN program is loaded if exec'ed. Typical + * use of this is to invoke "./ld.so someprog" to test out a new version of + * the loader. We need to make sure that it is out of the way of the program + * that it will "exec", and that there is sufficient room for the brk. + */ +#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2) + +#ifdef CONFIG_64BIT +#ifdef CONFIG_COMPAT +#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \ + 0x7ff >> (PAGE_SHIFT - 12) : \ + 0x3ffff >> (PAGE_SHIFT - 12)) +#else +#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12)) +#endif +#endif +/* + * This yields a mask that user programs can use to figure out what + * instruction set this CPU supports. This could be done in user space, + * but it's not easy, and we've already done it here. + */ +#define ELF_HWCAP (elf_hwcap) +extern unsigned long elf_hwcap; + +/* + * This yields a string that ld.so will use to load implementation + * specific libraries for optimization. This is more specific in + * intent than poking at uname or /proc/cpuinfo. 
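+ * RISC-V currently leaves the platform string unset (NULL below) and + * exposes ISA information through ELF_HWCAP instead.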
+ */ +#define ELF_PLATFORM (NULL) + +#define COMPAT_ELF_PLATFORM (NULL) + +#ifdef CONFIG_MMU +#define ARCH_DLINFO \ +do { \ + /* \ + * Note that we add ulong after elf_addr_t because \ + * casting current->mm->context.vdso triggers a cast \ + * warning of cast from pointer to integer for \ + * COMPAT ELFCLASS32. \ + */ \ + NEW_AUX_ENT(AT_SYSINFO_EHDR, \ + (elf_addr_t)(ulong)current->mm->context.vdso); \ + NEW_AUX_ENT(AT_L1I_CACHESIZE, \ + get_cache_size(1, CACHE_TYPE_INST)); \ + NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \ + get_cache_geometry(1, CACHE_TYPE_INST)); \ + NEW_AUX_ENT(AT_L1D_CACHESIZE, \ + get_cache_size(1, CACHE_TYPE_DATA)); \ + NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, \ + get_cache_geometry(1, CACHE_TYPE_DATA)); \ + NEW_AUX_ENT(AT_L2_CACHESIZE, \ + get_cache_size(2, CACHE_TYPE_UNIFIED)); \ + NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \ + get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \ + NEW_AUX_ENT(AT_L3_CACHESIZE, \ + get_cache_size(3, CACHE_TYPE_UNIFIED)); \ + NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, \ + get_cache_geometry(3, CACHE_TYPE_UNIFIED)); \ +} while (0) +#define ARCH_HAS_SETUP_ADDITIONAL_PAGES +struct linux_binprm; +extern int arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); +#endif /* CONFIG_MMU */ + +#define ELF_CORE_COPY_REGS(dest, regs) \ +do { \ + *(struct user_regs_struct *)&(dest) = \ + *(struct user_regs_struct *)regs; \ +} while (0); + +#ifdef CONFIG_COMPAT + +#define SET_PERSONALITY(ex) \ +do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \ + set_thread_flag(TIF_32BIT); \ + else \ + clear_thread_flag(TIF_32BIT); \ + if (personality(current->personality) != PER_LINUX32) \ + set_personality(PER_LINUX | \ + (current->personality & (~PER_MASK))); \ +} while (0) + +#define COMPAT_ELF_ET_DYN_BASE ((TASK_SIZE_32 / 3) * 2) + +/* rv32 registers */ +typedef compat_ulong_t compat_elf_greg_t; +typedef compat_elf_greg_t compat_elf_gregset_t[ELF_NGREG]; + +extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm, + int uses_interp); +#define compat_arch_setup_additional_pages \ + compat_arch_setup_additional_pages + +#endif /* CONFIG_COMPAT */ +#endif /* _ASM_RISCV_ELF_H */ diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h new file mode 100644 index 000000000..7d2675bb7 --- /dev/null +++ b/arch/riscv/include/asm/errata_list.h @@ -0,0 +1,147 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Sifive. 
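+ * + * IDs and ALTERNATIVE() wrappers for vendor errata and for optional ISA + * extensions that are patched in at boot.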
+ */ +#ifndef ASM_ERRATA_LIST_H +#define ASM_ERRATA_LIST_H + +#include <asm/alternative.h> +#include <asm/vendorid_list.h> + +#ifdef CONFIG_ERRATA_SIFIVE +#define ERRATA_SIFIVE_CIP_453 0 +#define ERRATA_SIFIVE_CIP_1200 1 +#define ERRATA_SIFIVE_NUMBER 2 +#endif + +#ifdef CONFIG_ERRATA_THEAD +#define ERRATA_THEAD_PBMT 0 +#define ERRATA_THEAD_CMO 1 +#define ERRATA_THEAD_NUMBER 2 +#endif + +#define CPUFEATURE_SVPBMT 0 +#define CPUFEATURE_ZICBOM 1 +#define CPUFEATURE_NUMBER 2 + +#ifdef __ASSEMBLY__ + +#define ALT_INSN_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \ + __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) + +#define ALT_PAGE_FAULT(x) \ +ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \ + __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \ + SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \ + CONFIG_ERRATA_SIFIVE_CIP_453) +#else /* !__ASSEMBLY__ */ + +#define ALT_FLUSH_TLB_PAGE(x) \ +asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \ + ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \ + : : "r" (addr) : "memory") + +/* + * _val is marked as "will be overwritten", so we need to set it to 0 + * in the default case. + */ +#define ALT_SVPBMT_SHIFT 61 +#define ALT_THEAD_PBMT_SHIFT 59 +#define ALT_SVPBMT(_val, prot) \ +asm(ALTERNATIVE_2("li %0, 0\t\nnop", \ + "li %0, %1\t\nslli %0,%0,%3", 0, \ + CPUFEATURE_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \ + "li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \ + ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \ + : "=r"(_val) \ + : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \ + "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \ + "I"(ALT_SVPBMT_SHIFT), \ + "I"(ALT_THEAD_PBMT_SHIFT)) + +#ifdef CONFIG_ERRATA_THEAD_PBMT +/* + * IO/NOCACHE memory types are handled together with svpbmt, + * so on T-Head chips, check if no other memory type is set, + * and set the non-0 PMA type if applicable. + */ +#define ALT_THEAD_PMA(_val) \ +asm volatile(ALTERNATIVE( \ + __nops(7), \ + "li t3, %1\n\t" \ + "slli t3, t3, %3\n\t" \ + "and t3, %0, t3\n\t" \ + "bne t3, zero, 2f\n\t" \ + "li t3, %2\n\t" \ + "slli t3, t3, %3\n\t" \ + "or %0, %0, t3\n\t" \ + "2:", THEAD_VENDOR_ID, \ + ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \ + : "+r"(_val) \ + : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \ + "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \ + "I"(ALT_THEAD_PBMT_SHIFT) \ + : "t3") +#else +#define ALT_THEAD_PMA(_val) +#endif + +/* + * dcache.ipa rs1 (invalidate, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01010 rs1 000 00000 0001011 + * dcache.iva rs1 (invalidate, virtual address) + * 0000001 00110 rs1 000 00000 0001011 + * + * dcache.cpa rs1 (clean, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01001 rs1 000 00000 0001011 + * dcache.cva rs1 (clean, virtual address) + * 0000001 00101 rs1 000 00000 0001011 + * + * dcache.cipa rs1 (clean then invalidate, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01011 rs1 000 00000 0001011 + * dcache.civa rs1 (... 
virtual address) + * 0000001 00111 rs1 000 00000 0001011 + * + * sync.s (make sure all cache operations finished) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000000 11001 00000 000 00000 0001011 + */ +#define THEAD_inval_A0 ".long 0x0265000b" +#define THEAD_clean_A0 ".long 0x0255000b" +#define THEAD_flush_A0 ".long 0x0275000b" +#define THEAD_SYNC_S ".long 0x0190000b" + +#define ALT_CMO_OP(_op, _start, _size, _cachesize) \ +asm volatile(ALTERNATIVE_2( \ + __nops(6), \ + "mv a0, %1\n\t" \ + "j 2f\n\t" \ + "3:\n\t" \ + "cbo." __stringify(_op) " (a0)\n\t" \ + "add a0, a0, %0\n\t" \ + "2:\n\t" \ + "bltu a0, %2, 3b\n\t" \ + "nop", 0, CPUFEATURE_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \ + "mv a0, %1\n\t" \ + "j 2f\n\t" \ + "3:\n\t" \ + THEAD_##_op##_A0 "\n\t" \ + "add a0, a0, %0\n\t" \ + "2:\n\t" \ + "bltu a0, %2, 3b\n\t" \ + THEAD_SYNC_S, THEAD_VENDOR_ID, \ + ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \ + : : "r"(_cachesize), \ + "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \ + "r"((unsigned long)(_start) + (_size)) \ + : "a0") + +#endif /* __ASSEMBLY__ */ + +#endif diff --git a/arch/riscv/include/asm/extable.h b/arch/riscv/include/asm/extable.h new file mode 100644 index 000000000..512012d19 --- /dev/null +++ b/arch/riscv/include/asm/extable.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_EXTABLE_H +#define _ASM_RISCV_EXTABLE_H + +/* + * The exception table consists of pairs of relative offsets: the first + * is the relative offset to an instruction that is allowed to fault, + * and the second is the relative offset at which the program should + * continue. No registers are modified, so it is entirely up to the + * continuation code to figure out what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +struct exception_table_entry { + int insn, fixup; + short type, data; +}; + +#define ARCH_HAS_RELATIVE_EXTABLE + +#define swap_ex_entry_fixup(a, b, tmp, delta) \ +do { \ + (a)->fixup = (b)->fixup + (delta); \ + (b)->fixup = (tmp).fixup - (delta); \ + (a)->type = (b)->type; \ + (b)->type = (tmp).type; \ + (a)->data = (b)->data; \ + (b)->data = (tmp).data; \ +} while (0) + +bool fixup_exception(struct pt_regs *regs); + +#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I) +bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs); +#else +static inline bool +ex_handler_bpf(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + return false; +} +#endif + +#endif diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h new file mode 100644 index 000000000..2b443a3a4 --- /dev/null +++ b/arch/riscv/include/asm/fence.h @@ -0,0 +1,12 @@ +#ifndef _ASM_RISCV_FENCE_H +#define _ASM_RISCV_FENCE_H + +#ifdef CONFIG_SMP +#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n" +#define RISCV_RELEASE_BARRIER "\tfence rw, w\n" +#else +#define RISCV_ACQUIRE_BARRIER +#define RISCV_RELEASE_BARRIER +#endif + +#endif /* _ASM_RISCV_FENCE_H */ diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h new file mode 100644 index 000000000..0a55099bb --- /dev/null +++ b/arch/riscv/include/asm/fixmap.h @@ -0,0 +1,67 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
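+ * + * Compile-time fixed ('fixmap') virtual address slots, populated at boot.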
+ */ + +#ifndef _ASM_RISCV_FIXMAP_H +#define _ASM_RISCV_FIXMAP_H + +#include <linux/kernel.h> +#include <linux/sizes.h> +#include <linux/pgtable.h> +#include <asm/page.h> + +#ifdef CONFIG_MMU +/* + * Here we define all the compile-time 'special' virtual addresses. + * The point is to have a constant address at compile time, but to + * set the physical address only in the boot process. + * + * These 'compile-time allocated' memory buffers are page-sized. Use + * set_fixmap(idx,phys) to associate physical memory with fixmap indices. + */ +enum fixed_addresses { + FIX_HOLE, + /* + * The fdt fixmap mapping must be PMD aligned and will be mapped + * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit. + */ + FIX_FDT_END, + FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1, + + /* Below fixmaps will be mapped using fixmap_pte */ + FIX_PTE, + FIX_PMD, + FIX_PUD, + FIX_P4D, + FIX_TEXT_POKE1, + FIX_TEXT_POKE0, + FIX_EARLYCON_MEM_BASE, + + __end_of_permanent_fixed_addresses, + /* + * Temporary boot-time mappings, used by early_ioremap(), + * before ioremap() is functional. + */ +#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE) +#define FIX_BTMAPS_SLOTS 7 +#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS) + + FIX_BTMAP_END = __end_of_permanent_fixed_addresses, + FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1, + + __end_of_fixed_addresses +}; + +#define __early_set_fixmap __set_fixmap + +#define __late_set_fixmap __set_fixmap +#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR) + +extern void __set_fixmap(enum fixed_addresses idx, + phys_addr_t phys, pgprot_t prot); + +#include <asm-generic/fixmap.h> + +#endif /* CONFIG_MMU */ +#endif /* _ASM_RISCV_FIXMAP_H */ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h new file mode 100644 index 000000000..d47d87c2d --- /dev/null +++ b/arch/riscv/include/asm/ftrace.h @@ -0,0 +1,114 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 Andes Technology Corporation */ + +#ifndef _ASM_RISCV_FTRACE_H +#define _ASM_RISCV_FTRACE_H + +/* + * The graph frame test is not possible if CONFIG_FRAME_POINTER is not enabled. + * Check arch/riscv/kernel/mcount.S for detail. + */ +#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER) +#define HAVE_FUNCTION_GRAPH_FP_TEST +#endif +#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR + +/* + * Clang prior to 13 had "mcount" instead of "_mcount": + * https://reviews.llvm.org/D98881 + */ +#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000 +#define MCOUNT_NAME _mcount +#else +#define MCOUNT_NAME mcount +#endif + +#define ARCH_SUPPORTS_FTRACE_OPS 1 +#ifndef __ASSEMBLY__ +void MCOUNT_NAME(void); +static inline unsigned long ftrace_call_adjust(unsigned long addr) +{ + return addr; +} + +struct dyn_arch_ftrace { +}; +#endif + +#ifdef CONFIG_DYNAMIC_FTRACE +/* + * A general call in RISC-V is a pair of insts: + * 1) auipc: setting high-20 pc-related bits to ra register + * 2) jalr: setting low-12 offset to ra, jump to ra, and set ra to + * return address (original pc + 4) + * + *<ftrace enable>: + * 0: auipc t0/ra, 0x? + * 4: jalr t0/ra, ?(t0/ra) + * + *<ftrace disable>: + * 0: nop + * 4: nop + * + * Dynamic ftrace generates probes to call sites, so we must deal with + * both auipc and jalr at the same time. 
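+ * Both instructions are rewritten together (MCOUNT_INSN_SIZE below is 8), + * so the make_call_t0()/make_call_ra() helpers always emit the complete + * auipc+jalr pair.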
+ */ + +#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME) +#define JALR_SIGN_MASK (0x00000800) +#define JALR_OFFSET_MASK (0x00000fff) +#define AUIPC_OFFSET_MASK (0xfffff000) +#define AUIPC_PAD (0x00001000) +#define JALR_SHIFT 20 +#define JALR_RA (0x000080e7) +#define AUIPC_RA (0x00000097) +#define JALR_T0 (0x000282e7) +#define AUIPC_T0 (0x00000297) +#define NOP4 (0x00000013) + +#define to_jalr_t0(offset) \ + (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0) + +#define to_auipc_t0(offset) \ + ((offset & JALR_SIGN_MASK) ? \ + (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \ + ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0)) + +#define make_call_t0(caller, callee, call) \ +do { \ + unsigned int offset = \ + (unsigned long) callee - (unsigned long) caller; \ + call[0] = to_auipc_t0(offset); \ + call[1] = to_jalr_t0(offset); \ +} while (0) + +#define to_jalr_ra(offset) \ + (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA) + +#define to_auipc_ra(offset) \ + ((offset & JALR_SIGN_MASK) ? \ + (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \ + ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA)) + +#define make_call_ra(caller, callee, call) \ +do { \ + unsigned int offset = \ + (unsigned long) callee - (unsigned long) caller; \ + call[0] = to_auipc_ra(offset); \ + call[1] = to_jalr_ra(offset); \ +} while (0) + +/* + * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here. + */ +#define MCOUNT_INSN_SIZE 8 + +#ifndef __ASSEMBLY__ +struct dyn_ftrace; +int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); +#define ftrace_init_nop ftrace_init_nop +#endif + +#endif /* CONFIG_DYNAMIC_FTRACE */ + +#endif /* _ASM_RISCV_FTRACE_H */ diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h new file mode 100644 index 000000000..fc8130f99 --- /dev/null +++ b/arch/riscv/include/asm/futex.h @@ -0,0 +1,104 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org) + * Copyright (c) 2018 Jim Wilson (jimw@sifive.com) + */ + +#ifndef _ASM_RISCV_FUTEX_H +#define _ASM_RISCV_FUTEX_H + +#include <linux/futex.h> +#include <linux/uaccess.h> +#include <linux/errno.h> +#include <asm/asm.h> +#include <asm/asm-extable.h> + +/* We don't even really need the extable code, but for now keep it simple */ +#ifndef CONFIG_MMU +#define __enable_user_access() do { } while (0) +#define __disable_user_access() do { } while (0) +#endif + +#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \ +{ \ + __enable_user_access(); \ + __asm__ __volatile__ ( \ + "1: " insn " \n" \ + "2: \n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r]) \ + : [r] "+r" (ret), [ov] "=&r" (oldval), \ + [u] "+m" (*uaddr) \ + : [op] "Jr" (oparg) \ + : "memory"); \ + __disable_user_access(); \ +} + +static inline int +arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr) +{ + int oldval = 0, ret = 0; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + switch (op) { + case FUTEX_OP_SET: + __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ADD: + __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_OR: + __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; + case FUTEX_OP_ANDN: + __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, ~oparg); + break; + case FUTEX_OP_XOR: + __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]", + ret, oldval, uaddr, oparg); + break; 
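+ /* No AMO instruction exists for this operation */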
+ default: + ret = -ENOSYS; + } + + if (!ret) + *oval = oldval; + + return ret; +} + +static inline int +futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, + u32 oldval, u32 newval) +{ + int ret = 0; + u32 val; + uintptr_t tmp; + + if (!access_ok(uaddr, sizeof(u32))) + return -EFAULT; + + __enable_user_access(); + __asm__ __volatile__ ( + "1: lr.w.aqrl %[v],%[u] \n" + " bne %[v],%z[ov],3f \n" + "2: sc.w.aqrl %[t],%z[nv],%[u] \n" + " bnez %[t],1b \n" + "3: \n" + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \ + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \ + : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp) + : [ov] "Jr" (oldval), [nv] "Jr" (newval) + : "memory"); + __disable_user_access(); + + *uval = val; + return ret; +} + +#endif /* _ASM_RISCV_FUTEX_H */ diff --git a/arch/riscv/include/asm/gdb_xml.h b/arch/riscv/include/asm/gdb_xml.h new file mode 100644 index 000000000..09342111f --- /dev/null +++ b/arch/riscv/include/asm/gdb_xml.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_GDB_XML_H_ +#define __ASM_GDB_XML_H_ + +const char riscv_gdb_stub_feature[64] = + "PacketSize=800;qXfer:features:read+;"; + +static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:"; + +#ifdef CONFIG_64BIT +static const char gdb_xfer_read_cpuxml[39] = + "qXfer:features:read:riscv-64bit-cpu.xml"; + +static const char riscv_gdb_stub_target_desc[256] = +"l<?xml version=\"1.0\"?>" +"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">" +"<target>" +"<xi:include href=\"riscv-64bit-cpu.xml\"/>" +"</target>"; + +static const char riscv_gdb_stub_cpuxml[2048] = +"l<?xml version=\"1.0\"?>" +"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">" +"<feature name=\"org.gnu.gdb.riscv.cpu\">" +"<reg name=\""DBG_REG_ZERO"\" bitsize=\"64\" type=\"int\" regnum=\"0\"/>" +"<reg name=\""DBG_REG_RA"\" bitsize=\"64\" type=\"code_ptr\"/>" +"<reg name=\""DBG_REG_SP"\" bitsize=\"64\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_GP"\" bitsize=\"64\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_TP"\" bitsize=\"64\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_T0"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_T1"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_T2"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_FP"\" bitsize=\"64\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_S1"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A0"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A1"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A2"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A3"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A4"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A5"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A6"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_A7"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S2"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S3"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S4"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S5"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S6"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S7"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S8"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S9"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S10"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_S11"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_T3"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_T4"\" 
bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_T5"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_T6"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_EPC"\" bitsize=\"64\" type=\"code_ptr\"/>" +"<reg name=\""DBG_REG_STATUS"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"64\" type=\"int\"/>" +"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"64\" type=\"int\"/>" +"</feature>"; +#else +static const char gdb_xfer_read_cpuxml[39] = + "qXfer:features:read:riscv-32bit-cpu.xml"; + +static const char riscv_gdb_stub_target_desc[256] = +"l<?xml version=\"1.0\"?>" +"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">" +"<target>" +"<xi:include href=\"riscv-32bit-cpu.xml\"/>" +"</target>"; + +static const char riscv_gdb_stub_cpuxml[2048] = +"l<?xml version=\"1.0\"?>" +"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">" +"<feature name=\"org.gnu.gdb.riscv.cpu\">" +"<reg name=\""DBG_REG_ZERO"\" bitsize=\"32\" type=\"int\" regnum=\"0\"/>" +"<reg name=\""DBG_REG_RA"\" bitsize=\"32\" type=\"code_ptr\"/>" +"<reg name=\""DBG_REG_SP"\" bitsize=\"32\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_GP"\" bitsize=\"32\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_TP"\" bitsize=\"32\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_T0"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_T1"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_T2"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_FP"\" bitsize=\"32\" type=\"data_ptr\"/>" +"<reg name=\""DBG_REG_S1"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A0"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A1"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A2"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A3"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A4"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A5"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A6"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_A7"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S2"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S3"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S4"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S5"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S6"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S7"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S8"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S9"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S10"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_S11"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_T3"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_T4"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_T5"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_T6"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_EPC"\" bitsize=\"32\" type=\"code_ptr\"/>" +"<reg name=\""DBG_REG_STATUS"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"32\" type=\"int\"/>" +"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"32\" type=\"int\"/>" +"</feature>"; +#endif +#endif diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h new file mode 100644 index 000000000..efeb5edf8 --- /dev/null +++ b/arch/riscv/include/asm/gpr-num.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_GPR_NUM_H +#define __ASM_GPR_NUM_H + +#ifdef __ASSEMBLY__ + + .irp 
num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 + .equ .L__gpr_num_x\num, \num + .endr + + .equ .L__gpr_num_zero, 0 + .equ .L__gpr_num_ra, 1 + .equ .L__gpr_num_sp, 2 + .equ .L__gpr_num_gp, 3 + .equ .L__gpr_num_tp, 4 + .equ .L__gpr_num_t0, 5 + .equ .L__gpr_num_t1, 6 + .equ .L__gpr_num_t2, 7 + .equ .L__gpr_num_s0, 8 + .equ .L__gpr_num_s1, 9 + .equ .L__gpr_num_a0, 10 + .equ .L__gpr_num_a1, 11 + .equ .L__gpr_num_a2, 12 + .equ .L__gpr_num_a3, 13 + .equ .L__gpr_num_a4, 14 + .equ .L__gpr_num_a5, 15 + .equ .L__gpr_num_a6, 16 + .equ .L__gpr_num_a7, 17 + .equ .L__gpr_num_s2, 18 + .equ .L__gpr_num_s3, 19 + .equ .L__gpr_num_s4, 20 + .equ .L__gpr_num_s5, 21 + .equ .L__gpr_num_s6, 22 + .equ .L__gpr_num_s7, 23 + .equ .L__gpr_num_s8, 24 + .equ .L__gpr_num_s9, 25 + .equ .L__gpr_num_s10, 26 + .equ .L__gpr_num_s11, 27 + .equ .L__gpr_num_t3, 28 + .equ .L__gpr_num_t4, 29 + .equ .L__gpr_num_t5, 30 + .equ .L__gpr_num_t6, 31 + +#else /* __ASSEMBLY__ */ + +#define __DEFINE_ASM_GPR_NUMS \ +" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \ +" .equ .L__gpr_num_x\\num, \\num\n" \ +" .endr\n" \ +" .equ .L__gpr_num_zero, 0\n" \ +" .equ .L__gpr_num_ra, 1\n" \ +" .equ .L__gpr_num_sp, 2\n" \ +" .equ .L__gpr_num_gp, 3\n" \ +" .equ .L__gpr_num_tp, 4\n" \ +" .equ .L__gpr_num_t0, 5\n" \ +" .equ .L__gpr_num_t1, 6\n" \ +" .equ .L__gpr_num_t2, 7\n" \ +" .equ .L__gpr_num_s0, 8\n" \ +" .equ .L__gpr_num_s1, 9\n" \ +" .equ .L__gpr_num_a0, 10\n" \ +" .equ .L__gpr_num_a1, 11\n" \ +" .equ .L__gpr_num_a2, 12\n" \ +" .equ .L__gpr_num_a3, 13\n" \ +" .equ .L__gpr_num_a4, 14\n" \ +" .equ .L__gpr_num_a5, 15\n" \ +" .equ .L__gpr_num_a6, 16\n" \ +" .equ .L__gpr_num_a7, 17\n" \ +" .equ .L__gpr_num_s2, 18\n" \ +" .equ .L__gpr_num_s3, 19\n" \ +" .equ .L__gpr_num_s4, 20\n" \ +" .equ .L__gpr_num_s5, 21\n" \ +" .equ .L__gpr_num_s6, 22\n" \ +" .equ .L__gpr_num_s7, 23\n" \ +" .equ .L__gpr_num_s8, 24\n" \ +" .equ .L__gpr_num_s9, 25\n" \ +" .equ .L__gpr_num_s10, 26\n" \ +" .equ .L__gpr_num_s11, 27\n" \ +" .equ .L__gpr_num_t3, 28\n" \ +" .equ .L__gpr_num_t4, 29\n" \ +" .equ .L__gpr_num_t5, 30\n" \ +" .equ .L__gpr_num_t6, 31\n" + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_GPR_NUM_H */ diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h new file mode 100644 index 000000000..ec19d6afc --- /dev/null +++ b/arch/riscv/include/asm/hugetlb.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_HUGETLB_H +#define _ASM_RISCV_HUGETLB_H + +#include <asm-generic/hugetlb.h> +#include <asm/page.h> + +static inline void arch_clear_hugepage_flags(struct page *page) +{ + clear_bit(PG_dcache_clean, &page->flags); +} +#define arch_clear_hugepage_flags arch_clear_hugepage_flags + +#endif /* _ASM_RISCV_HUGETLB_H */ diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h new file mode 100644 index 000000000..b22525290 --- /dev/null +++ b/arch/riscv/include/asm/hwcap.h @@ -0,0 +1,112 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copied from arch/arm64/include/asm/hwcap.h + * + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2017 SiFive + */ +#ifndef _ASM_RISCV_HWCAP_H +#define _ASM_RISCV_HWCAP_H + +#include <asm/errno.h> +#include <linux/bits.h> +#include <uapi/asm/hwcap.h> + +#ifndef __ASSEMBLY__ +#include <linux/jump_label.h> +/* + * This yields a mask that user programs can use to figure out what + * instruction set this cpu supports. 
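+ * (It reaches userspace through the AT_HWCAP auxiliary vector entry; see + * also ELF_HWCAP in <asm/elf.h>.)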
+ */ +#define ELF_HWCAP (elf_hwcap) + +enum { + CAP_HWCAP = 1, +}; + +extern unsigned long elf_hwcap; + +#define RISCV_ISA_EXT_a ('a' - 'a') +#define RISCV_ISA_EXT_c ('c' - 'a') +#define RISCV_ISA_EXT_d ('d' - 'a') +#define RISCV_ISA_EXT_f ('f' - 'a') +#define RISCV_ISA_EXT_h ('h' - 'a') +#define RISCV_ISA_EXT_i ('i' - 'a') +#define RISCV_ISA_EXT_m ('m' - 'a') +#define RISCV_ISA_EXT_s ('s' - 'a') +#define RISCV_ISA_EXT_u ('u' - 'a') + +/* + * Increase this to a higher value as the kernel supports more ISA extensions. + */ +#define RISCV_ISA_EXT_MAX 64 +#define RISCV_ISA_EXT_NAME_LEN_MAX 32 + +/* The base ID for multi-letter ISA extensions */ +#define RISCV_ISA_EXT_BASE 26 + +/* + * This enum represents the logical ID for each multi-letter RISC-V ISA extension. + * The logical ID should start from RISCV_ISA_EXT_BASE and must not exceed + * RISCV_ISA_EXT_MAX. The 0-25 range is reserved for single-letter + * extensions, while all the multi-letter extensions should define the next + * available logical extension ID. + */ +enum riscv_isa_ext_id { + RISCV_ISA_EXT_SSCOFPMF = RISCV_ISA_EXT_BASE, + RISCV_ISA_EXT_SVPBMT, + RISCV_ISA_EXT_ZICBOM, + RISCV_ISA_EXT_ZIHINTPAUSE, + RISCV_ISA_EXT_SSTC, + RISCV_ISA_EXT_SVINVAL, + RISCV_ISA_EXT_ID_MAX = RISCV_ISA_EXT_MAX, +}; + +/* + * This enum represents the logical ID for each RISC-V ISA extension static + * key. We can use a static key to optimize the code path if some ISA + * extensions are available. + */ +enum riscv_isa_ext_key { + RISCV_ISA_EXT_KEY_FPU, /* For 'F' and 'D' */ + RISCV_ISA_EXT_KEY_ZIHINTPAUSE, + RISCV_ISA_EXT_KEY_SVINVAL, + RISCV_ISA_EXT_KEY_MAX, +}; + +struct riscv_isa_ext_data { + /* Name of the extension displayed to userspace via /proc/cpuinfo */ + char uprop[RISCV_ISA_EXT_NAME_LEN_MAX]; + /* The logical ISA extension ID */ + unsigned int isa_ext_id; +}; + +extern struct static_key_false riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_MAX]; + +static __always_inline int riscv_isa_ext2key(int num) +{ + switch (num) { + case RISCV_ISA_EXT_f: + return RISCV_ISA_EXT_KEY_FPU; + case RISCV_ISA_EXT_d: + return RISCV_ISA_EXT_KEY_FPU; + case RISCV_ISA_EXT_ZIHINTPAUSE: + return RISCV_ISA_EXT_KEY_ZIHINTPAUSE; + case RISCV_ISA_EXT_SVINVAL: + return RISCV_ISA_EXT_KEY_SVINVAL; + default: + return -EINVAL; + } +} + +unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); + +#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext) + +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +#define riscv_isa_extension_available(isa_bitmap, ext) \ + __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) + +#endif + +#endif /* _ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h new file mode 100644 index 000000000..e0b319af3 --- /dev/null +++ b/arch/riscv/include/asm/image.h @@ -0,0 +1,65 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_IMAGE_H +#define _ASM_RISCV_IMAGE_H + +#define RISCV_IMAGE_MAGIC "RISCV\0\0\0" +#define RISCV_IMAGE_MAGIC2 "RSC\x05" + +#define RISCV_IMAGE_FLAG_BE_SHIFT 0 +#define RISCV_IMAGE_FLAG_BE_MASK 0x1 + +#define RISCV_IMAGE_FLAG_LE 0 +#define RISCV_IMAGE_FLAG_BE 1 + +#ifdef CONFIG_CPU_BIG_ENDIAN +#error conversion of header fields to LE not yet implemented +#else +#define __HEAD_FLAG_BE RISCV_IMAGE_FLAG_LE +#endif + +#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \ + RISCV_IMAGE_FLAG_##field##_SHIFT) + +#define __HEAD_FLAGS (__HEAD_FLAG(BE)) + +#define RISCV_HEADER_VERSION_MAJOR 0 +#define RISCV_HEADER_VERSION_MINOR 2 + +#define 
RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \ + RISCV_HEADER_VERSION_MINOR) + +#ifndef __ASSEMBLY__ +/** + * struct riscv_image_header - riscv kernel image header + * @code0: Executable code + * @code1: Executable code + * @text_offset: Image load offset (little endian) + * @image_size: Effective Image size (little endian) + * @flags: kernel flags (little endian) + * @version: version + * @res1: reserved + * @res2: reserved + * @magic: Magic number (RISC-V specific; deprecated) + * @magic2: Magic number 2 (to match the ARM64 'magic' field pos) + * @res3: reserved (will be used for PE COFF offset) + * + * The intention is for this header format to be shared between multiple + * architectures to avoid a proliferation of image header formats. + */ + +struct riscv_image_header { + u32 code0; + u32 code1; + u64 text_offset; + u64 image_size; + u64 flags; + u32 version; + u32 res1; + u64 res2; + u64 magic; + u32 magic2; + u32 res3; +}; +#endif /* __ASSEMBLY__ */ +#endif /* _ASM_RISCV_IMAGE_H */ diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h new file mode 100644 index 000000000..16044affa --- /dev/null +++ b/arch/riscv/include/asm/insn-def.h @@ -0,0 +1,137 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_INSN_DEF_H +#define __ASM_INSN_DEF_H + +#include <asm/asm.h> + +#define INSN_R_FUNC7_SHIFT 25 +#define INSN_R_RS2_SHIFT 20 +#define INSN_R_RS1_SHIFT 15 +#define INSN_R_FUNC3_SHIFT 12 +#define INSN_R_RD_SHIFT 7 +#define INSN_R_OPCODE_SHIFT 0 + +#ifdef __ASSEMBLY__ + +#ifdef CONFIG_AS_HAS_INSN + + .macro insn_r, opcode, func3, func7, rd, rs1, rs2 + .insn r \opcode, \func3, \func7, \rd, \rs1, \rs2 + .endm + +#else + +#include <asm/gpr-num.h> + + .macro insn_r, opcode, func3, func7, rd, rs1, rs2 + .4byte ((\opcode << INSN_R_OPCODE_SHIFT) | \ + (\func3 << INSN_R_FUNC3_SHIFT) | \ + (\func7 << INSN_R_FUNC7_SHIFT) | \ + (.L__gpr_num_\rd << INSN_R_RD_SHIFT) | \ + (.L__gpr_num_\rs1 << INSN_R_RS1_SHIFT) | \ + (.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT)) + .endm + +#endif + +#define __INSN_R(...) insn_r __VA_ARGS__ + +#else /* ! __ASSEMBLY__ */ + +#ifdef CONFIG_AS_HAS_INSN + +#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + ".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" + +#else + +#include <linux/stringify.h> +#include <asm/gpr-num.h> + +#define DEFINE_INSN_R \ + __DEFINE_ASM_GPR_NUMS \ +" .macro insn_r, opcode, func3, func7, rd, rs1, rs2\n" \ +" .4byte ((\\opcode << " __stringify(INSN_R_OPCODE_SHIFT) ") |" \ +" (\\func3 << " __stringify(INSN_R_FUNC3_SHIFT) ") |" \ +" (\\func7 << " __stringify(INSN_R_FUNC7_SHIFT) ") |" \ +" (.L__gpr_num_\\rd << " __stringify(INSN_R_RD_SHIFT) ") |" \ +" (.L__gpr_num_\\rs1 << " __stringify(INSN_R_RS1_SHIFT) ") |" \ +" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \ +" .endm\n" + +#define UNDEFINE_INSN_R \ +" .purgem insn_r\n" + +#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + DEFINE_INSN_R \ + "insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \ + UNDEFINE_INSN_R + +#endif + +#endif /* ! 
__ASSEMBLY__ */ + +#define INSN_R(opcode, func3, func7, rd, rs1, rs2) \ + __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \ + RV_##rd, RV_##rs1, RV_##rs2) + +#define RV_OPCODE(v) __ASM_STR(v) +#define RV_FUNC3(v) __ASM_STR(v) +#define RV_FUNC7(v) __ASM_STR(v) +#define RV_RD(v) __ASM_STR(v) +#define RV_RS1(v) __ASM_STR(v) +#define RV_RS2(v) __ASM_STR(v) +#define __RV_REG(v) __ASM_STR(x ## v) +#define RV___RD(v) __RV_REG(v) +#define RV___RS1(v) __RV_REG(v) +#define RV___RS2(v) __RV_REG(v) + +#define RV_OPCODE_SYSTEM RV_OPCODE(115) + +#define HFENCE_VVMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(17), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define HFENCE_GVMA(gaddr, vmid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49), \ + __RD(0), RS1(gaddr), RS2(vmid)) + +#define HLVX_HU(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(50), \ + RD(dest), RS1(addr), __RS2(3)) + +#define HLV_W(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(52), \ + RD(dest), RS1(addr), __RS2(0)) + +#ifdef CONFIG_64BIT +#define HLV_D(dest, addr) \ + INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(54), \ + RD(dest), RS1(addr), __RS2(0)) +#else +#define HLV_D(dest, addr) \ + __ASM_STR(.error "hlv.d requires 64-bit support") +#endif + +#define SINVAL_VMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define SFENCE_W_INVAL() \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \ + __RD(0), __RS1(0), __RS2(0)) + +#define SFENCE_INVAL_IR() \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \ + __RD(0), __RS1(0), __RS2(1)) + +#define HINVAL_VVMA(vaddr, asid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(19), \ + __RD(0), RS1(vaddr), RS2(asid)) + +#define HINVAL_GVMA(gaddr, vmid) \ + INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \ + __RD(0), RS1(gaddr), RS2(vmid)) + +#endif /* __ASM_INSN_DEF_H */ diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h new file mode 100644 index 000000000..42497d487 --- /dev/null +++ b/arch/riscv/include/asm/io.h @@ -0,0 +1,143 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h + * which was based on arch/arm/include/io.h + * + * Copyright (C) 1996-2000 Russell King + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2014 Regents of the University of California + */ + +#ifndef _ASM_RISCV_IO_H +#define _ASM_RISCV_IO_H + +#include <linux/types.h> +#include <linux/pgtable.h> +#include <asm/mmiowb.h> +#include <asm/early_ioremap.h> + +/* + * MMIO access functions are separated out to break dependency cycles + * when using {read,write}* fns in low-level headers + */ +#include <asm/mmio.h> + +/* + * I/O port access constants. + */ +#ifdef CONFIG_MMU +#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1) +#define PCI_IOBASE ((void __iomem *)PCI_IO_START) +#endif /* CONFIG_MMU */ + +/* + * Emulation routines for the port-mapped IO space used by some PCI drivers. + * These are defined as being "fully synchronous", but also "not guaranteed to + * be fully ordered with respect to other memory and I/O operations". We're + * going to be on the safe side here and just make them: + * - Fully ordered WRT each other, by bracketing them with two fences. The + * outer set contains both I/O so inX is ordered with outX, while the inner just + * needs the type of the access (I for inX and O for outX). + * - Ordered in the same manner as readX/writeX WRT memory by subsuming their + * fences. + * - Ordered WRT timer reads, so udelay and friends don't get elided by the + * implementation. 
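+ * As a concrete reading of the macros below: __io_par() is "fence i,ior", + * i.e. the port read must complete before any subsequent I/O access or + * memory read.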
+ * Note that there is no way to actually enforce that outX is a non-posted + * operation on RISC-V, but hopefully the timer ordering constraint is + * sufficient to ensure this works sanely on controllers that support I/O + * writes. + */ +#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory"); +#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory"); +#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory"); +#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory"); + +/* + * Accesses from a single hart to a single I/O address must be ordered. This + * allows us to use the raw read macros, but we still need to fence before and + * after the block to ensure ordering WRT other macros. These are defined to + * perform host-endian accesses so we use __raw instead of __cpu. + */ +#define __io_reads_ins(port, ctype, len, bfence, afence) \ + static inline void __ ## port ## len(const volatile void __iomem *addr, \ + void *buffer, \ + unsigned int count) \ + { \ + bfence; \ + if (count) { \ + ctype *buf = buffer; \ + \ + do { \ + ctype x = __raw_read ## len(addr); \ + *buf++ = x; \ + } while (--count); \ + } \ + afence; \ + } + +#define __io_writes_outs(port, ctype, len, bfence, afence) \ + static inline void __ ## port ## len(volatile void __iomem *addr, \ + const void *buffer, \ + unsigned int count) \ + { \ + bfence; \ + if (count) { \ + const ctype *buf = buffer; \ + \ + do { \ + __raw_write ## len(*buf++, addr); \ + } while (--count); \ + } \ + afence; \ + } + +__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr)) +__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr)) +__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr)) +#define readsb(addr, buffer, count) __readsb(addr, buffer, count) +#define readsw(addr, buffer, count) __readsw(addr, buffer, count) +#define readsl(addr, buffer, count) __readsl(addr, buffer, count) + +__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr)) +__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr)) +__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr)) +#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count) +#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count) +#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count) + +__io_writes_outs(writes, u8, b, __io_bw(), __io_aw()) +__io_writes_outs(writes, u16, w, __io_bw(), __io_aw()) +__io_writes_outs(writes, u32, l, __io_bw(), __io_aw()) +#define writesb(addr, buffer, count) __writesb(addr, buffer, count) +#define writesw(addr, buffer, count) __writesw(addr, buffer, count) +#define writesl(addr, buffer, count) __writesl(addr, buffer, count) + +__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw()) +__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw()) +__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw()) +#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count) +#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count) +#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count) + +#ifdef CONFIG_64BIT +__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr)) +#define readsq(addr, buffer, count) __readsq(addr, buffer, count) + +__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr)) +#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count) + +__io_writes_outs(writes, u64, q, __io_bw(), __io_aw()) +#define writesq(addr, buffer, count) __writesq(addr, buffer, count) + +__io_writes_outs(outs, u64, q, 
__io_pbw(), __io_paw())
+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
+#endif
+
+#include <asm-generic/io.h>
+
+#ifdef CONFIG_MMU
+#define arch_memremap_wb(addr, size) \
+ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#endif
+
+#endif /* _ASM_RISCV_IO_H */ diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h new file mode 100644 index 000000000..e4c435509 --- /dev/null +++ b/arch/riscv/include/asm/irq.h @@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_IRQ_H
+#define _ASM_RISCV_IRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
+#include <asm-generic/irq.h>
+
+extern void __init init_IRQ(void);
+
+#endif /* _ASM_RISCV_IRQ_H */ diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h new file mode 100644 index 000000000..b53891964 --- /dev/null +++ b/arch/riscv/include/asm/irq_work.h @@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_IRQ_WORK_H
+#define _ASM_RISCV_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+extern void arch_irq_work_raise(void);
+#endif /* _ASM_RISCV_IRQ_WORK_H */ diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h new file mode 100644 index 000000000..08d4d6a5b --- /dev/null +++ b/arch/riscv/include/asm/irqflags.h @@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_IRQFLAGS_H
+#define _ASM_RISCV_IRQFLAGS_H
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ return csr_read(CSR_STATUS);
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ csr_set(CSR_STATUS, SR_IE);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ csr_clear(CSR_STATUS, SR_IE);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ return csr_read_clear(CSR_STATUS, SR_IE);
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & SR_IE);
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ csr_set(CSR_STATUS, flags & SR_IE);
+}
+
+#endif /* _ASM_RISCV_IRQFLAGS_H */ diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h new file mode 100644 index 000000000..14a5ea8d8 --- /dev/null +++ b/arch/riscv/include/asm/jump_label.h @@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/include/asm/jump_label.h
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key * const key,
+ const bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: nop \n\t"
+ " .option pop \n\t"
+ "
.pushsection __jump_table, \"aw\" \n\t" + " .align " RISCV_LGPTR " \n\t" + " .long 1b - ., %l[label] - . \n\t" + " " RISCV_PTR " %0 - . \n\t" + " .popsection \n\t" + : : "i"(&((char *)key)[branch]) : : label); + + return false; +label: + return true; +} + +static __always_inline bool arch_static_branch_jump(struct static_key * const key, + const bool branch) +{ + asm_volatile_goto( + " .align 2 \n\t" + " .option push \n\t" + " .option norelax \n\t" + " .option norvc \n\t" + "1: jal zero, %l[label] \n\t" + " .option pop \n\t" + " .pushsection __jump_table, \"aw\" \n\t" + " .align " RISCV_LGPTR " \n\t" + " .long 1b - ., %l[label] - . \n\t" + " " RISCV_PTR " %0 - . \n\t" + " .popsection \n\t" + : : "i"(&((char *)key)[branch]) : : label); + + return false; +label: + return true; +} + +#endif /* __ASSEMBLY__ */ +#endif /* __ASM_JUMP_LABEL_H */ diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h new file mode 100644 index 000000000..0b85e363e --- /dev/null +++ b/arch/riscv/include/asm/kasan.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2019 Andes Technology Corporation */ + +#ifndef __ASM_KASAN_H +#define __ASM_KASAN_H + +#ifndef __ASSEMBLY__ + +#ifdef CONFIG_KASAN + +/* + * The following comment was copied from arm64: + * KASAN_SHADOW_START: beginning of the kernel virtual addresses. + * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses, + * where N = (1 << KASAN_SHADOW_SCALE_SHIFT). + * + * KASAN_SHADOW_OFFSET: + * This value is used to map an address to the corresponding shadow + * address by the following formula: + * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET + * + * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range + * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual + * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation: + * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END - + * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT)) + */ +#define KASAN_SHADOW_SCALE_SHIFT 3 + +#define KASAN_SHADOW_SIZE (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT)) +/* + * Depending on the size of the virtual address space, the region may not be + * aligned on PGDIR_SIZE, so force its alignment to ease its population. 
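+ *
+ * As a worked example (illustrative numbers, not a boot-time dump): with
+ * KASAN_SHADOW_SCALE_SHIFT = 3 each shadow byte covers an 8-byte granule,
+ * so the shadow byte for an address p lives at
+ * (p >> 3) + KASAN_SHADOW_OFFSET, and rounding KASAN_SHADOW_START down
+ * with PGDIR_MASK only grows the shadow region, which is harmless.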
+ */ +#define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK) +#define KASAN_SHADOW_END MODULES_LOWEST_VADDR +#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL) + +void kasan_init(void); +asmlinkage void kasan_early_init(void); +void kasan_swapper_init(void); + +#endif +#endif +#endif /* __ASM_KASAN_H */ diff --git a/arch/riscv/include/asm/kdebug.h b/arch/riscv/include/asm/kdebug.h new file mode 100644 index 000000000..85ac00411 --- /dev/null +++ b/arch/riscv/include/asm/kdebug.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ASM_ARC_KDEBUG_H +#define _ASM_ARC_KDEBUG_H + +enum die_val { + DIE_UNUSED, + DIE_TRAP, + DIE_OOPS +}; + +#endif diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h new file mode 100644 index 000000000..2b56769cb --- /dev/null +++ b/arch/riscv/include/asm/kexec.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 FORTH-ICS/CARV + * Nick Kossifidis <mick@ics.forth.gr> + */ + +#ifndef _RISCV_KEXEC_H +#define _RISCV_KEXEC_H + +#include <asm/page.h> /* For PAGE_SIZE */ + +/* Maximum physical address we can use pages from */ +#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL) + +/* Maximum address we can reach in physical address mode */ +#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL) + +/* Maximum address we can use for the control code buffer */ +#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL) + +/* Reserve a page for the control code buffer */ +#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE + +#define KEXEC_ARCH KEXEC_ARCH_RISCV + +extern void riscv_crash_save_regs(struct pt_regs *newregs); + +static inline void +crash_setup_regs(struct pt_regs *newregs, + struct pt_regs *oldregs) +{ + if (oldregs) + memcpy(newregs, oldregs, sizeof(struct pt_regs)); + else + riscv_crash_save_regs(newregs); +} + + +#define ARCH_HAS_KIMAGE_ARCH + +struct kimage_arch { + void *fdt; /* For CONFIG_KEXEC_FILE */ + unsigned long fdt_addr; +}; + +extern const unsigned char riscv_kexec_relocate[]; +extern const unsigned int riscv_kexec_relocate_size; + +typedef void (*riscv_kexec_method)(unsigned long first_ind_entry, + unsigned long jump_addr, + unsigned long fdt_addr, + unsigned long hartid, + unsigned long va_pa_off); + +extern riscv_kexec_method riscv_kexec_norelocate; + +#ifdef CONFIG_KEXEC_FILE +extern const struct kexec_file_ops elf_kexec_ops; + +struct purgatory_info; +int arch_kexec_apply_relocations_add(struct purgatory_info *pi, + Elf_Shdr *section, + const Elf_Shdr *relsec, + const Elf_Shdr *symtab); +#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add + +struct kimage; +int arch_kimage_file_post_load_cleanup(struct kimage *image); +#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup +#endif + +#endif diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h new file mode 100644 index 000000000..d887a5404 --- /dev/null +++ b/arch/riscv/include/asm/kfence.h @@ -0,0 +1,63 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_KFENCE_H +#define _ASM_RISCV_KFENCE_H + +#include <linux/kfence.h> +#include <linux/pfn.h> +#include <asm-generic/pgalloc.h> +#include <asm/pgtable.h> + +static inline int split_pmd_page(unsigned long addr) +{ + int i; + unsigned long pfn = PFN_DOWN(__pa((addr & PMD_MASK))); + pmd_t *pmd = pmd_off_k(addr); + pte_t *pte = pte_alloc_one_kernel(&init_mm); + + if (!pte) + return -ENOMEM; + + for (i = 0; i < PTRS_PER_PTE; i++) + set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL)); + set_pmd(pmd, 
pfn_pmd(PFN_DOWN(__pa(pte)), PAGE_TABLE)); + + flush_tlb_kernel_range(addr, addr + PMD_SIZE); + return 0; +} + +static inline bool arch_kfence_init_pool(void) +{ + int ret; + unsigned long addr; + pmd_t *pmd; + + for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + addr += PAGE_SIZE) { + pmd = pmd_off_k(addr); + + if (pmd_leaf(*pmd)) { + ret = split_pmd_page(addr); + if (ret) + return false; + } + } + + return true; +} + +static inline bool kfence_protect_page(unsigned long addr, bool protect) +{ + pte_t *pte = virt_to_kpte(addr); + + if (protect) + set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + else + set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + + flush_tlb_kernel_range(addr, addr + PAGE_SIZE); + + return true; +} + +#endif /* _ASM_RISCV_KFENCE_H */ diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h new file mode 100644 index 000000000..46677daf7 --- /dev/null +++ b/arch/riscv/include/asm/kgdb.h @@ -0,0 +1,113 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_KGDB_H_ +#define __ASM_KGDB_H_ + +#ifdef __KERNEL__ + +#define GDB_SIZEOF_REG sizeof(unsigned long) + +#define DBG_MAX_REG_NUM (36) +#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG) +#define CACHE_FLUSH_IS_SAFE 1 +#define BUFMAX 2048 +#ifdef CONFIG_RISCV_ISA_C +#define BREAK_INSTR_SIZE 2 +#else +#define BREAK_INSTR_SIZE 4 +#endif + +#ifndef __ASSEMBLY__ + +extern unsigned long kgdb_compiled_break; + +static inline void arch_kgdb_breakpoint(void) +{ + asm(".global kgdb_compiled_break\n" + ".option norvc\n" + "kgdb_compiled_break: ebreak\n" + ".option rvc\n"); +} + +#endif /* !__ASSEMBLY__ */ + +#define DBG_REG_ZERO "zero" +#define DBG_REG_RA "ra" +#define DBG_REG_SP "sp" +#define DBG_REG_GP "gp" +#define DBG_REG_TP "tp" +#define DBG_REG_T0 "t0" +#define DBG_REG_T1 "t1" +#define DBG_REG_T2 "t2" +#define DBG_REG_FP "fp" +#define DBG_REG_S1 "s1" +#define DBG_REG_A0 "a0" +#define DBG_REG_A1 "a1" +#define DBG_REG_A2 "a2" +#define DBG_REG_A3 "a3" +#define DBG_REG_A4 "a4" +#define DBG_REG_A5 "a5" +#define DBG_REG_A6 "a6" +#define DBG_REG_A7 "a7" +#define DBG_REG_S2 "s2" +#define DBG_REG_S3 "s3" +#define DBG_REG_S4 "s4" +#define DBG_REG_S5 "s5" +#define DBG_REG_S6 "s6" +#define DBG_REG_S7 "s7" +#define DBG_REG_S8 "s8" +#define DBG_REG_S9 "s9" +#define DBG_REG_S10 "s10" +#define DBG_REG_S11 "s11" +#define DBG_REG_T3 "t3" +#define DBG_REG_T4 "t4" +#define DBG_REG_T5 "t5" +#define DBG_REG_T6 "t6" +#define DBG_REG_EPC "pc" +#define DBG_REG_STATUS "sstatus" +#define DBG_REG_BADADDR "stval" +#define DBG_REG_CAUSE "scause" + +#define DBG_REG_ZERO_OFF 0 +#define DBG_REG_RA_OFF 1 +#define DBG_REG_SP_OFF 2 +#define DBG_REG_GP_OFF 3 +#define DBG_REG_TP_OFF 4 +#define DBG_REG_T0_OFF 5 +#define DBG_REG_T1_OFF 6 +#define DBG_REG_T2_OFF 7 +#define DBG_REG_FP_OFF 8 +#define DBG_REG_S1_OFF 9 +#define DBG_REG_A0_OFF 10 +#define DBG_REG_A1_OFF 11 +#define DBG_REG_A2_OFF 12 +#define DBG_REG_A3_OFF 13 +#define DBG_REG_A4_OFF 14 +#define DBG_REG_A5_OFF 15 +#define DBG_REG_A6_OFF 16 +#define DBG_REG_A7_OFF 17 +#define DBG_REG_S2_OFF 18 +#define DBG_REG_S3_OFF 19 +#define DBG_REG_S4_OFF 20 +#define DBG_REG_S5_OFF 21 +#define DBG_REG_S6_OFF 22 +#define DBG_REG_S7_OFF 23 +#define DBG_REG_S8_OFF 24 +#define DBG_REG_S9_OFF 25 +#define DBG_REG_S10_OFF 26 +#define DBG_REG_S11_OFF 27 +#define DBG_REG_T3_OFF 28 +#define DBG_REG_T4_OFF 29 +#define DBG_REG_T5_OFF 30 +#define DBG_REG_T6_OFF 31 +#define DBG_REG_EPC_OFF 32 +#define DBG_REG_STATUS_OFF 33 +#define DBG_REG_BADADDR_OFF 34 
+#define DBG_REG_CAUSE_OFF 35 + +extern const char riscv_gdb_stub_feature[64]; + +#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature + +#endif +#endif diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h new file mode 100644 index 000000000..217ef89f2 --- /dev/null +++ b/arch/riscv/include/asm/kprobes.h @@ -0,0 +1,47 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copied from arch/arm64/include/asm/kprobes.h + * + * Copyright (C) 2013 Linaro Limited + * Copyright (C) 2017 SiFive + */ + +#ifndef _ASM_RISCV_KPROBES_H +#define _ASM_RISCV_KPROBES_H + +#include <asm-generic/kprobes.h> + +#ifdef CONFIG_KPROBES +#include <linux/types.h> +#include <linux/ptrace.h> +#include <linux/percpu.h> + +#define __ARCH_WANT_KPROBES_INSN_SLOT +#define MAX_INSN_SIZE 2 + +#define flush_insn_slot(p) do { } while (0) +#define kretprobe_blacklist_size 0 + +#include <asm/probes.h> + +struct prev_kprobe { + struct kprobe *kp; + unsigned int status; +}; + +/* per-cpu kprobe control block */ +struct kprobe_ctlblk { + unsigned int kprobe_status; + unsigned long saved_status; + struct prev_kprobe prev_kprobe; +}; + +void arch_remove_kprobe(struct kprobe *p); +int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr); +bool kprobe_breakpoint_handler(struct pt_regs *regs); +bool kprobe_single_step_handler(struct pt_regs *regs); +void __kretprobe_trampoline(void); +void __kprobes *trampoline_probe_handler(struct pt_regs *regs); + +#endif /* CONFIG_KPROBES */ +#endif /* _ASM_RISCV_KPROBES_H */ diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h new file mode 100644 index 000000000..dbbf43d52 --- /dev/null +++ b/arch/riscv/include/asm/kvm_host.h @@ -0,0 +1,333 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Anup Patel <anup.patel@wdc.com> + */ + +#ifndef __RISCV_KVM_HOST_H__ +#define __RISCV_KVM_HOST_H__ + +#include <linux/types.h> +#include <linux/kvm.h> +#include <linux/kvm_types.h> +#include <linux/spinlock.h> +#include <asm/csr.h> +#include <asm/hwcap.h> +#include <asm/kvm_vcpu_fp.h> +#include <asm/kvm_vcpu_insn.h> +#include <asm/kvm_vcpu_timer.h> + +#define KVM_MAX_VCPUS 1024 + +#define KVM_HALT_POLL_NS_DEFAULT 500000 + +#define KVM_VCPU_MAX_FEATURES 0 + +#define KVM_REQ_SLEEP \ + KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1) +#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2) +#define KVM_REQ_FENCE_I \ + KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_HFENCE_GVMA_VMID_ALL KVM_REQ_TLB_FLUSH +#define KVM_REQ_HFENCE_VVMA_ALL \ + KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_HFENCE \ + KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) + +enum kvm_riscv_hfence_type { + KVM_RISCV_HFENCE_UNKNOWN = 0, + KVM_RISCV_HFENCE_GVMA_VMID_GPA, + KVM_RISCV_HFENCE_VVMA_ASID_GVA, + KVM_RISCV_HFENCE_VVMA_ASID_ALL, + KVM_RISCV_HFENCE_VVMA_GVA, +}; + +struct kvm_riscv_hfence { + enum kvm_riscv_hfence_type type; + unsigned long asid; + unsigned long order; + gpa_t addr; + gpa_t size; +}; + +#define KVM_RISCV_VCPU_MAX_HFENCE 64 + +struct kvm_vm_stat { + struct kvm_vm_stat_generic generic; +}; + +struct kvm_vcpu_stat { + struct kvm_vcpu_stat_generic generic; + u64 ecall_exit_stat; + u64 wfi_exit_stat; + u64 mmio_exit_user; + u64 mmio_exit_kernel; + u64 csr_exit_user; + u64 csr_exit_kernel; + u64 signal_exits; + u64 exits; +}; + +struct kvm_arch_memory_slot { +}; + +struct kvm_vmid { + /* + * Writes to vmid_version and vmid happen with vmid_lock held + * whereas reads happen without any lock held. 
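+ *
+ * A stale read is therefore benign: roughly,
+ * kvm_riscv_gstage_vmid_ver_changed() spots the version mismatch on
+ * the next VM-entry and the VCPU picks up a fresh VMID via
+ * KVM_REQ_UPDATE_HGATP before re-entering the guest (see the vmid
+ * helpers declared later in this file).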
+ */
+ unsigned long vmid_version;
+ unsigned long vmid;
+};
+
+struct kvm_arch {
+ /* G-stage vmid */
+ struct kvm_vmid vmid;
+
+ /* G-stage page table */
+ pgd_t *pgd;
+ phys_addr_t pgd_phys;
+
+ /* Guest Timer */
+ struct kvm_guest_timer timer;
+};
+
+struct kvm_sbi_context {
+ int return_handled;
+};
+
+struct kvm_cpu_trap {
+ unsigned long sepc;
+ unsigned long scause;
+ unsigned long stval;
+ unsigned long htval;
+ unsigned long htinst;
+};
+
+struct kvm_cpu_context {
+ unsigned long zero;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ unsigned long sepc;
+ unsigned long sstatus;
+ unsigned long hstatus;
+ union __riscv_fp_state fp;
+};
+
+struct kvm_vcpu_csr {
+ unsigned long vsstatus;
+ unsigned long vsie;
+ unsigned long vstvec;
+ unsigned long vsscratch;
+ unsigned long vsepc;
+ unsigned long vscause;
+ unsigned long vstval;
+ unsigned long hvip;
+ unsigned long vsatp;
+ unsigned long scounteren;
+};
+
+struct kvm_vcpu_arch {
+ /* VCPU ran at least once */
+ bool ran_atleast_once;
+
+ /* Last Host CPU on which Guest VCPU exited */
+ int last_exit_cpu;
+
+ /* ISA feature bits (similar to MISA) */
+ DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
+
+ /* SSCRATCH, STVEC, and SCOUNTEREN of Host */
+ unsigned long host_sscratch;
+ unsigned long host_stvec;
+ unsigned long host_scounteren;
+
+ /* CPU context of Host */
+ struct kvm_cpu_context host_context;
+
+ /* CPU context of Guest VCPU */
+ struct kvm_cpu_context guest_context;
+
+ /* CPU CSR context of Guest VCPU */
+ struct kvm_vcpu_csr guest_csr;
+
+ /* CPU context upon Guest VCPU reset */
+ struct kvm_cpu_context guest_reset_context;
+
+ /* CPU CSR context upon Guest VCPU reset */
+ struct kvm_vcpu_csr guest_reset_csr;
+
+ /*
+ * VCPU interrupts
+ *
+ * We have a lockless approach for tracking pending VCPU interrupts
+ * implemented using atomic bitops. The irqs_pending bitmap represents
+ * pending interrupts, whereas irqs_pending_mask represents the bits
+ * that changed in irqs_pending. Our approach is modeled around the
+ * multiple-producer, single-consumer problem, where the consumer is
+ * the VCPU itself.
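+ *
+ * A sketch of the protocol (close to, but not literally, the helpers
+ * declared below):
+ *
+ *	producer:
+ *		set_bit(irq, &vcpu->arch.irqs_pending);
+ *		smp_mb__before_atomic();
+ *		set_bit(irq, &vcpu->arch.irqs_pending_mask);
+ *
+ *	consumer (the VCPU, before entering the guest):
+ *		mask = xchg_acquire(&vcpu->arch.irqs_pending_mask, 0);
+ *		val = READ_ONCE(vcpu->arch.irqs_pending) & mask;
+ *
+ * after which val tells the VCPU which pending bits to fold into hvip.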
+ */ + unsigned long irqs_pending; + unsigned long irqs_pending_mask; + + /* VCPU Timer */ + struct kvm_vcpu_timer timer; + + /* HFENCE request queue */ + spinlock_t hfence_lock; + unsigned long hfence_head; + unsigned long hfence_tail; + struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE]; + + /* MMIO instruction details */ + struct kvm_mmio_decode mmio_decode; + + /* CSR instruction details */ + struct kvm_csr_decode csr_decode; + + /* SBI context */ + struct kvm_sbi_context sbi_context; + + /* Cache pages needed to program page tables with spinlock held */ + struct kvm_mmu_memory_cache mmu_page_cache; + + /* VCPU power-off state */ + bool power_off; + + /* Don't run the VCPU (blocked) */ + bool pause; +}; + +static inline void kvm_arch_hardware_unsetup(void) {} +static inline void kvm_arch_sync_events(struct kvm *kvm) {} +static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} + +#define KVM_ARCH_WANT_MMU_NOTIFIER + +#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 + +void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, + gpa_t gpa, gpa_t gpsz, + unsigned long order); +void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid); +void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz, + unsigned long order); +void kvm_riscv_local_hfence_gvma_all(void); +void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid, + unsigned long asid, + unsigned long gva, + unsigned long gvsz, + unsigned long order); +void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid, + unsigned long asid); +void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid, + unsigned long gva, unsigned long gvsz, + unsigned long order); +void kvm_riscv_local_hfence_vvma_all(unsigned long vmid); + +void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu); + +void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu); +void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu); +void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu); +void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu); + +void kvm_riscv_fence_i(struct kvm *kvm, + unsigned long hbase, unsigned long hmask); +void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm, + unsigned long hbase, unsigned long hmask, + gpa_t gpa, gpa_t gpsz, + unsigned long order); +void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm, + unsigned long hbase, unsigned long hmask); +void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm, + unsigned long hbase, unsigned long hmask, + unsigned long gva, unsigned long gvsz, + unsigned long order, unsigned long asid); +void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm, + unsigned long hbase, unsigned long hmask, + unsigned long asid); +void kvm_riscv_hfence_vvma_gva(struct kvm *kvm, + unsigned long hbase, unsigned long hmask, + unsigned long gva, unsigned long gvsz, + unsigned long order); +void kvm_riscv_hfence_vvma_all(struct kvm *kvm, + unsigned long hbase, unsigned long hmask); + +int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa, + phys_addr_t hpa, unsigned long size, + bool writable, bool in_atomic); +void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa, + unsigned long size); +int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, + struct kvm_memory_slot *memslot, + gpa_t gpa, unsigned long hva, bool is_write); +int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm); +void kvm_riscv_gstage_free_pgd(struct kvm *kvm); +void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu); +void kvm_riscv_gstage_mode_detect(void); +unsigned long kvm_riscv_gstage_mode(void); +int 
kvm_riscv_gstage_gpa_bits(void); + +void kvm_riscv_gstage_vmid_detect(void); +unsigned long kvm_riscv_gstage_vmid_bits(void); +int kvm_riscv_gstage_vmid_init(struct kvm *kvm); +bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid); +void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu); + +void __kvm_riscv_unpriv_trap(void); + +unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu, + bool read_insn, + unsigned long guest_addr, + struct kvm_cpu_trap *trap); +void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu, + struct kvm_cpu_trap *trap); +int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap); + +void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch); + +int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq); +void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, unsigned long mask); +void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); + +int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); + +#endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_types.h b/arch/riscv/include/asm/kvm_types.h new file mode 100644 index 000000000..e15765f98 --- /dev/null +++ b/arch/riscv/include/asm/kvm_types.h @@ -0,0 +1,7 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_KVM_TYPES_H +#define _ASM_RISCV_KVM_TYPES_H + +#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 32 + +#endif /* _ASM_RISCV_KVM_TYPES_H */ diff --git a/arch/riscv/include/asm/kvm_vcpu_fp.h b/arch/riscv/include/asm/kvm_vcpu_fp.h new file mode 100644 index 000000000..b55401474 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_fp.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 Western Digital Corporation or its affiliates. 
+ * + * Authors: + * Atish Patra <atish.patra@wdc.com> + * Anup Patel <anup.patel@wdc.com> + */ + +#ifndef __KVM_VCPU_RISCV_FP_H +#define __KVM_VCPU_RISCV_FP_H + +#include <linux/types.h> + +struct kvm_cpu_context; + +#ifdef CONFIG_FPU +void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context); +void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context); +void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context); +void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context); + +void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + const unsigned long *isa); +void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx, + const unsigned long *isa); +void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx); +void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx); +#else +static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu) +{ +} +static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx, + const unsigned long *isa) +{ +} +static inline void kvm_riscv_vcpu_guest_fp_restore( + struct kvm_cpu_context *cntx, + const unsigned long *isa) +{ +} +static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx) +{ +} +static inline void kvm_riscv_vcpu_host_fp_restore( + struct kvm_cpu_context *cntx) +{ +} +#endif + +int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype); +int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg, + unsigned long rtype); + +#endif diff --git a/arch/riscv/include/asm/kvm_vcpu_insn.h b/arch/riscv/include/asm/kvm_vcpu_insn.h new file mode 100644 index 000000000..350011c83 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_insn.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2022 Ventana Micro Systems Inc. + */ + +#ifndef __KVM_VCPU_RISCV_INSN_H +#define __KVM_VCPU_RISCV_INSN_H + +struct kvm_vcpu; +struct kvm_run; +struct kvm_cpu_trap; + +struct kvm_mmio_decode { + unsigned long insn; + int insn_len; + int len; + int shift; + int return_handled; +}; + +struct kvm_csr_decode { + unsigned long insn; + int return_handled; +}; + +/* Return values used by function emulating a particular instruction */ +enum kvm_insn_return { + KVM_INSN_EXIT_TO_USER_SPACE = 0, + KVM_INSN_CONTINUE_NEXT_SEPC, + KVM_INSN_CONTINUE_SAME_SEPC, + KVM_INSN_ILLEGAL_TRAP, + KVM_INSN_VIRTUAL_TRAP +}; + +void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run); +int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_cpu_trap *trap); + +int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long fault_addr, + unsigned long htinst); +int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run, + unsigned long fault_addr, + unsigned long htinst); +int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run); + +#endif diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h new file mode 100644 index 000000000..d4e3e600b --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/** + * Copyright (c) 2021 Western Digital Corporation or its affiliates. 
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_VCPU_SBI_H__
+#define __RISCV_KVM_VCPU_SBI_H__
+
+#define KVM_SBI_IMPID 3
+
+#define KVM_SBI_VERSION_MAJOR 1
+#define KVM_SBI_VERSION_MINOR 0
+
+struct kvm_vcpu_sbi_extension {
+ unsigned long extid_start;
+ unsigned long extid_end;
+ /**
+ * SBI extension handler. It can be defined for a given extension or a
+ * group of extensions, but it should always return Linux error codes
+ * rather than SBI-specific error codes.
+ */
+ int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long *out_val, struct kvm_cpu_trap *utrap,
+ bool *exit);
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ u32 type, u64 flags);
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(unsigned long extid);
+
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#endif
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+
+#endif /* __RISCV_KVM_VCPU_SBI_H__ */ diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h new file mode 100644 index 000000000..82f726030 --- /dev/null +++ b/arch/riscv/include/asm/kvm_vcpu_timer.h @@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ * + * Authors: + * Atish Patra <atish.patra@wdc.com> + */ + +#ifndef __KVM_VCPU_RISCV_TIMER_H +#define __KVM_VCPU_RISCV_TIMER_H + +#include <linux/hrtimer.h> + +struct kvm_guest_timer { + /* Mult & Shift values to get nanoseconds from cycles */ + u32 nsec_mult; + u32 nsec_shift; + /* Time delta value */ + u64 time_delta; +}; + +struct kvm_vcpu_timer { + /* Flag for whether init is done */ + bool init_done; + /* Flag for whether timer event is configured */ + bool next_set; + /* Next timer event cycles */ + u64 next_cycles; + /* Underlying hrtimer instance */ + struct hrtimer hrt; + + /* Flag to check if sstc is enabled or not */ + bool sstc_enabled; + /* A function pointer to switch between stimecmp or hrtimer at runtime */ + int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles); +}; + +int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles); +int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu); +void kvm_riscv_guest_timer_init(struct kvm *kvm); +void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu); +bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu); + +#endif diff --git a/arch/riscv/include/asm/linkage.h b/arch/riscv/include/asm/linkage.h new file mode 100644 index 000000000..9e88ba23c --- /dev/null +++ b/arch/riscv/include/asm/linkage.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _ASM_RISCV_LINKAGE_H +#define _ASM_RISCV_LINKAGE_H + +#define __ALIGN .balign 4 +#define __ALIGN_STR ".balign 4" + +#endif /* _ASM_RISCV_LINKAGE_H */ diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h new file mode 100644 index 000000000..4c58ee7f9 --- /dev/null +++ b/arch/riscv/include/asm/mmio.h @@ -0,0 +1,151 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h + * which was based on arch/arm/include/io.h + * + * Copyright (C) 1996-2000 Russell King + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2014 Regents of the University of California + */ + +#ifndef _ASM_RISCV_MMIO_H +#define _ASM_RISCV_MMIO_H + +#include <linux/types.h> +#include <asm/mmiowb.h> + +/* Generic IO read/write. These perform native-endian accesses. 
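+ *
+ * Each helper is a single load or store emitted through inline assembly, so
+ * the compiler cannot tear, combine or elide the device access; any ordering
+ * beyond that is the caller's problem. For example (a sketch with a made-up
+ * register offset), __raw_writel(1, base + 0x10) compiles to exactly one
+ * "sw", with no fences.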
*/ +#define __raw_writeb __raw_writeb +static inline void __raw_writeb(u8 val, volatile void __iomem *addr) +{ + asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writew __raw_writew +static inline void __raw_writew(u16 val, volatile void __iomem *addr) +{ + asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#define __raw_writel __raw_writel +static inline void __raw_writel(u32 val, volatile void __iomem *addr) +{ + asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr)); +} + +#ifdef CONFIG_64BIT +#define __raw_writeq __raw_writeq +static inline void __raw_writeq(u64 val, volatile void __iomem *addr) +{ + asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr)); +} +#endif + +#define __raw_readb __raw_readb +static inline u8 __raw_readb(const volatile void __iomem *addr) +{ + u8 val; + + asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readw __raw_readw +static inline u16 __raw_readw(const volatile void __iomem *addr) +{ + u16 val; + + asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#define __raw_readl __raw_readl +static inline u32 __raw_readl(const volatile void __iomem *addr) +{ + u32 val; + + asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} + +#ifdef CONFIG_64BIT +#define __raw_readq __raw_readq +static inline u64 __raw_readq(const volatile void __iomem *addr) +{ + u64 val; + + asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr)); + return val; +} +#endif + +/* + * Unordered I/O memory access primitives. These are even more relaxed than + * the relaxed versions, as they don't even order accesses between successive + * operations to the I/O regions. + */ +#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; }) +#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; }) +#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; }) + +#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c))) +#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c))) +#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c))) + +#ifdef CONFIG_64BIT +#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; }) +#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c))) +#endif + +/* + * Relaxed I/O memory access primitives. These follow the Device memory + * ordering rules but do not guarantee any ordering relative to Normal memory + * accesses. These are defined to order the indicated access (either a read or + * write) with all other I/O memory accesses to the same peripheral. Since the + * platform specification defines that all I/O regions are strongly ordered on + * channel 0, no explicit fences are required to enforce this ordering. 
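+ *
+ * A typical pattern (a sketch; the register names are made up): fill a DMA
+ * descriptor in normal memory, poke device-local registers with the relaxed
+ * accessors, and finish with an ordered writel() so the "fence w,o" in
+ * __io_bw() publishes the descriptor before the doorbell rings:
+ *
+ *	desc->addr = dma_addr;			(normal memory store)
+ *	writel_relaxed(RING_EN, base + CTRL);	(device-only ordering)
+ *	writel(1, base + DOORBELL);		(also orders the store above)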
+ */ +/* FIXME: These are now the same as asm-generic */ +#define __io_rbr() do {} while (0) +#define __io_rar() do {} while (0) +#define __io_rbw() do {} while (0) +#define __io_raw() do {} while (0) + +#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; }) +#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; }) +#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; }) + +#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); }) +#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); }) +#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); }) + +#ifdef CONFIG_64BIT +#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; }) +#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); }) +#endif + +/* + * I/O memory access primitives. Reads are ordered relative to any following + * Normal memory read and delay() loop. Writes are ordered relative to any + * prior Normal memory write. The memory barriers here are necessary as RISC-V + * doesn't define any ordering between the memory space and the I/O space. + */ +#define __io_br() do {} while (0) +#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); }) +#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); }) +#define __io_aw() mmiowb_set_pending() + +#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; }) +#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; }) +#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; }) + +#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); }) +#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); }) +#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); }) + +#ifdef CONFIG_64BIT +#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; }) +#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); }) +#endif + +#endif /* _ASM_RISCV_MMIO_H */ diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h new file mode 100644 index 000000000..0b2333e71 --- /dev/null +++ b/arch/riscv/include/asm/mmiowb.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_MMIOWB_H +#define _ASM_RISCV_MMIOWB_H + +/* + * "o,w" is sufficient to ensure that all writes to the device have completed + * before the write to the spinlock is allowed to commit. + */ +#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory"); + +#include <linux/smp.h> +#include <asm-generic/mmiowb.h> + +#endif /* _ASM_RISCV_MMIOWB_H */ diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h new file mode 100644 index 000000000..0099dc116 --- /dev/null +++ b/arch/riscv/include/asm/mmu.h @@ -0,0 +1,29 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + + +#ifndef _ASM_RISCV_MMU_H +#define _ASM_RISCV_MMU_H + +#ifndef __ASSEMBLY__ + +typedef struct { +#ifndef CONFIG_MMU + unsigned long end_brk; +#else + atomic_long_t id; +#endif + void *vdso; +#ifdef CONFIG_SMP + /* A local icache flush is needed before user execution can resume. 
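+ *
+ * Roughly: flush_icache_mm() marks all other harts stale in this mask
+ * and flushes the local hart immediately; a stale hart clears its own
+ * bit and issues fence.i when it next switches to this mm (see
+ * flush_icache_deferred() in arch/riscv/mm/context.c).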
*/
+ cpumask_t icache_stale_mask;
+#endif
+} mm_context_t;
+
+void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_MMU_H */ diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h new file mode 100644 index 000000000..7030837ad --- /dev/null +++ b/arch/riscv/include/asm/mmu_context.h @@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_MMU_CONTEXT_H
+#define _ASM_RISCV_MMU_CONTEXT_H
+
+#include <linux/mm_types.h>
+#include <asm-generic/mm_hooks.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
+
+#define activate_mm activate_mm
+static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, NULL);
+}
+
+#define init_new_context init_new_context
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_MMU
+ atomic_long_set(&mm->context.id, 0);
+#endif
+ return 0;
+}
+
+DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* _ASM_RISCV_MMU_CONTEXT_H */ diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h new file mode 100644 index 000000000..fa17e01d9 --- /dev/null +++ b/arch/riscv/include/asm/mmzone.h @@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[(nid)])
+
+#endif /* CONFIG_NUMA */
+#endif /* __ASM_MMZONE_H */ diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h new file mode 100644 index 000000000..76aa96a9f --- /dev/null +++ b/arch/riscv/include/asm/module.h @@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_MODULE_H
+#define _ASM_RISCV_MODULE_H
+
+#include <asm-generic/module.h>
+
+struct module;
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
+
+#ifdef CONFIG_MODULE_SECTIONS
+struct mod_section {
+ Elf_Shdr *shdr;
+ int num_entries;
+ int max_entries;
+};
+
+struct mod_arch_specific {
+ struct mod_section got;
+ struct mod_section plt;
+ struct mod_section got_plt;
+};
+
+struct got_entry {
+ unsigned long symbol_addr; /* the real variable address */
+};
+
+static inline struct got_entry emit_got_entry(unsigned long val)
+{
+ return (struct got_entry) {val};
+}
+
+static inline struct got_entry *get_got_entry(unsigned long val,
+ const struct mod_section *sec)
+{
+ struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got[i].symbol_addr == val)
+ return &got[i];
+ }
+ return NULL;
+}
+
+struct plt_entry {
+ /*
+ * Trampoline code to the real target address. The return address
+ * should be the original (pc+4) before entering the PLT entry.
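+ *
+ * The three instructions below fetch the target out of the matching
+ * .got.plt slot and jump to it. emit_plt_entry() computes the hi20/lo12
+ * pair as hi20 = (offset + 0x800) & 0xfffff000 and lo12 = offset - hi20,
+ * so the sign-extended lo12 added back by the ld still lands on the
+ * slot; e.g. offset 0x801 gives hi20 = 0x1000 and lo12 = -0x7ff, and
+ * 0x1000 - 0x7ff = 0x801.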
+ */ + u32 insn_auipc; /* auipc t0, 0x0 */ + u32 insn_ld; /* ld t1, 0x10(t0) */ + u32 insn_jr; /* jr t1 */ +}; + +#define OPC_AUIPC 0x0017 +#define OPC_LD 0x3003 +#define OPC_JALR 0x0067 +#define REG_T0 0x5 +#define REG_T1 0x6 + +static inline struct plt_entry emit_plt_entry(unsigned long val, + unsigned long plt, + unsigned long got_plt) +{ + /* + * U-Type encoding: + * +------------+----------+----------+ + * | imm[31:12] | rd[11:7] | opc[6:0] | + * +------------+----------+----------+ + * + * I-Type encoding: + * +------------+------------+--------+----------+----------+ + * | imm[31:20] | rs1[19:15] | funct3 | rd[11:7] | opc[6:0] | + * +------------+------------+--------+----------+----------+ + * + */ + unsigned long offset = got_plt - plt; + u32 hi20 = (offset + 0x800) & 0xfffff000; + u32 lo12 = (offset - hi20); + return (struct plt_entry) { + OPC_AUIPC | (REG_T0 << 7) | hi20, + OPC_LD | (lo12 << 20) | (REG_T0 << 15) | (REG_T1 << 7), + OPC_JALR | (REG_T1 << 15) + }; +} + +static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec) +{ + struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr; + int i; + for (i = 0; i < sec->num_entries; i++) { + if (got_plt[i].symbol_addr == val) + return i; + } + return -1; +} + +static inline struct plt_entry *get_plt_entry(unsigned long val, + const struct mod_section *sec_plt, + const struct mod_section *sec_got_plt) +{ + struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr; + int got_plt_idx = get_got_plt_idx(val, sec_got_plt); + if (got_plt_idx >= 0) + return plt + got_plt_idx; + else + return NULL; +} + +#endif /* CONFIG_MODULE_SECTIONS */ + +#endif /* _ASM_RISCV_MODULE_H */ diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h new file mode 100644 index 000000000..1075beae1 --- /dev/null +++ b/arch/riscv/include/asm/module.lds.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 Andes Technology Corporation */ +#ifdef CONFIG_MODULE_SECTIONS +SECTIONS { + .plt : { BYTE(0) } + .got : { BYTE(0) } + .got.plt : { BYTE(0) } +} +#endif diff --git a/arch/riscv/include/asm/numa.h b/arch/riscv/include/asm/numa.h new file mode 100644 index 000000000..8c8cf4297 --- /dev/null +++ b/arch/riscv/include/asm/numa.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_NUMA_H +#define __ASM_NUMA_H + +#include <asm/topology.h> +#include <asm-generic/numa.h> + +#endif /* __ASM_NUMA_H */ diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h new file mode 100644 index 000000000..86048c60f --- /dev/null +++ b/arch/riscv/include/asm/page.h @@ -0,0 +1,191 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com> + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn> + */ + +#ifndef _ASM_RISCV_PAGE_H +#define _ASM_RISCV_PAGE_H + +#include <linux/pfn.h> +#include <linux/const.h> + +#define PAGE_SHIFT (12) +#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT) +#define PAGE_MASK (~(PAGE_SIZE - 1)) + +#ifdef CONFIG_64BIT +#define HUGE_MAX_HSTATE 2 +#else +#define HUGE_MAX_HSTATE 1 +#endif +#define HPAGE_SHIFT PMD_SHIFT +#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT) +#define HPAGE_MASK (~(HPAGE_SIZE - 1)) +#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT) + +/* + * PAGE_OFFSET -- the first address of the first page of memory. 
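+ * On 64-bit kernels with an MMU it is not a compile-time constant: boot
+ * code sets kernel_map.page_offset to match the chosen paging mode
+ * (CONFIG_PAGE_OFFSET assumes SV57; the SV48/SV39 values are below).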
+ * When not using MMU this corresponds to the first free page in + * physical memory (aligned on a page boundary). + */ +#ifdef CONFIG_64BIT +#ifdef CONFIG_MMU +#define PAGE_OFFSET kernel_map.page_offset +#else +#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +#endif +/* + * By default, CONFIG_PAGE_OFFSET value corresponds to SV57 address space so + * define the PAGE_OFFSET value for SV48 and SV39. + */ +#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL) +#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL) +#else +#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL) +#endif /* CONFIG_64BIT */ + +#ifndef __ASSEMBLY__ + +#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE) +#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE) + +#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE) +#define copy_user_page(vto, vfrom, vaddr, topg) \ + memcpy((vto), (vfrom), PAGE_SIZE) + +/* + * Use struct definitions to apply C type checking + */ + +/* Page Global Directory entry */ +typedef struct { + unsigned long pgd; +} pgd_t; + +/* Page Table entry */ +typedef struct { + unsigned long pte; +} pte_t; + +typedef struct { + unsigned long pgprot; +} pgprot_t; + +typedef struct page *pgtable_t; + +#define pte_val(x) ((x).pte) +#define pgd_val(x) ((x).pgd) +#define pgprot_val(x) ((x).pgprot) + +#define __pte(x) ((pte_t) { (x) }) +#define __pgd(x) ((pgd_t) { (x) }) +#define __pgprot(x) ((pgprot_t) { (x) }) + +#ifdef CONFIG_64BIT +#define PTE_FMT "%016lx" +#else +#define PTE_FMT "%08lx" +#endif + +#ifdef CONFIG_MMU +extern unsigned long riscv_pfn_base; +#define ARCH_PFN_OFFSET (riscv_pfn_base) +#else +#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT) +#endif /* CONFIG_MMU */ + +struct kernel_mapping { + unsigned long page_offset; + unsigned long virt_addr; + uintptr_t phys_addr; + uintptr_t size; + /* Offset between linear mapping virtual address and kernel load address */ + unsigned long va_pa_offset; + /* Offset between kernel mapping virtual address and kernel load address */ + unsigned long va_kernel_pa_offset; + unsigned long va_kernel_xip_pa_offset; +#ifdef CONFIG_XIP_KERNEL + uintptr_t xiprom; + uintptr_t xiprom_sz; +#endif +}; + +extern struct kernel_mapping kernel_map; +extern phys_addr_t phys_ram_base; + +#define is_kernel_mapping(x) \ + ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size)) + +#define is_linear_mapping(x) \ + ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE)) + +#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset)) +#define kernel_mapping_pa_to_va(y) ({ \ + unsigned long _y = y; \ + (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \ + (void *)((unsigned long)(_y) + kernel_map.va_kernel_xip_pa_offset) : \ + (void *)((unsigned long)(_y) + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \ + }) +#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x) + +#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset) +#define kernel_mapping_va_to_pa(y) ({ \ + unsigned long _y = y; \ + (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \ + ((unsigned long)(_y) - kernel_map.va_kernel_xip_pa_offset) : \ + ((unsigned long)(_y) - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \ + }) + +#define __va_to_pa_nodebug(x) ({ \ + unsigned long _x = x; \ + is_linear_mapping(_x) ? 
\ + linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \ + }) + +#ifdef CONFIG_DEBUG_VIRTUAL +extern phys_addr_t __virt_to_phys(unsigned long x); +extern phys_addr_t __phys_addr_symbol(unsigned long x); +#else +#define __virt_to_phys(x) __va_to_pa_nodebug(x) +#define __phys_addr_symbol(x) __va_to_pa_nodebug(x) +#endif /* CONFIG_DEBUG_VIRTUAL */ + +#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0)) +#define __pa(x) __virt_to_phys((unsigned long)(x)) +#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x))) + +#define phys_to_pfn(phys) (PFN_DOWN(phys)) +#define pfn_to_phys(pfn) (PFN_PHYS(pfn)) + +#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr))) +#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn))) + +#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr))) +#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page))) + +#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page))) +#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr))) + +#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x)) + +#ifdef CONFIG_FLATMEM +#define pfn_valid(pfn) \ + (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr)) +#endif + +#endif /* __ASSEMBLY__ */ + +#define virt_addr_valid(vaddr) ({ \ + unsigned long _addr = (unsigned long)vaddr; \ + (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \ +}) + +#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC + +#include <asm-generic/memory_model.h> +#include <asm-generic/getorder.h> + +#endif /* _ASM_RISCV_PAGE_H */ diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h new file mode 100644 index 000000000..3cd00332d --- /dev/null +++ b/arch/riscv/include/asm/parse_asm.h @@ -0,0 +1,224 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 SiFive + */ + +#ifndef _ASM_RISCV_INSN_H +#define _ASM_RISCV_INSN_H + +#include <linux/bits.h> + +/* The bit field of immediate value in I-type instruction */ +#define I_IMM_SIGN_OPOFF 31 +#define I_IMM_11_0_OPOFF 20 +#define I_IMM_SIGN_OFF 12 +#define I_IMM_11_0_OFF 0 +#define I_IMM_11_0_MASK GENMASK(11, 0) + +/* The bit field of immediate value in J-type instruction */ +#define J_IMM_SIGN_OPOFF 31 +#define J_IMM_10_1_OPOFF 21 +#define J_IMM_11_OPOFF 20 +#define J_IMM_19_12_OPOFF 12 +#define J_IMM_SIGN_OFF 20 +#define J_IMM_10_1_OFF 1 +#define J_IMM_11_OFF 11 +#define J_IMM_19_12_OFF 12 +#define J_IMM_10_1_MASK GENMASK(9, 0) +#define J_IMM_11_MASK GENMASK(0, 0) +#define J_IMM_19_12_MASK GENMASK(7, 0) + +/* The bit field of immediate value in B-type instruction */ +#define B_IMM_SIGN_OPOFF 31 +#define B_IMM_10_5_OPOFF 25 +#define B_IMM_4_1_OPOFF 8 +#define B_IMM_11_OPOFF 7 +#define B_IMM_SIGN_OFF 12 +#define B_IMM_10_5_OFF 5 +#define B_IMM_4_1_OFF 1 +#define B_IMM_11_OFF 11 +#define B_IMM_10_5_MASK GENMASK(5, 0) +#define B_IMM_4_1_MASK GENMASK(3, 0) +#define B_IMM_11_MASK GENMASK(0, 0) + +/* The register offset in RVG instruction */ +#define RVG_RS1_OPOFF 15 +#define RVG_RS2_OPOFF 20 +#define RVG_RD_OPOFF 7 + +/* The bit field of immediate value in RVC J instruction */ +#define RVC_J_IMM_SIGN_OPOFF 12 +#define RVC_J_IMM_4_OPOFF 11 +#define RVC_J_IMM_9_8_OPOFF 9 +#define RVC_J_IMM_10_OPOFF 8 +#define RVC_J_IMM_6_OPOFF 7 +#define RVC_J_IMM_7_OPOFF 6 +#define RVC_J_IMM_3_1_OPOFF 3 +#define RVC_J_IMM_5_OPOFF 2 +#define RVC_J_IMM_SIGN_OFF 11 +#define RVC_J_IMM_4_OFF 4 +#define RVC_J_IMM_9_8_OFF 8 +#define RVC_J_IMM_10_OFF 10 +#define RVC_J_IMM_6_OFF 6 +#define RVC_J_IMM_7_OFF 7 +#define 
RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+
+/* parts of opcode for RVG*/
+#define OPCODE_BRANCH 0x63
+#define OPCODE_JALR 0x67
+#define OPCODE_JAL 0x6f
+#define OPCODE_SYSTEM 0x73
+
+/* parts of opcode for RVC*/
+#define OPCODE_C_0 0x0
+#define OPCODE_C_1 0x1
+#define OPCODE_C_2 0x2
+
+/* parts of funct3 code for I, M, A extension*/
+#define FUNCT3_JALR 0x0
+#define FUNCT3_BEQ 0x0
+#define FUNCT3_BNE 0x1000
+#define FUNCT3_BLT 0x4000
+#define FUNCT3_BGE 0x5000
+#define FUNCT3_BLTU 0x6000
+#define FUNCT3_BGEU 0x7000
+
+/* parts of funct3 code for C extension*/
+#define FUNCT3_C_BEQZ 0xc000
+#define FUNCT3_C_BNEZ 0xe000
+#define FUNCT3_C_J 0xa000
+#define FUNCT3_C_JAL 0x2000
+#define FUNCT4_C_JR 0x8000
+#define FUNCT4_C_JALR 0x9000
+
+#define FUNCT12_SRET 0x10200000
+
+#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR)
+#define MATCH_JAL (OPCODE_JAL)
+#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH)
+#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH)
+#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH)
+#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH)
+#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH)
+#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH)
+#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM)
+#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1)
+#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1)
+#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1)
+#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1)
+#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2)
+#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2)
+
+#define MASK_JALR 0x707f
+#define MASK_JAL 0x7f
+#define MASK_C_JALR 0xf07f
+#define MASK_C_JR 0xf07f
+#define MASK_C_JAL 0xe003
+#define MASK_C_J 0xe003
+#define MASK_BEQ 0x707f
+#define MASK_BNE 0x707f
+#define MASK_BLT 0x707f
+#define MASK_BGE 0x707f
+#define MASK_BLTU 0x707f
+#define MASK_BGEU 0x707f
+#define MASK_C_BEQZ 0xe003
+#define MASK_C_BNEZ 0xe003
+#define MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH)
+
+/* Define a series of is_XXX_insn functions to check if the value INSN
+ * is an instance of instruction XXX.
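+ *
+ * For example, DECLARE_INSN(jal, MATCH_JAL, MASK_JAL) expands to
+ *
+ *	static inline bool is_jal_insn(long insn)
+ *	{
+ *		return (insn & MASK_JAL) == MATCH_JAL;
+ *	}
+ *
+ * so callers can classify a fetched opcode with is_jal_insn(opcode).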
+ */ +#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \ +static inline bool is_ ## INSN_NAME ## _insn(long insn) \ +{ \ + return (insn & (INSN_MASK)) == (INSN_MATCH); \ +} + +#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1)) +#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1)) +#define RV_X(X, s, mask) (((X) >> (s)) & (mask)) +#define RVC_X(X, s, mask) RV_X(X, s, mask) + +#define EXTRACT_JTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \ + (RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \ + (RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \ + (RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); }) + +#define EXTRACT_ITYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \ + (RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); }) + +#define EXTRACT_BTYPE_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \ + (RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \ + (RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \ + (RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); }) + +#define EXTRACT_RVC_J_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \ + (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \ + (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \ + (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \ + (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \ + (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \ + (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \ + (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); }) + +#define EXTRACT_RVC_B_IMM(x) \ + ({typeof(x) x_ = (x); \ + (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \ + (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \ + (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \ + (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \ + (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); }) + +#endif /* _ASM_RISCV_INSN_H */ diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h new file mode 100644 index 000000000..98d9de07c --- /dev/null +++ b/arch/riscv/include/asm/patch.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 SiFive + */ + +#ifndef _ASM_RISCV_PATCH_H +#define _ASM_RISCV_PATCH_H + +int patch_text_nosync(void *addr, const void *insns, size_t len); +int patch_text(void *addr, u32 insn); + +extern int riscv_patch_in_stop_machine; + +#endif /* _ASM_RISCV_PATCH_H */ diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h new file mode 100644 index 000000000..cc2a184cf --- /dev/null +++ b/arch/riscv/include/asm/pci.h @@ -0,0 +1,33 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2016 SiFive + */ + +#ifndef _ASM_RISCV_PCI_H +#define _ASM_RISCV_PCI_H + +#include <linux/types.h> +#include <linux/slab.h> +#include <linux/dma-mapping.h> + +#include <asm/io.h> + +#define PCIBIOS_MIN_IO 4 +#define PCIBIOS_MIN_MEM 16 + +#if defined(CONFIG_PCI) && defined(CONFIG_NUMA) +static inline int pcibus_to_node(struct pci_bus *bus) +{ + return dev_to_node(&bus->dev); +} +#ifndef cpumask_of_pcibus +#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? 
\ + cpu_all_mask : \ + cpumask_of_node(pcibus_to_node(bus))) +#endif +#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */ + +/* Generic PCI */ +#include <asm-generic/pci.h> + +#endif /* _ASM_RISCV_PCI_H */ diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h new file mode 100644 index 000000000..665bbc9b2 --- /dev/null +++ b/arch/riscv/include/asm/perf_event.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2018 SiFive + * Copyright (C) 2018 Andes Technology Corporation + * + */ + +#ifndef _ASM_RISCV_PERF_EVENT_H +#define _ASM_RISCV_PERF_EVENT_H + +#include <linux/perf_event.h> +#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs + +#define perf_arch_fetch_caller_regs(regs, __ip) { \ + (regs)->epc = (__ip); \ + (regs)->s0 = (unsigned long) __builtin_frame_address(0); \ + (regs)->sp = current_stack_pointer; \ + (regs)->status = SR_PP; \ +} +#endif /* _ASM_RISCV_PERF_EVENT_H */ diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h new file mode 100644 index 000000000..59dc12b5b --- /dev/null +++ b/arch/riscv/include/asm/pgalloc.h @@ -0,0 +1,163 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com> + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PGALLOC_H +#define _ASM_RISCV_PGALLOC_H + +#include <linux/mm.h> +#include <asm/tlb.h> + +#ifdef CONFIG_MMU +#define __HAVE_ARCH_PUD_ALLOC_ONE +#define __HAVE_ARCH_PUD_FREE +#include <asm-generic/pgalloc.h> + +static inline void pmd_populate_kernel(struct mm_struct *mm, + pmd_t *pmd, pte_t *pte) +{ + unsigned long pfn = virt_to_pfn(pte); + + set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void pmd_populate(struct mm_struct *mm, + pmd_t *pmd, pgtable_t pte) +{ + unsigned long pfn = virt_to_pfn(page_address(pte)); + + set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); +} + +#ifndef __PAGETABLE_PMD_FOLDED +static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd) +{ + unsigned long pfn = virt_to_pfn(pmd); + + set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); +} + +static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud) +{ + if (pgtable_l4_enabled) { + unsigned long pfn = virt_to_pfn(pud); + + set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); + } +} + +static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d, + pud_t *pud) +{ + if (pgtable_l4_enabled) { + unsigned long pfn = virt_to_pfn(pud); + + set_p4d_safe(p4d, + __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); + } +} + +static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d) +{ + if (pgtable_l5_enabled) { + unsigned long pfn = virt_to_pfn(p4d); + + set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); + } +} + +static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd, + p4d_t *p4d) +{ + if (pgtable_l5_enabled) { + unsigned long pfn = virt_to_pfn(p4d); + + set_pgd_safe(pgd, + __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE)); + } +} + +#define pud_alloc_one pud_alloc_one +static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + if (pgtable_l4_enabled) + return __pud_alloc_one(mm, addr); + + return NULL; +} + +#define pud_free pud_free +static inline void pud_free(struct mm_struct *mm, pud_t *pud) +{ + if (pgtable_l4_enabled) + __pud_free(mm, pud); +} + +#define __pud_free_tlb(tlb, pud, addr) 
pud_free((tlb)->mm, pud) + +#define p4d_alloc_one p4d_alloc_one +static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr) +{ + if (pgtable_l5_enabled) { + gfp_t gfp = GFP_PGTABLE_USER; + + if (mm == &init_mm) + gfp = GFP_PGTABLE_KERNEL; + return (p4d_t *)get_zeroed_page(gfp); + } + + return NULL; +} + +static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + BUG_ON((unsigned long)p4d & (PAGE_SIZE-1)); + free_page((unsigned long)p4d); +} + +#define p4d_free p4d_free +static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d) +{ + if (pgtable_l5_enabled) + __p4d_free(mm, p4d); +} + +#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d) +#endif /* __PAGETABLE_PMD_FOLDED */ + +static inline void sync_kernel_mappings(pgd_t *pgd) +{ + memcpy(pgd + USER_PTRS_PER_PGD, + init_mm.pgd + USER_PTRS_PER_PGD, + (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t)); +} + +static inline pgd_t *pgd_alloc(struct mm_struct *mm) +{ + pgd_t *pgd; + + pgd = (pgd_t *)__get_free_page(GFP_KERNEL); + if (likely(pgd != NULL)) { + memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t)); + /* Copy kernel mappings */ + sync_kernel_mappings(pgd); + } + return pgd; +} + +#ifndef __PAGETABLE_PMD_FOLDED + +#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd) + +#endif /* __PAGETABLE_PMD_FOLDED */ + +#define __pte_free_tlb(tlb, pte, buf) \ +do { \ + pgtable_pte_page_dtor(pte); \ + tlb_remove_page((tlb), pte); \ +} while (0) +#endif /* CONFIG_MMU */ + +#endif /* _ASM_RISCV_PGALLOC_H */ diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h new file mode 100644 index 000000000..59ba1fbaf --- /dev/null +++ b/arch/riscv/include/asm/pgtable-32.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PGTABLE_32_H +#define _ASM_RISCV_PGTABLE_32_H + +#include <asm-generic/pgtable-nopmd.h> +#include <linux/bits.h> +#include <linux/const.h> + +/* Size of region mapped by a page global directory */ +#define PGDIR_SHIFT 22 +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +#define MAX_POSSIBLE_PHYSMEM_BITS 34 + +/* + * rv32 PTE format: + * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 + * PFN reserved for SW D A G U X W R V + */ +#define _PAGE_PFN_MASK GENMASK(31, 10) + +#define _PAGE_NOCACHE 0 +#define _PAGE_IO 0 +#define _PAGE_MTMASK 0 + +/* Set of bits to preserve across pte_modify() */ +#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \ + _PAGE_WRITE | _PAGE_EXEC | \ + _PAGE_USER | _PAGE_GLOBAL)) + +#endif /* _ASM_RISCV_PGTABLE_32_H */ diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h new file mode 100644 index 000000000..42a042c0e --- /dev/null +++ b/arch/riscv/include/asm/pgtable-64.h @@ -0,0 +1,377 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PGTABLE_64_H +#define _ASM_RISCV_PGTABLE_64_H + +#include <linux/bits.h> +#include <linux/const.h> +#include <asm/errata_list.h> + +extern bool pgtable_l4_enabled; +extern bool pgtable_l5_enabled; + +#define PGDIR_SHIFT_L3 30 +#define PGDIR_SHIFT_L4 39 +#define PGDIR_SHIFT_L5 48 +#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3) + +#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \ + (pgtable_l4_enabled ? 
PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3)) +/* Size of region mapped by a page global directory */ +#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT) +#define PGDIR_MASK (~(PGDIR_SIZE - 1)) + +/* p4d is folded into pgd in case of 4-level page table */ +#define P4D_SHIFT_L3 30 +#define P4D_SHIFT_L4 39 +#define P4D_SHIFT_L5 39 +#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \ + (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3)) +#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT) +#define P4D_MASK (~(P4D_SIZE - 1)) + +/* pud is folded into pgd in case of 3-level page table */ +#define PUD_SHIFT 30 +#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT) +#define PUD_MASK (~(PUD_SIZE - 1)) + +#define PMD_SHIFT 21 +/* Size of region mapped by a page middle directory */ +#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT) +#define PMD_MASK (~(PMD_SIZE - 1)) + +/* Page 4th Directory entry */ +typedef struct { + unsigned long p4d; +} p4d_t; + +#define p4d_val(x) ((x).p4d) +#define __p4d(x) ((p4d_t) { (x) }) +#define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t)) + +/* Page Upper Directory entry */ +typedef struct { + unsigned long pud; +} pud_t; + +#define pud_val(x) ((x).pud) +#define __pud(x) ((pud_t) { (x) }) +#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t)) + +/* Page Middle Directory entry */ +typedef struct { + unsigned long pmd; +} pmd_t; + +#define pmd_val(x) ((x).pmd) +#define __pmd(x) ((pmd_t) { (x) }) + +#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t)) + +/* + * rv64 PTE format: + * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0 + * N MT RSV PFN reserved for SW D A G U X W R V + */ +#define _PAGE_PFN_MASK GENMASK(53, 10) + +/* + * [62:61] Svpbmt Memory Type definitions: + * + * 00 - PMA Normal Cacheable, No change to implied PMA memory type + * 01 - NC Non-cacheable, idempotent, weakly-ordered Main Memory + * 10 - IO Non-cacheable, non-idempotent, strongly-ordered I/O memory + * 11 - Rsvd Reserved for future standard use + */ +#define _PAGE_NOCACHE_SVPBMT (1UL << 61) +#define _PAGE_IO_SVPBMT (1UL << 62) +#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT) + +/* + * [63:59] T-Head Memory Type definitions: + * + * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable + * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable + * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable + */ +#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60)) +#define _PAGE_NOCACHE_THEAD 0UL +#define _PAGE_IO_THEAD (1UL << 63) +#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59)) + +static inline u64 riscv_page_mtmask(void) +{ + u64 val; + + ALT_SVPBMT(val, _PAGE_MTMASK); + return val; +} + +static inline u64 riscv_page_nocache(void) +{ + u64 val; + + ALT_SVPBMT(val, _PAGE_NOCACHE); + return val; +} + +static inline u64 riscv_page_io(void) +{ + u64 val; + + ALT_SVPBMT(val, _PAGE_IO); + return val; +} + +#define _PAGE_NOCACHE riscv_page_nocache() +#define _PAGE_IO riscv_page_io() +#define _PAGE_MTMASK riscv_page_mtmask() + +/* Set of bits to preserve across pte_modify() */ +#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \ + _PAGE_WRITE | _PAGE_EXEC | \ + _PAGE_USER | _PAGE_GLOBAL | \ + _PAGE_MTMASK)) + +static inline int pud_present(pud_t pud) +{ + return (pud_val(pud) & _PAGE_PRESENT); +} + +static inline int pud_none(pud_t pud) +{ + return (pud_val(pud) == 0); +} + +static inline int pud_bad(pud_t pud) +{ + return !pud_present(pud); +} + +#define pud_leaf pud_leaf 
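 +/* The self-referential #define above lets generic mm code see that this + * architecture provides its own pud_leaf() and skip its fallback stub. */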
+static inline int pud_leaf(pud_t pud) +{ + return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF); +} + +static inline int pud_user(pud_t pud) +{ + return pud_val(pud) & _PAGE_USER; +} + +static inline void set_pud(pud_t *pudp, pud_t pud) +{ + *pudp = pud; +} + +static inline void pud_clear(pud_t *pudp) +{ + set_pud(pudp, __pud(0)); +} + +static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot) +{ + return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot)); +} + +static inline unsigned long _pud_pfn(pud_t pud) +{ + return __page_val_to_pfn(pud_val(pud)); +} + +static inline pmd_t *pud_pgtable(pud_t pud) +{ + return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud))); +} + +static inline struct page *pud_page(pud_t pud) +{ + return pfn_to_page(__page_val_to_pfn(pud_val(pud))); +} + +#define mm_p4d_folded mm_p4d_folded +static inline bool mm_p4d_folded(struct mm_struct *mm) +{ + if (pgtable_l5_enabled) + return false; + + return true; +} + +#define mm_pud_folded mm_pud_folded +static inline bool mm_pud_folded(struct mm_struct *mm) +{ + if (pgtable_l4_enabled) + return false; + + return true; +} + +#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1)) + +static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot) +{ + unsigned long prot_val = pgprot_val(prot); + + ALT_THEAD_PMA(prot_val); + + return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val); +} + +static inline unsigned long _pmd_pfn(pmd_t pmd) +{ + return __page_val_to_pfn(pmd_val(pmd)); +} + +#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot) + +#define pmd_ERROR(e) \ + pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e)) + +#define pud_ERROR(e) \ + pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e)) + +#define p4d_ERROR(e) \ + pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e)) + +static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) +{ + if (pgtable_l4_enabled) + *p4dp = p4d; + else + set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) }); +} + +static inline int p4d_none(p4d_t p4d) +{ + if (pgtable_l4_enabled) + return (p4d_val(p4d) == 0); + + return 0; +} + +static inline int p4d_present(p4d_t p4d) +{ + if (pgtable_l4_enabled) + return (p4d_val(p4d) & _PAGE_PRESENT); + + return 1; +} + +static inline int p4d_bad(p4d_t p4d) +{ + if (pgtable_l4_enabled) + return !p4d_present(p4d); + + return 0; +} + +static inline void p4d_clear(p4d_t *p4d) +{ + if (pgtable_l4_enabled) + set_p4d(p4d, __p4d(0)); +} + +static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot) +{ + return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot)); +} + +static inline unsigned long _p4d_pfn(p4d_t p4d) +{ + return __page_val_to_pfn(p4d_val(p4d)); +} + +static inline pud_t *p4d_pgtable(p4d_t p4d) +{ + if (pgtable_l4_enabled) + return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d))); + + return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) }); +} +#define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d)) + +static inline struct page *p4d_page(p4d_t p4d) +{ + return pfn_to_page(__page_val_to_pfn(p4d_val(p4d))); +} + +#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) + +#define pud_offset pud_offset +static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +{ + if (pgtable_l4_enabled) + return p4d_pgtable(*p4d) + pud_index(address); + + return (pud_t *)p4d; +} + +static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) +{ + if (pgtable_l5_enabled) + *pgdp = pgd; + else + set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) }); +} + +static inline int pgd_none(pgd_t pgd) +{ + if 
(pgtable_l5_enabled) + return (pgd_val(pgd) == 0); + + return 0; +} + +static inline int pgd_present(pgd_t pgd) +{ + if (pgtable_l5_enabled) + return (pgd_val(pgd) & _PAGE_PRESENT); + + return 1; +} + +static inline int pgd_bad(pgd_t pgd) +{ + if (pgtable_l5_enabled) + return !pgd_present(pgd); + + return 0; +} + +static inline void pgd_clear(pgd_t *pgd) +{ + if (pgtable_l5_enabled) + set_pgd(pgd, __pgd(0)); +} + +static inline p4d_t *pgd_pgtable(pgd_t pgd) +{ + if (pgtable_l5_enabled) + return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd))); + + return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) }); +} +#define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd)) + +static inline struct page *pgd_page(pgd_t pgd) +{ + return pfn_to_page(__page_val_to_pfn(pgd_val(pgd))); +} +#define pgd_page(pgd) pgd_page(pgd) + +#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) + +#define p4d_offset p4d_offset +static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +{ + if (pgtable_l5_enabled) + return pgd_pgtable(*pgd) + p4d_index(address); + + return (p4d_t *)pgd; +} + +#endif /* _ASM_RISCV_PGTABLE_64_H */ diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h new file mode 100644 index 000000000..b9e13a8fe --- /dev/null +++ b/arch/riscv/include/asm/pgtable-bits.h @@ -0,0 +1,38 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PGTABLE_BITS_H +#define _ASM_RISCV_PGTABLE_BITS_H + +#define _PAGE_ACCESSED_OFFSET 6 + +#define _PAGE_PRESENT (1 << 0) +#define _PAGE_READ (1 << 1) /* Readable */ +#define _PAGE_WRITE (1 << 2) /* Writable */ +#define _PAGE_EXEC (1 << 3) /* Executable */ +#define _PAGE_USER (1 << 4) /* User */ +#define _PAGE_GLOBAL (1 << 5) /* Global */ +#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */ +#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */ +#define _PAGE_SOFT (1 << 8) /* Reserved for software */ + +#define _PAGE_SPECIAL _PAGE_SOFT +#define _PAGE_TABLE _PAGE_PRESENT + +/* + * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to + * distinguish them from swapped out pages + */ +#define _PAGE_PROT_NONE _PAGE_GLOBAL + +#define _PAGE_PFN_SHIFT 10 + +/* + * when all of R/W/X are zero, the PTE is a pointer to the next level + * of the page table; otherwise, it is a leaf PTE. 
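 + * For example, _PAGE_TABLE (just _PAGE_PRESENT) marks such a next-level + * pointer, while a valid entry that also has _PAGE_READ set is already a + * read-only leaf mapping.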
+ */ +#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC) + +#endif /* _ASM_RISCV_PGTABLE_BITS_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h new file mode 100644 index 000000000..59bb53da4 --- /dev/null +++ b/arch/riscv/include/asm/pgtable.h @@ -0,0 +1,835 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PGTABLE_H +#define _ASM_RISCV_PGTABLE_H + +#include <linux/mmzone.h> +#include <linux/sizes.h> + +#include <asm/pgtable-bits.h> + +#ifndef CONFIG_MMU +#define KERNEL_LINK_ADDR PAGE_OFFSET +#define KERN_VIRT_SIZE (UL(-1)) +#else + +#define ADDRESS_SPACE_END (UL(-1)) + +#ifdef CONFIG_64BIT +/* Leave 2GB for kernel and BPF at the end of the address space */ +#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1) +#else +#define KERNEL_LINK_ADDR PAGE_OFFSET +#endif + +/* Number of entries in the page global directory */ +#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t)) +/* Number of entries in the page table */ +#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t)) + +/* + * Half of the kernel address space (half of the entries of the page global + * directory) is for the direct mapping. + */ +#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2) + +#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1) +#define VMALLOC_END PAGE_OFFSET +#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE) + +#define BPF_JIT_REGION_SIZE (SZ_128M) +#ifdef CONFIG_64BIT +#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE) +#define BPF_JIT_REGION_END (MODULES_END) +#else +#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE) +#define BPF_JIT_REGION_END (VMALLOC_END) +#endif + +/* Modules always live before the kernel */ +#ifdef CONFIG_64BIT +/* This is used to define the end of the KASAN shadow region */ +#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G) +#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G) +#define MODULES_END (PFN_ALIGN((unsigned long)&_start)) +#endif + +/* + * Roughly size the vmemmap space to be large enough to fit enough + * struct pages to map half the virtual address space. Then + * position vmemmap directly below the VMALLOC region. + */ +#ifdef CONFIG_64BIT +#define VA_BITS (pgtable_l5_enabled ? \ + 57 : (pgtable_l4_enabled ? 48 : 39)) +#else +#define VA_BITS 32 +#endif + +#define VMEMMAP_SHIFT \ + (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT) +#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT) +#define VMEMMAP_END VMALLOC_START +#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE) + +/* + * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel + * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled. 
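 + * With CONFIG_SPARSEMEM_VMEMMAP, pfn_to_page(pfn) then reduces to + * vmemmap + pfn and page_to_pfn(page) to page - vmemmap.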
+ */ +#define vmemmap ((struct page *)VMEMMAP_START) + +#define PCI_IO_SIZE SZ_16M +#define PCI_IO_END VMEMMAP_START +#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE) + +#define FIXADDR_TOP PCI_IO_START +#ifdef CONFIG_64BIT +#define MAX_FDT_SIZE PMD_SIZE +#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M) +#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE) +#else +#define MAX_FDT_SIZE PGDIR_SIZE +#define FIX_FDT_SIZE MAX_FDT_SIZE +#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE) +#endif +#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE) + +#endif + +#ifdef CONFIG_XIP_KERNEL +#define XIP_OFFSET SZ_32M +#define XIP_OFFSET_MASK (SZ_32M - 1) +#else +#define XIP_OFFSET 0 +#endif + +#ifndef __ASSEMBLY__ + +#include <asm/page.h> +#include <asm/tlbflush.h> +#include <linux/mm_types.h> + +#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT) + +#ifdef CONFIG_64BIT +#include <asm/pgtable-64.h> +#else +#include <asm/pgtable-32.h> +#endif /* CONFIG_64BIT */ + +#include <linux/page_table_check.h> + +#ifdef CONFIG_XIP_KERNEL +#define XIP_FIXUP(addr) ({ \ + uintptr_t __a = (uintptr_t)(addr); \ + (__a >= CONFIG_XIP_PHYS_ADDR && \ + __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \ + __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\ + __a; \ + }) +#else +#define XIP_FIXUP(addr) (addr) +#endif /* CONFIG_XIP_KERNEL */ + +struct pt_alloc_ops { + pte_t *(*get_pte_virt)(phys_addr_t pa); + phys_addr_t (*alloc_pte)(uintptr_t va); +#ifndef __PAGETABLE_PMD_FOLDED + pmd_t *(*get_pmd_virt)(phys_addr_t pa); + phys_addr_t (*alloc_pmd)(uintptr_t va); + pud_t *(*get_pud_virt)(phys_addr_t pa); + phys_addr_t (*alloc_pud)(uintptr_t va); + p4d_t *(*get_p4d_virt)(phys_addr_t pa); + phys_addr_t (*alloc_p4d)(uintptr_t va); +#endif +}; + +extern struct pt_alloc_ops pt_ops __initdata; + +#ifdef CONFIG_MMU +/* Number of PGD entries that a user-mode program can use */ +#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE) + +/* Page protection bits */ +#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER) + +#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_READ) +#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ) +#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE) +#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC) +#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC) +#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \ + _PAGE_EXEC | _PAGE_WRITE) + +#define PAGE_COPY PAGE_READ +#define PAGE_COPY_EXEC PAGE_READ_EXEC +#define PAGE_SHARED PAGE_WRITE +#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC + +#define _PAGE_KERNEL (_PAGE_READ \ + | _PAGE_WRITE \ + | _PAGE_PRESENT \ + | _PAGE_ACCESSED \ + | _PAGE_DIRTY \ + | _PAGE_GLOBAL) + +#define PAGE_KERNEL __pgprot(_PAGE_KERNEL) +#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE) +#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC) +#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \ + | _PAGE_EXEC) + +#define PAGE_TABLE __pgprot(_PAGE_TABLE) + +#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO) +#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP) + +extern pgd_t swapper_pg_dir[]; + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_present(pmd_t pmd) +{ + /* + * Checking for _PAGE_LEAF is needed too because when splitting a THP, + * split_huge_page() will temporarily clear the present bit; in this + * situation, pmd_present() and pmd_trans_huge() still need to return true.
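 + * pmd_mkinvalid() further down clears _PAGE_PRESENT and _PAGE_PROT_NONE + * but leaves the R/W/X bits in place, so such a PMD still registers as + * present through the _PAGE_LEAF check.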
+ */ + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF)); +} +#else +static inline int pmd_present(pmd_t pmd) +{ + return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); +} +#endif + +static inline int pmd_none(pmd_t pmd) +{ + return (pmd_val(pmd) == 0); +} + +static inline int pmd_bad(pmd_t pmd) +{ + return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF); +} + +#define pmd_leaf pmd_leaf +static inline int pmd_leaf(pmd_t pmd) +{ + return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF); +} + +static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) +{ + *pmdp = pmd; +} + +static inline void pmd_clear(pmd_t *pmdp) +{ + set_pmd(pmdp, __pmd(0)); +} + +static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot) +{ + unsigned long prot_val = pgprot_val(prot); + + ALT_THEAD_PMA(prot_val); + + return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val); +} + +static inline unsigned long _pgd_pfn(pgd_t pgd) +{ + return __page_val_to_pfn(pgd_val(pgd)); +} + +static inline struct page *pmd_page(pmd_t pmd) +{ + return pfn_to_page(__page_val_to_pfn(pmd_val(pmd))); +} + +static inline unsigned long pmd_page_vaddr(pmd_t pmd) +{ + return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd))); +} + +static inline pte_t pmd_pte(pmd_t pmd) +{ + return __pte(pmd_val(pmd)); +} + +static inline pte_t pud_pte(pud_t pud) +{ + return __pte(pud_val(pud)); +} + +/* Yields the page frame number (PFN) of a page table entry */ +static inline unsigned long pte_pfn(pte_t pte) +{ + return __page_val_to_pfn(pte_val(pte)); +} + +#define pte_page(x) pfn_to_page(pte_pfn(x)) + +/* Constructs a page table entry */ +static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot) +{ + unsigned long prot_val = pgprot_val(prot); + + ALT_THEAD_PMA(prot_val); + + return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val); +} + +#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot) + +static inline int pte_present(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)); +} + +static inline int pte_none(pte_t pte) +{ + return (pte_val(pte) == 0); +} + +static inline int pte_write(pte_t pte) +{ + return pte_val(pte) & _PAGE_WRITE; +} + +static inline int pte_exec(pte_t pte) +{ + return pte_val(pte) & _PAGE_EXEC; +} + +static inline int pte_user(pte_t pte) +{ + return pte_val(pte) & _PAGE_USER; +} + +static inline int pte_huge(pte_t pte) +{ + return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF); +} + +static inline int pte_dirty(pte_t pte) +{ + return pte_val(pte) & _PAGE_DIRTY; +} + +static inline int pte_young(pte_t pte) +{ + return pte_val(pte) & _PAGE_ACCESSED; +} + +static inline int pte_special(pte_t pte) +{ + return pte_val(pte) & _PAGE_SPECIAL; +} + +/* static inline pte_t pte_rdprotect(pte_t pte) */ + +static inline pte_t pte_wrprotect(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_WRITE)); +} + +/* static inline pte_t pte_mkread(pte_t pte) */ + +static inline pte_t pte_mkwrite(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_WRITE); +} + +/* static inline pte_t pte_mkexec(pte_t pte) */ + +static inline pte_t pte_mkdirty(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_DIRTY); +} + +static inline pte_t pte_mkclean(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_DIRTY)); +} + +static inline pte_t pte_mkyoung(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_ACCESSED); +} + +static inline pte_t pte_mkold(pte_t pte) +{ + return __pte(pte_val(pte) & ~(_PAGE_ACCESSED)); +} + +static inline pte_t pte_mkspecial(pte_t pte) +{ + return __pte(pte_val(pte) | _PAGE_SPECIAL); +} + +static inline 
pte_t pte_mkhuge(pte_t pte) +{ + return pte; +} + +#ifdef CONFIG_NUMA_BALANCING +/* + * See the comment in include/asm-generic/pgtable.h + */ +static inline int pte_protnone(pte_t pte) +{ + return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE; +} + +static inline int pmd_protnone(pmd_t pmd) +{ + return pte_protnone(pmd_pte(pmd)); +} +#endif + +/* Modify page protection bits */ +static inline pte_t pte_modify(pte_t pte, pgprot_t newprot) +{ + unsigned long newprot_val = pgprot_val(newprot); + + ALT_THEAD_PMA(newprot_val); + + return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val); +} + +#define pgd_ERROR(e) \ + pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e)) + + +/* Commit new configuration to MMU hardware */ +static inline void update_mmu_cache(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + /* + * The kernel assumes that TLBs don't cache invalid entries, but + * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a + * cache flush; it is necessary even after writing invalid entries. + * Relying on flush_tlb_fix_spurious_fault would suffice, but + * the extra traps reduce performance. So, eagerly SFENCE.VMA. + */ + local_flush_tlb_page(address); +} + +static inline void update_mmu_cache_pmd(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + pte_t *ptep = (pte_t *)pmdp; + + update_mmu_cache(vma, address, ptep); +} + +#define __HAVE_ARCH_PTE_SAME +static inline int pte_same(pte_t pte_a, pte_t pte_b) +{ + return pte_val(pte_a) == pte_val(pte_b); +} + +/* + * Certain architectures need to do special things when PTEs within + * a page table are directly modified. Thus, the following hook is + * made available. + */ +static inline void set_pte(pte_t *ptep, pte_t pteval) +{ + *ptep = pteval; +} + +void flush_icache_pte(pte_t pte); + +static inline void __set_pte_at(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pteval) +{ + if (pte_present(pteval) && pte_exec(pteval)) + flush_icache_pte(pteval); + + set_pte(ptep, pteval); +} + +static inline void set_pte_at(struct mm_struct *mm, + unsigned long addr, pte_t *ptep, pte_t pteval) +{ + page_table_check_pte_set(mm, addr, ptep, pteval); + __set_pte_at(mm, addr, ptep, pteval); +} + +static inline void pte_clear(struct mm_struct *mm, + unsigned long addr, pte_t *ptep) +{ + __set_pte_at(mm, addr, ptep, __pte(0)); +} + +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS +static inline int ptep_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep, + pte_t entry, int dirty) +{ + if (!pte_same(*ptep, entry)) + set_pte_at(vma->vm_mm, address, ptep, entry); + /* + * update_mmu_cache will unconditionally execute, handling both + * the case that the PTE changed and the spurious fault case. 
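 + * Reporting the entry as changed here is therefore always safe.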
+ */ + return true; +} + +#define __HAVE_ARCH_PTEP_GET_AND_CLEAR +static inline pte_t ptep_get_and_clear(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0)); + + page_table_check_pte_clear(mm, address, pte); + + return pte; +} + +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG +static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, + pte_t *ptep) +{ + if (!pte_young(*ptep)) + return 0; + return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep)); +} + +#define __HAVE_ARCH_PTEP_SET_WRPROTECT +static inline void ptep_set_wrprotect(struct mm_struct *mm, + unsigned long address, pte_t *ptep) +{ + atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep); +} + +#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH +static inline int ptep_clear_flush_young(struct vm_area_struct *vma, + unsigned long address, pte_t *ptep) +{ + /* + * This comment is borrowed from x86, but applies equally to RISC-V: + * + * Clearing the accessed bit without a TLB flush + * doesn't cause data corruption. [ It could cause incorrect + * page aging and the (mistaken) reclaim of hot pages, but the + * chance of that should be relatively low. ] + * + * So as a performance optimization don't flush the TLB when + * clearing the accessed bit, it will eventually be flushed by + * a context switch or a VM operation anyway. [ In the rare + * event of it not getting flushed for a long time the delay + * shouldn't really matter because there's no real memory + * pressure for swapout to react to. ] + */ + return ptep_test_and_clear_young(vma, address, ptep); +} + +#define pgprot_noncached pgprot_noncached +static inline pgprot_t pgprot_noncached(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot &= ~_PAGE_MTMASK; + prot |= _PAGE_IO; + + return __pgprot(prot); +} + +#define pgprot_writecombine pgprot_writecombine +static inline pgprot_t pgprot_writecombine(pgprot_t _prot) +{ + unsigned long prot = pgprot_val(_prot); + + prot &= ~_PAGE_MTMASK; + prot |= _PAGE_NOCACHE; + + return __pgprot(prot); +} + +/* + * THP functions + */ +static inline pmd_t pte_pmd(pte_t pte) +{ + return __pmd(pte_val(pte)); +} + +static inline pmd_t pmd_mkhuge(pmd_t pmd) +{ + return pmd; +} + +static inline pmd_t pmd_mkinvalid(pmd_t pmd) +{ + return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE)); +} + +#define __pmd_to_phys(pmd) (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT) + +static inline unsigned long pmd_pfn(pmd_t pmd) +{ + return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT); +} + +#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT) + +static inline unsigned long pud_pfn(pud_t pud) +{ + return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT); +} + +static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot) +{ + return pte_pmd(pte_modify(pmd_pte(pmd), newprot)); +} + +#define pmd_write pmd_write +static inline int pmd_write(pmd_t pmd) +{ + return pte_write(pmd_pte(pmd)); +} + +static inline int pmd_dirty(pmd_t pmd) +{ + return pte_dirty(pmd_pte(pmd)); +} + +#define pmd_young pmd_young +static inline int pmd_young(pmd_t pmd) +{ + return pte_young(pmd_pte(pmd)); +} + +static inline int pmd_user(pmd_t pmd) +{ + return pte_user(pmd_pte(pmd)); +} + +static inline pmd_t pmd_mkold(pmd_t pmd) +{ + return pte_pmd(pte_mkold(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkyoung(pmd_t pmd) +{ + return pte_pmd(pte_mkyoung(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkwrite(pmd_t pmd) +{ + 
return pte_pmd(pte_mkwrite(pmd_pte(pmd))); +} + +static inline pmd_t pmd_wrprotect(pmd_t pmd) +{ + return pte_pmd(pte_wrprotect(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkclean(pmd_t pmd) +{ + return pte_pmd(pte_mkclean(pmd_pte(pmd))); +} + +static inline pmd_t pmd_mkdirty(pmd_t pmd) +{ + return pte_pmd(pte_mkdirty(pmd_pte(pmd))); +} + +static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr, + pmd_t *pmdp, pmd_t pmd) +{ + page_table_check_pmd_set(mm, addr, pmdp, pmd); + return __set_pte_at(mm, addr, (pte_t *)pmdp, pmd_pte(pmd)); +} + +static inline void set_pud_at(struct mm_struct *mm, unsigned long addr, + pud_t *pudp, pud_t pud) +{ + page_table_check_pud_set(mm, addr, pudp, pud); + return __set_pte_at(mm, addr, (pte_t *)pudp, pud_pte(pud)); +} + +#ifdef CONFIG_PAGE_TABLE_CHECK +static inline bool pte_user_accessible_page(pte_t pte) +{ + return pte_present(pte) && pte_user(pte); +} + +static inline bool pmd_user_accessible_page(pmd_t pmd) +{ + return pmd_leaf(pmd) && pmd_user(pmd); +} + +static inline bool pud_user_accessible_page(pud_t pud) +{ + return pud_leaf(pud) && pud_user(pud); +} +#endif + +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +static inline int pmd_trans_huge(pmd_t pmd) +{ + return pmd_leaf(pmd); +} + +#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS +static inline int pmdp_set_access_flags(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, + pmd_t entry, int dirty) +{ + return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty); +} + +#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG +static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp) +{ + return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp); +} + +#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR +static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0)); + + page_table_check_pmd_clear(mm, address, pmd); + + return pmd; +} + +#define __HAVE_ARCH_PMDP_SET_WRPROTECT +static inline void pmdp_set_wrprotect(struct mm_struct *mm, + unsigned long address, pmd_t *pmdp) +{ + ptep_set_wrprotect(mm, address, (pte_t *)pmdp); +} + +#define pmdp_establish pmdp_establish +static inline pmd_t pmdp_establish(struct vm_area_struct *vma, + unsigned long address, pmd_t *pmdp, pmd_t pmd) +{ + page_table_check_pmd_set(vma->vm_mm, address, pmdp, pmd); + return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd))); +} +#endif /* CONFIG_TRANSPARENT_HUGEPAGE */ + +/* + * Encode and decode a swap entry + * + * Format of swap PTE: + * bit 0: _PAGE_PRESENT (zero) + * bits 1 to 3: _PAGE_LEAF (zero) + * bit 5: _PAGE_PROT_NONE (zero) + * bits 6 to 10: swap type + * bits 11 to XLEN-1: swap offset + */ +#define __SWP_TYPE_SHIFT 6 +#define __SWP_TYPE_BITS 5 +#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1) +#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT) + +#define MAX_SWAPFILES_CHECK() \ + BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS) + +#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK) +#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT) +#define __swp_entry(type, offset) ((swp_entry_t) \ + { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) }) + +#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) }) +#define __swp_entry_to_pte(x) ((pte_t) { (x).val }) + +#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION +#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
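 +/* As with the pte variants above, these simply reinterpret the raw pmd + * bits; e.g. __swp_entry(1, 0x10) packs type 1 and offset 0x10 as + * (1 << 6) | (0x10 << 11), given the shifts defined above. */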
+#define __swp_entry_to_pmd(swp) __pmd((swp).val) +#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */ + +/* + * In the RV64 Linux scheme, we give the user half of the virtual-address space + * and give the kernel the other (upper) half. + */ +#ifdef CONFIG_64BIT +#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE) +#else +#define KERN_VIRT_START FIXADDR_START +#endif + +/* + * Task size is: + * - 0x9fc00000 (~2.5GB) for RV32. + * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu + * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu + * + * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V + * Instruction Set Manual Volume II: Privileged Architecture" states that + * "load and store effective addresses, which are 64bits, must have bits + * 63–48 all equal to bit 47, or else a page-fault exception will occur." + */ +#ifdef CONFIG_64BIT +#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2) +#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2) + +#ifdef CONFIG_COMPAT +#define TASK_SIZE_32 (_AC(0x80000000, UL) - PAGE_SIZE) +#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \ + TASK_SIZE_32 : TASK_SIZE_64) +#else +#define TASK_SIZE TASK_SIZE_64 +#endif + +#else +#define TASK_SIZE FIXADDR_START +#define TASK_SIZE_MIN TASK_SIZE +#endif + +#else /* CONFIG_MMU */ + +#define PAGE_SHARED __pgprot(0) +#define PAGE_KERNEL __pgprot(0) +#define swapper_pg_dir NULL +#define TASK_SIZE 0xffffffffUL +#define VMALLOC_START 0 +#define VMALLOC_END TASK_SIZE + +#endif /* !CONFIG_MMU */ + +#define kern_addr_valid(addr) (1) /* FIXME */ + +extern char _start[]; +extern void *_dtb_early_va; +extern uintptr_t _dtb_early_pa; +#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU) +#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va)) +#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa)) +#else +#define dtb_early_va _dtb_early_va +#define dtb_early_pa _dtb_early_pa +#endif /* CONFIG_XIP_KERNEL */ +extern u64 satp_mode; +extern bool pgtable_l4_enabled; + +void paging_init(void); +void misc_mem_init(void); + +/* + * ZERO_PAGE is a global shared page that is always zero, + * used for zero-mapped memory areas, etc.
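 + * A read fault on a never-written anonymous mapping, for example, can be + * serviced by mapping this page read-only instead of allocating a new + * zeroed page.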
+ */ +extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]; +#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page)) + +#endif /* !__ASSEMBLY__ */ + +#endif /* _ASM_RISCV_PGTABLE_H */ diff --git a/arch/riscv/include/asm/probes.h b/arch/riscv/include/asm/probes.h new file mode 100644 index 000000000..a787e6d53 --- /dev/null +++ b/arch/riscv/include/asm/probes.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_PROBES_H +#define _ASM_RISCV_PROBES_H + +typedef u32 probe_opcode_t; +typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *); + +/* architecture specific copy of original instruction */ +struct arch_probe_insn { + probe_opcode_t *insn; + probes_handler_t *handler; + /* restore address after simulation */ + unsigned long restore; +}; + +#ifdef CONFIG_KPROBES +typedef u32 kprobe_opcode_t; +struct arch_specific_insn { + struct arch_probe_insn api; +}; +#endif + +#endif /* _ASM_RISCV_PROBES_H */ diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h new file mode 100644 index 000000000..94a0590c6 --- /dev/null +++ b/arch/riscv/include/asm/processor.h @@ -0,0 +1,85 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PROCESSOR_H +#define _ASM_RISCV_PROCESSOR_H + +#include <linux/const.h> + +#include <vdso/processor.h> + +#include <asm/ptrace.h> + +/* + * This decides where the kernel will search for a free chunk of vm + * space during mmap's. + */ +#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) + +#define STACK_TOP TASK_SIZE +#ifdef CONFIG_64BIT +#define STACK_TOP_MAX TASK_SIZE_64 +#else +#define STACK_TOP_MAX TASK_SIZE +#endif +#define STACK_ALIGN 16 + +#ifndef __ASSEMBLY__ + +struct task_struct; +struct pt_regs; + +/* CPU-specific state of a task */ +struct thread_struct { + /* Callee-saved registers */ + unsigned long ra; + unsigned long sp; /* Kernel mode stack */ + unsigned long s[12]; /* s[0]: frame pointer */ + struct __riscv_d_ext_state fstate; + unsigned long bad_cause; +}; + +/* Whitelist the fstate from the task_struct for hardened usercopy */ +static inline void arch_thread_struct_whitelist(unsigned long *offset, + unsigned long *size) +{ + *offset = offsetof(struct thread_struct, fstate); + *size = sizeof_field(struct thread_struct, fstate); +} + +#define INIT_THREAD { \ + .sp = sizeof(init_stack) + (long)&init_stack, \ +} + +#define task_pt_regs(tsk) \ + ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \ + - ALIGN(sizeof(struct pt_regs), STACK_ALIGN))) + +#define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc) +#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) + + +/* Do necessary setup to start up a newly executed thread. 
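 + * It points regs->epc at the entry point passed in @pc and the user + * stack pointer at @sp.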
*/ +extern void start_thread(struct pt_regs *regs, + unsigned long pc, unsigned long sp); + +extern unsigned long __get_wchan(struct task_struct *p); + + +static inline void wait_for_interrupt(void) +{ + __asm__ __volatile__ ("wfi"); +} + +struct device_node; +int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid); +int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid); + +extern void riscv_fill_hwcap(void); +extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h new file mode 100644 index 000000000..3c9ea6dd5 --- /dev/null +++ b/arch/riscv/include/asm/ptdump.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (C) 2019 SiFive + */ + +#ifndef _ASM_RISCV_PTDUMP_H +#define _ASM_RISCV_PTDUMP_H + +void ptdump_check_wx(void); + +#ifdef CONFIG_DEBUG_WX +static inline void debug_checkwx(void) +{ + ptdump_check_wx(); +} +#else +static inline void debug_checkwx(void) +{ +} +#endif + +#endif /* _ASM_RISCV_PTDUMP_H */ diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h new file mode 100644 index 000000000..6ecd46112 --- /dev/null +++ b/arch/riscv/include/asm/ptrace.h @@ -0,0 +1,177 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_PTRACE_H +#define _ASM_RISCV_PTRACE_H + +#include <uapi/asm/ptrace.h> +#include <asm/csr.h> +#include <linux/compiler.h> + +#ifndef __ASSEMBLY__ + +struct pt_regs { + unsigned long epc; + unsigned long ra; + unsigned long sp; + unsigned long gp; + unsigned long tp; + unsigned long t0; + unsigned long t1; + unsigned long t2; + unsigned long s0; + unsigned long s1; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long s2; + unsigned long s3; + unsigned long s4; + unsigned long s5; + unsigned long s6; + unsigned long s7; + unsigned long s8; + unsigned long s9; + unsigned long s10; + unsigned long s11; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; + /* Supervisor/Machine CSRs */ + unsigned long status; + unsigned long badaddr; + unsigned long cause; + /* a0 value before the syscall */ + unsigned long orig_a0; +}; + +#ifdef CONFIG_64BIT +#define REG_FMT "%016lx" +#else +#define REG_FMT "%08lx" +#endif + +#define user_mode(regs) (((regs)->status & SR_PP) == 0) + +#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_a0) + +/* Helpers for working with the instruction pointer */ +static inline unsigned long instruction_pointer(struct pt_regs *regs) +{ + return regs->epc; +} +static inline void instruction_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->epc = val; +} + +#define profile_pc(regs) instruction_pointer(regs) + +/* Helpers for working with the user stack pointer */ +static inline unsigned long user_stack_pointer(struct pt_regs *regs) +{ + return regs->sp; +} +static inline void user_stack_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->sp = val; +} + +/* Valid only for Kernel mode traps. 
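 + * For traps taken from user mode, regs->sp holds the user stack pointer + * instead; use user_stack_pointer() for that case.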
*/ +static inline unsigned long kernel_stack_pointer(struct pt_regs *regs) +{ + return regs->sp; +} + +/* Helpers for working with the frame pointer */ +static inline unsigned long frame_pointer(struct pt_regs *regs) +{ + return regs->s0; +} +static inline void frame_pointer_set(struct pt_regs *regs, + unsigned long val) +{ + regs->s0 = val; +} + +static inline unsigned long regs_return_value(struct pt_regs *regs) +{ + return regs->a0; +} + +static inline void regs_set_return_value(struct pt_regs *regs, + unsigned long val) +{ + regs->a0 = val; +} + +extern int regs_query_register_offset(const char *name); +extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs, + unsigned int n); + +void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr, + unsigned long frame_pointer); +int do_syscall_trace_enter(struct pt_regs *regs); +void do_syscall_trace_exit(struct pt_regs *regs); + +/** + * regs_get_register() - get register value from its offset + * @regs: pt_regs from which the register value is read + * @offset: offset of the register. + * + * regs_get_register() returns the value of the register located at @offset + * from @regs. The @offset is the offset of the register within struct pt_regs. + * If @offset is bigger than MAX_REG_OFFSET, this returns 0. + */ +static inline unsigned long regs_get_register(struct pt_regs *regs, + unsigned int offset) +{ + if (unlikely(offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + +/** + * regs_get_kernel_argument() - get Nth function argument in kernel + * @regs: pt_regs of that context + * @n: function argument number (start from 0) + * + * regs_get_kernel_argument() returns the @n-th argument of the function call. + * + * Note that the argument is read correctly only if the function takes no + * more than eight register arguments. + */ +static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs, + unsigned int n) +{ + static const int nr_reg_arguments = 8; + static const unsigned int argument_offs[] = { + offsetof(struct pt_regs, a0), + offsetof(struct pt_regs, a1), + offsetof(struct pt_regs, a2), + offsetof(struct pt_regs, a3), + offsetof(struct pt_regs, a4), + offsetof(struct pt_regs, a5), + offsetof(struct pt_regs, a6), + offsetof(struct pt_regs, a7), + }; + + if (n < nr_reg_arguments) + return regs_get_register(regs, argument_offs[n]); + return 0; +} + +#endif /* __ASSEMBLY__ */ + +#endif /* _ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h new file mode 100644 index 000000000..9baddaee5 --- /dev/null +++ b/arch/riscv/include/asm/sbi.h @@ -0,0 +1,330 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2015 Regents of the University of California + * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */ + +#ifndef _ASM_RISCV_SBI_H +#define _ASM_RISCV_SBI_H + +#include <linux/types.h> +#include <linux/cpumask.h> + +#ifdef CONFIG_RISCV_SBI +enum sbi_ext_id { +#ifdef CONFIG_RISCV_SBI_V01 + SBI_EXT_0_1_SET_TIMER = 0x0, + SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1, + SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2, + SBI_EXT_0_1_CLEAR_IPI = 0x3, + SBI_EXT_0_1_SEND_IPI = 0x4, + SBI_EXT_0_1_REMOTE_FENCE_I = 0x5, + SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6, + SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7, + SBI_EXT_0_1_SHUTDOWN = 0x8, +#endif + SBI_EXT_BASE = 0x10, + SBI_EXT_TIME = 0x54494D45, + SBI_EXT_IPI = 0x735049, + SBI_EXT_RFENCE = 0x52464E43, + SBI_EXT_HSM = 0x48534D, + SBI_EXT_SRST = 0x53525354, + SBI_EXT_PMU = 0x504D55, + + /* Experimental extensions must lie within this range */ + SBI_EXT_EXPERIMENTAL_START = 0x08000000, + SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF, + + /* Vendor extensions must lie within this range */ + SBI_EXT_VENDOR_START = 0x09000000, + SBI_EXT_VENDOR_END = 0x09FFFFFF, +}; + +enum sbi_ext_base_fid { + SBI_EXT_BASE_GET_SPEC_VERSION = 0, + SBI_EXT_BASE_GET_IMP_ID, + SBI_EXT_BASE_GET_IMP_VERSION, + SBI_EXT_BASE_PROBE_EXT, + SBI_EXT_BASE_GET_MVENDORID, + SBI_EXT_BASE_GET_MARCHID, + SBI_EXT_BASE_GET_MIMPID, +}; + +enum sbi_ext_time_fid { + SBI_EXT_TIME_SET_TIMER = 0, +}; + +enum sbi_ext_ipi_fid { + SBI_EXT_IPI_SEND_IPI = 0, +}; + +enum sbi_ext_rfence_fid { + SBI_EXT_RFENCE_REMOTE_FENCE_I = 0, + SBI_EXT_RFENCE_REMOTE_SFENCE_VMA, + SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID, + SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID, + SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA, + SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID, + SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA, +}; + +enum sbi_ext_hsm_fid { + SBI_EXT_HSM_HART_START = 0, + SBI_EXT_HSM_HART_STOP, + SBI_EXT_HSM_HART_STATUS, + SBI_EXT_HSM_HART_SUSPEND, +}; + +enum sbi_hsm_hart_state { + SBI_HSM_STATE_STARTED = 0, + SBI_HSM_STATE_STOPPED, + SBI_HSM_STATE_START_PENDING, + SBI_HSM_STATE_STOP_PENDING, + SBI_HSM_STATE_SUSPENDED, + SBI_HSM_STATE_SUSPEND_PENDING, + SBI_HSM_STATE_RESUME_PENDING, +}; + +#define SBI_HSM_SUSP_BASE_MASK 0x7fffffff +#define SBI_HSM_SUSP_NON_RET_BIT 0x80000000 +#define SBI_HSM_SUSP_PLAT_BASE 0x10000000 + +#define SBI_HSM_SUSPEND_RET_DEFAULT 0x00000000 +#define SBI_HSM_SUSPEND_RET_PLATFORM SBI_HSM_SUSP_PLAT_BASE +#define SBI_HSM_SUSPEND_RET_LAST SBI_HSM_SUSP_BASE_MASK +#define SBI_HSM_SUSPEND_NON_RET_DEFAULT SBI_HSM_SUSP_NON_RET_BIT +#define SBI_HSM_SUSPEND_NON_RET_PLATFORM (SBI_HSM_SUSP_NON_RET_BIT | \ + SBI_HSM_SUSP_PLAT_BASE) +#define SBI_HSM_SUSPEND_NON_RET_LAST (SBI_HSM_SUSP_NON_RET_BIT | \ + SBI_HSM_SUSP_BASE_MASK) + +enum sbi_ext_srst_fid { + SBI_EXT_SRST_RESET = 0, +}; + +enum sbi_srst_reset_type { + SBI_SRST_RESET_TYPE_SHUTDOWN = 0, + SBI_SRST_RESET_TYPE_COLD_REBOOT, + SBI_SRST_RESET_TYPE_WARM_REBOOT, +}; + +enum sbi_srst_reset_reason { + SBI_SRST_RESET_REASON_NONE = 0, + SBI_SRST_RESET_REASON_SYS_FAILURE, +}; + +enum sbi_ext_pmu_fid { + SBI_EXT_PMU_NUM_COUNTERS = 0, + SBI_EXT_PMU_COUNTER_GET_INFO, + SBI_EXT_PMU_COUNTER_CFG_MATCH, + SBI_EXT_PMU_COUNTER_START, + SBI_EXT_PMU_COUNTER_STOP, + SBI_EXT_PMU_COUNTER_FW_READ, +}; + +union sbi_pmu_ctr_info { + unsigned long value; + struct { + unsigned long csr:12; + unsigned long width:6; +#if __riscv_xlen == 32 + unsigned long reserved:13; +#else + unsigned long reserved:45; +#endif + unsigned long type:1; + }; +}; + +#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0) +#define RISCV_PMU_RAW_EVENT_IDX 0x20000 + +/* General PMU event codes specified in the SBI PMU extension */ +enum sbi_pmu_hw_generic_events_t {
SBI_PMU_HW_NO_EVENT = 0, + SBI_PMU_HW_CPU_CYCLES = 1, + SBI_PMU_HW_INSTRUCTIONS = 2, + SBI_PMU_HW_CACHE_REFERENCES = 3, + SBI_PMU_HW_CACHE_MISSES = 4, + SBI_PMU_HW_BRANCH_INSTRUCTIONS = 5, + SBI_PMU_HW_BRANCH_MISSES = 6, + SBI_PMU_HW_BUS_CYCLES = 7, + SBI_PMU_HW_STALLED_CYCLES_FRONTEND = 8, + SBI_PMU_HW_STALLED_CYCLES_BACKEND = 9, + SBI_PMU_HW_REF_CPU_CYCLES = 10, + + SBI_PMU_HW_GENERAL_MAX, +}; + +/* + * Special "firmware" events provided by the firmware, even if the hardware + * does not support performance events. These events are encoded as a raw + * event type in the Linux kernel perf framework. + */ +enum sbi_pmu_fw_generic_events_t { + SBI_PMU_FW_MISALIGNED_LOAD = 0, + SBI_PMU_FW_MISALIGNED_STORE = 1, + SBI_PMU_FW_ACCESS_LOAD = 2, + SBI_PMU_FW_ACCESS_STORE = 3, + SBI_PMU_FW_ILLEGAL_INSN = 4, + SBI_PMU_FW_SET_TIMER = 5, + SBI_PMU_FW_IPI_SENT = 6, + SBI_PMU_FW_IPI_RECVD = 7, + SBI_PMU_FW_FENCE_I_SENT = 8, + SBI_PMU_FW_FENCE_I_RECVD = 9, + SBI_PMU_FW_SFENCE_VMA_SENT = 10, + SBI_PMU_FW_SFENCE_VMA_RCVD = 11, + SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12, + SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13, + + SBI_PMU_FW_HFENCE_GVMA_SENT = 14, + SBI_PMU_FW_HFENCE_GVMA_RCVD = 15, + SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16, + SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17, + + SBI_PMU_FW_HFENCE_VVMA_SENT = 18, + SBI_PMU_FW_HFENCE_VVMA_RCVD = 19, + SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20, + SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21, + SBI_PMU_FW_MAX, +}; + +/* SBI PMU event types */ +enum sbi_pmu_event_type { + SBI_PMU_EVENT_TYPE_HW = 0x0, + SBI_PMU_EVENT_TYPE_CACHE = 0x1, + SBI_PMU_EVENT_TYPE_RAW = 0x2, + SBI_PMU_EVENT_TYPE_FW = 0xf, +}; + +/* SBI PMU counter types */ +enum sbi_pmu_ctr_type { + SBI_PMU_CTR_TYPE_HW = 0x0, + SBI_PMU_CTR_TYPE_FW, +}; + +/* Helper macros to decode event idx */ +#define SBI_PMU_EVENT_IDX_OFFSET 20 +#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF +#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF +#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000 +#define SBI_PMU_EVENT_RAW_IDX 0x20000 +#define SBI_PMU_FIXED_CTR_MASK 0x07 + +#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8 +#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06 +#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01 + +#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF + +/* Flags defined for config matching function */ +#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0) +#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1) +#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2) +#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3) +#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4) +#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5) +#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6) +#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7) + +/* Flags defined for counter start function */ +#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0) + +/* Flags defined for counter stop function */ +#define SBI_PMU_STOP_FLAG_RESET (1 << 0) + +#define SBI_SPEC_VERSION_DEFAULT 0x1 +#define SBI_SPEC_VERSION_MAJOR_SHIFT 24 +#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f +#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff + +/* SBI return error codes */ +#define SBI_SUCCESS 0 +#define SBI_ERR_FAILURE -1 +#define SBI_ERR_NOT_SUPPORTED -2 +#define SBI_ERR_INVALID_PARAM -3 +#define SBI_ERR_DENIED -4 +#define SBI_ERR_INVALID_ADDRESS -5 +#define SBI_ERR_ALREADY_AVAILABLE -6 +#define SBI_ERR_ALREADY_STARTED -7 +#define SBI_ERR_ALREADY_STOPPED -8 + +extern unsigned long sbi_spec_version; +struct sbiret { + long error; + long value; +}; + +void sbi_init(void); +struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, + unsigned long arg1, unsigned long arg2, + unsigned
long arg3, unsigned long arg4, + unsigned long arg5); + +void sbi_console_putchar(int ch); +int sbi_console_getchar(void); +long sbi_get_mvendorid(void); +long sbi_get_marchid(void); +long sbi_get_mimpid(void); +void sbi_set_timer(uint64_t stime_value); +void sbi_shutdown(void); +void sbi_clear_ipi(void); +int sbi_send_ipi(const struct cpumask *cpu_mask); +int sbi_remote_fence_i(const struct cpumask *cpu_mask); +int sbi_remote_sfence_vma(const struct cpumask *cpu_mask, + unsigned long start, + unsigned long size); + +int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask, + unsigned long start, + unsigned long size, + unsigned long asid); +int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask, + unsigned long start, + unsigned long size); +int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask, + unsigned long start, + unsigned long size, + unsigned long vmid); +int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask, + unsigned long start, + unsigned long size); +int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask, + unsigned long start, + unsigned long size, + unsigned long asid); +long sbi_probe_extension(int ext); + +/* Check if current SBI specification version is 0.1 or not */ +static inline int sbi_spec_is_0_1(void) +{ + return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0; +} + +/* Get the major version of SBI */ +static inline unsigned long sbi_major_version(void) +{ + return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) & + SBI_SPEC_VERSION_MAJOR_MASK; +} + +/* Get the minor version of SBI */ +static inline unsigned long sbi_minor_version(void) +{ + return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK; +} + +/* Make SBI version */ +static inline unsigned long sbi_mk_version(unsigned long major, + unsigned long minor) +{ + return ((major & SBI_SPEC_VERSION_MAJOR_MASK) << + SBI_SPEC_VERSION_MAJOR_SHIFT) | minor; +} + +int sbi_err_map_linux_errno(int err); +#else /* CONFIG_RISCV_SBI */ +static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; } +static inline void sbi_init(void) {} +#endif /* CONFIG_RISCV_SBI */ +#endif /* _ASM_RISCV_SBI_H */ diff --git a/arch/riscv/include/asm/seccomp.h b/arch/riscv/include/asm/seccomp.h new file mode 100644 index 000000000..c7ee6a350 --- /dev/null +++ b/arch/riscv/include/asm/seccomp.h @@ -0,0 +1,20 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_SECCOMP_H +#define _ASM_SECCOMP_H + +#include <asm/unistd.h> + +#include <asm-generic/seccomp.h> + +#ifdef CONFIG_64BIT +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV64 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME "riscv64" +#else /* !CONFIG_64BIT */ +# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV32 +# define SECCOMP_ARCH_NATIVE_NR NR_syscalls +# define SECCOMP_ARCH_NATIVE_NAME "riscv32" +#endif + +#endif /* _ASM_SECCOMP_H */ diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h new file mode 100644 index 000000000..a393d5035 --- /dev/null +++ b/arch/riscv/include/asm/sections.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates. 
+ */ +#ifndef __ASM_SECTIONS_H +#define __ASM_SECTIONS_H + +#include <asm-generic/sections.h> +#include <linux/mm.h> + +extern char _start[]; +extern char _start_kernel[]; +extern char __init_data_begin[], __init_data_end[]; +extern char __init_text_begin[], __init_text_end[]; +extern char __alt_start[], __alt_end[]; +extern char __exittext_begin[], __exittext_end[]; + +static inline bool is_va_kernel_text(uintptr_t va) +{ + uintptr_t start = (uintptr_t)_start; + uintptr_t end = (uintptr_t)__init_data_begin; + + return va >= start && va < end; +} + +static inline bool is_va_kernel_lm_alias_text(uintptr_t va) +{ + uintptr_t start = (uintptr_t)lm_alias(_start); + uintptr_t end = (uintptr_t)lm_alias(__init_data_begin); + + return va >= start && va < end; +} + +#endif /* __ASM_SECTIONS_H */ diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h new file mode 100644 index 000000000..a2c14d4b3 --- /dev/null +++ b/arch/riscv/include/asm/set_memory.h @@ -0,0 +1,59 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2019 SiFive + */ + +#ifndef _ASM_RISCV_SET_MEMORY_H +#define _ASM_RISCV_SET_MEMORY_H + +#ifndef __ASSEMBLY__ +/* + * Functions to change memory attributes. + */ +#ifdef CONFIG_MMU +int set_memory_ro(unsigned long addr, int numpages); +int set_memory_rw(unsigned long addr, int numpages); +int set_memory_x(unsigned long addr, int numpages); +int set_memory_nx(unsigned long addr, int numpages); +int set_memory_rw_nx(unsigned long addr, int numpages); +static __always_inline int set_kernel_memory(char *startp, char *endp, + int (*set_memory)(unsigned long start, + int num_pages)) +{ + unsigned long start = (unsigned long)startp; + unsigned long end = (unsigned long)endp; + int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT; + + return set_memory(start, num_pages); +} +#else +static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_x(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; } +static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; } +static inline int set_kernel_memory(char *startp, char *endp, + int (*set_memory)(unsigned long start, + int num_pages)) +{ + return 0; +} +#endif + +int set_direct_map_invalid_noflush(struct page *page); +int set_direct_map_default_noflush(struct page *page); +bool kernel_page_present(struct page *page); + +#endif /* __ASSEMBLY__ */ + +#ifdef CONFIG_STRICT_KERNEL_RWX +#ifdef CONFIG_64BIT +#define SECTION_ALIGN (1 << 21) +#else +#define SECTION_ALIGN (1 << 22) +#endif +#else /* !CONFIG_STRICT_KERNEL_RWX */ +#define SECTION_ALIGN L1_CACHE_BYTES +#endif /* CONFIG_STRICT_KERNEL_RWX */ + +#endif /* _ASM_RISCV_SET_MEMORY_H */ diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h new file mode 100644 index 000000000..956ae0a01 --- /dev/null +++ b/arch/riscv/include/asm/signal.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_SIGNAL_H +#define __ASM_SIGNAL_H + +#include <uapi/asm/signal.h> +#include <uapi/asm/ptrace.h> + +asmlinkage __visible +void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags); + +#endif diff --git a/arch/riscv/include/asm/signal32.h b/arch/riscv/include/asm/signal32.h new file mode 100644 index 000000000..96dc56932 --- /dev/null +++ b/arch/riscv/include/asm/signal32.h @@ -0,0 
+1,18 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef __ASM_SIGNAL32_H +#define __ASM_SIGNAL32_H + +#if IS_ENABLED(CONFIG_COMPAT) +int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs); +#else +static inline +int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set, + struct pt_regs *regs) +{ + return -1; +} +#endif + +#endif diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h new file mode 100644 index 000000000..3831b638e --- /dev/null +++ b/arch/riscv/include/asm/smp.h @@ -0,0 +1,107 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_SMP_H +#define _ASM_RISCV_SMP_H + +#include <linux/cpumask.h> +#include <linux/irqreturn.h> +#include <linux/thread_info.h> + +#define INVALID_HARTID ULONG_MAX + +struct seq_file; +extern unsigned long boot_cpu_hartid; + +struct riscv_ipi_ops { + void (*ipi_inject)(const struct cpumask *target); + void (*ipi_clear)(void); +}; + +#ifdef CONFIG_SMP +/* + * Mapping between Linux logical cpu index and hartid. + */ +extern unsigned long __cpuid_to_hartid_map[NR_CPUS]; +#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu] + +/* print IPI stats */ +void show_ipi_stats(struct seq_file *p, int prec); + +/* SMP initialization hook for setup_arch */ +void __init setup_smp(void); + +/* Called from C code, this handles an IPI. */ +void handle_IPI(struct pt_regs *regs); + +/* Hook for the generic smp_call_function_many() routine. */ +void arch_send_call_function_ipi_mask(struct cpumask *mask); + +/* Hook for the generic smp_call_function_single() routine. */ +void arch_send_call_function_single_ipi(int cpu); + +int riscv_hartid_to_cpuid(unsigned long hartid); + +/* Set custom IPI operations */ +void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops); + +/* Clear IPI for current CPU */ +void riscv_clear_ipi(void); + +/* Check whether the other CPUs have stopped */ +bool smp_crash_stop_failed(void); + +/* Secondary hart entry */ +asmlinkage void smp_callin(void); + +/* + * Obtains the logical CPU number of the currently executing task. This + * relies on THREAD_INFO_IN_TASK, but we define that unconditionally. + */ +#define raw_smp_processor_id() (current_thread_info()->cpu) + +#if defined(CONFIG_HOTPLUG_CPU) +int __cpu_disable(void); +void __cpu_die(unsigned int cpu); +#endif /* CONFIG_HOTPLUG_CPU */ + +#else + +static inline void show_ipi_stats(struct seq_file *p, int prec) +{ +} + +static inline int riscv_hartid_to_cpuid(unsigned long hartid) +{ + if (hartid == boot_cpu_hartid) + return 0; + + return -1; +} +static inline unsigned long cpuid_to_hartid_map(int cpu) +{ + return boot_cpu_hartid; +} + +static inline void riscv_set_ipi_ops(const struct riscv_ipi_ops *ops) +{ +} + +static inline void riscv_clear_ipi(void) +{ +} + +#endif /* CONFIG_SMP */ + +#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_SMP) +bool cpu_has_hotplug(unsigned int cpu); +#else +static inline bool cpu_has_hotplug(unsigned int cpu) +{ + return false; +} +#endif + +#endif /* _ASM_RISCV_SMP_H */ diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h new file mode 100644 index 000000000..f49406605 --- /dev/null +++ b/arch/riscv/include/asm/soc.h @@ -0,0 +1,24 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2020 Western Digital Corporation or its affiliates.
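The riscv_ipi_ops hooks above are filled in by whichever driver actually delivers IPIs; a minimal sketch of the registration pattern follows (the my_* names are hypothetical, not from the patch):

/* Illustrative only: registering custom IPI callbacks. */
static void my_ipi_inject(const struct cpumask *target)
{
	/* e.g. raise the software interrupt on each target hart */
}

static void my_ipi_clear(void)
{
	/* acknowledge the software interrupt on the local hart */
}

static const struct riscv_ipi_ops my_ipi_ops = {
	.ipi_inject = my_ipi_inject,
	.ipi_clear = my_ipi_clear,
};

static void __init my_ipi_init(void)
{
	riscv_set_ipi_ops(&my_ipi_ops);
}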
+ * Copyright (C) 2020 Google, Inc + */ + +#ifndef _ASM_RISCV_SOC_H +#define _ASM_RISCV_SOC_H + +#include <linux/of.h> +#include <linux/linkage.h> +#include <linux/types.h> + +#define SOC_EARLY_INIT_DECLARE(name, compat, fn) \ + static const struct of_device_id __soc_early_init__##name \ + __used __section("__soc_early_init_table") \ + = { .compatible = compat, .data = fn } + +void soc_early_init(void); + +extern unsigned long __soc_early_init_table_start; +extern unsigned long __soc_early_init_table_end; + +#endif diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h new file mode 100644 index 000000000..63acaecc3 --- /dev/null +++ b/arch/riscv/include/asm/sparsemem.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_SPARSEMEM_H +#define _ASM_RISCV_SPARSEMEM_H + +#ifdef CONFIG_SPARSEMEM +#ifdef CONFIG_64BIT +#define MAX_PHYSMEM_BITS 56 +#else +#define MAX_PHYSMEM_BITS 34 +#endif /* CONFIG_64BIT */ +#define SECTION_SIZE_BITS 27 +#endif /* CONFIG_SPARSEMEM */ + +#endif /* _ASM_RISCV_SPARSEMEM_H */ diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h new file mode 100644 index 000000000..09093af46 --- /dev/null +++ b/arch/riscv/include/asm/stackprotector.h @@ -0,0 +1,30 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_STACKPROTECTOR_H +#define _ASM_RISCV_STACKPROTECTOR_H + +#include <linux/random.h> +#include <linux/version.h> + +extern unsigned long __stack_chk_guard; + +/* + * Initialize the stackprotector canary value. + * + * NOTE: this must only be called from functions that never return, + * and it must always be inlined. + */ +static __always_inline void boot_init_stack_canary(void) +{ + unsigned long canary; + + /* Try to get a semi random initial value. 
*/ + get_random_bytes(&canary, sizeof(canary)); + canary ^= LINUX_VERSION_CODE; + canary &= CANARY_MASK; + + current->stack_canary = canary; + if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK)) + __stack_chk_guard = current->stack_canary; +} +#endif /* _ASM_RISCV_STACKPROTECTOR_H */ diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h new file mode 100644 index 000000000..3450c1912 --- /dev/null +++ b/arch/riscv/include/asm/stacktrace.h @@ -0,0 +1,19 @@ +/* SPDX-License-Identifier: GPL-2.0 */ + +#ifndef _ASM_RISCV_STACKTRACE_H +#define _ASM_RISCV_STACKTRACE_H + +#include <linux/sched.h> +#include <asm/ptrace.h> + +struct stackframe { + unsigned long fp; + unsigned long ra; +}; + +extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs, + bool (*fn)(void *, unsigned long), void *arg); +extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task, + const char *loglvl); + +#endif /* _ASM_RISCV_STACKTRACE_H */ diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h new file mode 100644 index 000000000..909049366 --- /dev/null +++ b/arch/riscv/include/asm/string.h @@ -0,0 +1,32 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2013 Regents of the University of California + */ + +#ifndef _ASM_RISCV_STRING_H +#define _ASM_RISCV_STRING_H + +#include <linux/types.h> +#include <linux/linkage.h> + +#define __HAVE_ARCH_MEMSET +extern asmlinkage void *memset(void *, int, size_t); +extern asmlinkage void *__memset(void *, int, size_t); +#define __HAVE_ARCH_MEMCPY +extern asmlinkage void *memcpy(void *, const void *, size_t); +extern asmlinkage void *__memcpy(void *, const void *, size_t); +#define __HAVE_ARCH_MEMMOVE +extern asmlinkage void *memmove(void *, const void *, size_t); +extern asmlinkage void *__memmove(void *, const void *, size_t); +/* For those files which don't want to check by kasan. */ +#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__) +#define memcpy(dst, src, len) __memcpy(dst, src, len) +#define memset(s, c, n) __memset(s, c, n) +#define memmove(dst, src, len) __memmove(dst, src, len) + +#ifndef __NO_FORTIFY +#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */ +#endif + +#endif +#endif /* _ASM_RISCV_STRING_H */ diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h new file mode 100644 index 000000000..8be391c2a --- /dev/null +++ b/arch/riscv/include/asm/suspend.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (c) 2021 Western Digital Corporation or its affiliates. + * Copyright (c) 2022 Ventana Micro Systems Inc. 
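For the walk_stackframe() interface declared above, a minimal usage sketch (my_* names hypothetical; a NULL task/regs pair is assumed to mean "walk the current task", and returning true from the callback is assumed to continue the walk):

/* Illustrative only: print each frame of the current call chain. */
static bool my_print_entry(void *arg, unsigned long pc)
{
	pr_info("%pS\n", (void *)pc);
	return true;
}

static void my_dump_current(void)
{
	walk_stackframe(NULL, NULL, my_print_entry, NULL);
}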
+ */ + +#ifndef _ASM_RISCV_SUSPEND_H +#define _ASM_RISCV_SUSPEND_H + +#include <asm/ptrace.h> + +struct suspend_context { + /* Saved and restored by low-level functions */ + struct pt_regs regs; + /* Saved and restored by high-level functions */ + unsigned long scratch; + unsigned long tvec; + unsigned long ie; +#ifdef CONFIG_MMU + unsigned long satp; +#endif +}; + +/* Low-level CPU suspend entry function */ +int __cpu_suspend_enter(struct suspend_context *context); + +/* High-level CPU suspend which will save context and call finish() */ +int cpu_suspend(unsigned long arg, + int (*finish)(unsigned long arg, + unsigned long entry, + unsigned long context)); + +/* Low-level CPU resume entry function */ +int __cpu_resume_enter(unsigned long hartid, unsigned long context); + +#endif diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h new file mode 100644 index 000000000..11463489f --- /dev/null +++ b/arch/riscv/include/asm/switch_to.h @@ -0,0 +1,83 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_SWITCH_TO_H +#define _ASM_RISCV_SWITCH_TO_H + +#include <linux/jump_label.h> +#include <linux/sched/task_stack.h> +#include <asm/hwcap.h> +#include <asm/processor.h> +#include <asm/ptrace.h> +#include <asm/csr.h> + +#ifdef CONFIG_FPU +extern void __fstate_save(struct task_struct *save_to); +extern void __fstate_restore(struct task_struct *restore_from); + +static inline void __fstate_clean(struct pt_regs *regs) +{ + regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN; +} + +static inline void fstate_off(struct task_struct *task, + struct pt_regs *regs) +{ + regs->status = (regs->status & ~SR_FS) | SR_FS_OFF; +} + +static inline void fstate_save(struct task_struct *task, + struct pt_regs *regs) +{ + if ((regs->status & SR_FS) == SR_FS_DIRTY) { + __fstate_save(task); + __fstate_clean(regs); + } +} + +static inline void fstate_restore(struct task_struct *task, + struct pt_regs *regs) +{ + if ((regs->status & SR_FS) != SR_FS_OFF) { + __fstate_restore(task); + __fstate_clean(regs); + } +} + +static inline void __switch_to_aux(struct task_struct *prev, + struct task_struct *next) +{ + struct pt_regs *regs; + + regs = task_pt_regs(prev); + if (unlikely(regs->status & SR_SD)) + fstate_save(prev, regs); + fstate_restore(next, task_pt_regs(next)); +} + +static __always_inline bool has_fpu(void) +{ + return static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_FPU]); +} +#else +static __always_inline bool has_fpu(void) { return false; } +#define fstate_save(task, regs) do { } while (0) +#define fstate_restore(task, regs) do { } while (0) +#define __switch_to_aux(__prev, __next) do { } while (0) +#endif + +extern struct task_struct *__switch_to(struct task_struct *, + struct task_struct *); + +#define switch_to(prev, next, last) \ +do { \ + struct task_struct *__prev = (prev); \ + struct task_struct *__next = (next); \ + if (has_fpu()) \ + __switch_to_aux(__prev, __next); \ + ((last) = __switch_to(__prev, __next)); \ +} while (0) + +#endif /* _ASM_RISCV_SWITCH_TO_H */ diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h new file mode 100644 index 000000000..384a63b86 --- /dev/null +++ b/arch/riscv/include/asm/syscall.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved. + * Copyright 2010 Tilera Corporation. All Rights Reserved. 
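A sketch of how the suspend entry points above compose, under the assumption that cpu_suspend() saves the context and then hands finish() a resume entry address and context argument for the platform-specific call (my_* names hypothetical):

/* Illustrative only: a finish() callback for cpu_suspend(). */
static int my_suspend_finish(unsigned long arg, unsigned long entry,
			     unsigned long context)
{
	/* platform-specific suspend request (e.g. an SBI HSM call) */
	return -EOPNOTSUPP;
}

static int my_enter_suspend(void)
{
	return cpu_suspend(0, my_suspend_finish);
}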
+ * Copyright 2015 Regents of the University of California, Berkeley + * + * See asm-generic/syscall.h for descriptions of what we must do here. + */ + +#ifndef _ASM_RISCV_SYSCALL_H +#define _ASM_RISCV_SYSCALL_H + +#include <uapi/linux/audit.h> +#include <linux/sched.h> +#include <linux/err.h> + +/* The array of function pointers for syscalls. */ +extern void * const sys_call_table[]; +extern void * const compat_sys_call_table[]; + +/* + * Only the low 32 bits of the syscall number (held in a7) are meaningful, + * so we return int. This importantly ignores the high bits on 64-bit, so + * comparisons sign-extend the low 32 bits. + */ +static inline int syscall_get_nr(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->a7; +} + +static inline void syscall_rollback(struct task_struct *task, + struct pt_regs *regs) +{ + regs->a0 = regs->orig_a0; +} + +static inline long syscall_get_error(struct task_struct *task, + struct pt_regs *regs) +{ + unsigned long error = regs->a0; + + return IS_ERR_VALUE(error) ? error : 0; +} + +static inline long syscall_get_return_value(struct task_struct *task, + struct pt_regs *regs) +{ + return regs->a0; +} + +static inline void syscall_set_return_value(struct task_struct *task, + struct pt_regs *regs, + int error, long val) +{ + regs->a0 = (long) error ?: val; +} + +static inline void syscall_get_arguments(struct task_struct *task, + struct pt_regs *regs, + unsigned long *args) +{ + args[0] = regs->orig_a0; + args++; + memcpy(args, &regs->a1, 5 * sizeof(args[0])); +} + +static inline int syscall_get_arch(struct task_struct *task) +{ +#ifdef CONFIG_64BIT + return AUDIT_ARCH_RISCV64; +#else + return AUDIT_ARCH_RISCV32; +#endif +} + +asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t); +#endif /* _ASM_RISCV_SYSCALL_H */ diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h new file mode 100644 index 000000000..f704c8dd5 --- /dev/null +++ b/arch/riscv/include/asm/thread_info.h @@ -0,0 +1,123 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com> + * Copyright (C) 2012 Regents of the University of California + * Copyright (C) 2017 SiFive + */ + +#ifndef _ASM_RISCV_THREAD_INFO_H +#define _ASM_RISCV_THREAD_INFO_H + +#include <asm/page.h> +#include <linux/const.h> + +#ifdef CONFIG_KASAN +#define KASAN_STACK_ORDER 1 +#else +#define KASAN_STACK_ORDER 0 +#endif + +/* thread information allocation */ +#ifdef CONFIG_64BIT +#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER) +#else +#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER) +#endif +#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER) + +/* + * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by + * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry + * assembly.
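To make that overflow check concrete (assuming rv64 with KASAN disabled): THREAD_SIZE_ORDER is 2, so THREAD_SIZE is 16 KiB and THREAD_SHIFT is PAGE_SHIFT + 2 = 14. Aligning each stack to 2 * THREAD_SIZE = 32 KiB means every in-use sp inside [base, base + THREAD_SIZE) has bit 14 clear, while an sp that has just overflowed below the stack base has bit 14 set, so a single and-plus-branch in the entry path flags the overflow.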
+ */ +#ifdef CONFIG_VMAP_STACK +#define THREAD_ALIGN (2 * THREAD_SIZE) +#else +#define THREAD_ALIGN THREAD_SIZE +#endif + +#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) +#define OVERFLOW_STACK_SIZE SZ_4K +#define SHADOW_OVERFLOW_STACK_SIZE (1024) + +#ifndef __ASSEMBLY__ + +extern long shadow_stack[SHADOW_OVERFLOW_STACK_SIZE / sizeof(long)]; +extern unsigned long spin_shadow_stack; + +#include <asm/processor.h> +#include <asm/csr.h> + +/* + * low level task data that entry.S needs immediate access to + * - this struct should fit entirely inside of one cache line + * - if the members of this struct change, the assembly constants + * in asm-offsets.c must be updated accordingly + * - thread_info is included in task_struct at an offset of 0. This means that + * tp points to both thread_info and task_struct. + */ +struct thread_info { + unsigned long flags; /* low level flags */ + int preempt_count; /* 0=>preemptible, <0=>BUG */ + /* + * These stack pointers are overwritten on every system call or + * exception. SP is also saved to the stack so it can be recovered + * when overwritten. + */ + long kernel_sp; /* Kernel stack pointer */ + long user_sp; /* User stack pointer */ + int cpu; +}; + +/* + * macros/functions for gaining access to the thread information structure + * + * preempt_count needs to be 1 initially, until the scheduler is functional. + */ +#define INIT_THREAD_INFO(tsk) \ +{ \ + .flags = 0, \ + .preempt_count = INIT_PREEMPT_COUNT, \ +} + +#endif /* !__ASSEMBLY__ */ + +/* + * thread information flags + * - these are process state flags that various assembly files may need to + * access + * - pending work-to-be-done flags are in lowest half-word + * - other flags in upper half-word(s) + */ +#define TIF_SYSCALL_TRACE 0 /* syscall trace active */ +#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */ +#define TIF_SIGPENDING 2 /* signal pending */ +#define TIF_NEED_RESCHED 3 /* rescheduling necessary */ +#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */ +#define TIF_MEMDIE 5 /* is terminating due to OOM killer */ +#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */ +#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */ +#define TIF_SECCOMP 8 /* syscall secure computing */ +#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ +#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ +#define TIF_32BIT 11 /* compat-mode 32bit process */ + +#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE) +#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) +#define _TIF_SIGPENDING (1 << TIF_SIGPENDING) +#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) +#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT) +#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT) +#define _TIF_SECCOMP (1 << TIF_SECCOMP) +#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) +#define _TIF_UPROBE (1 << TIF_UPROBE) + +#define _TIF_WORK_MASK \ + (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ + _TIF_NOTIFY_SIGNAL | _TIF_UPROBE) + +#define _TIF_SYSCALL_WORK \ + (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \ + _TIF_SECCOMP) + +#endif /* _ASM_RISCV_THREAD_INFO_H */ diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h new file mode 100644 index 000000000..d6a7428f6 --- /dev/null +++ b/arch/riscv/include/asm/timex.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_TIMEX_H +#define
_ASM_RISCV_TIMEX_H + +#include <asm/csr.h> + +typedef unsigned long cycles_t; + +#ifdef CONFIG_RISCV_M_MODE + +#include <asm/clint.h> + +#ifdef CONFIG_64BIT +static inline cycles_t get_cycles(void) +{ + return readq_relaxed(clint_time_val); +} +#else /* !CONFIG_64BIT */ +static inline u32 get_cycles(void) +{ + return readl_relaxed(((u32 *)clint_time_val)); +} +#define get_cycles get_cycles + +static inline u32 get_cycles_hi(void) +{ + return readl_relaxed(((u32 *)clint_time_val) + 1); +} +#define get_cycles_hi get_cycles_hi +#endif /* CONFIG_64BIT */ + +/* + * Much like MIPS, we may not have a viable counter to use at an early point + * in the boot process. Unfortunately we don't have a fallback, so instead + * we just return 0. + */ +static inline unsigned long random_get_entropy(void) +{ + if (unlikely(clint_time_val == NULL)) + return random_get_entropy_fallback(); + return get_cycles(); +} +#define random_get_entropy() random_get_entropy() + +#else /* CONFIG_RISCV_M_MODE */ + +static inline cycles_t get_cycles(void) +{ + return csr_read(CSR_TIME); +} +#define get_cycles get_cycles + +static inline u32 get_cycles_hi(void) +{ + return csr_read(CSR_TIMEH); +} +#define get_cycles_hi get_cycles_hi + +#endif /* !CONFIG_RISCV_M_MODE */ + +#ifdef CONFIG_64BIT +static inline u64 get_cycles64(void) +{ + return get_cycles(); +} +#else /* CONFIG_64BIT */ +static inline u64 get_cycles64(void) +{ + u32 hi, lo; + + do { + hi = get_cycles_hi(); + lo = get_cycles(); + } while (hi != get_cycles_hi()); + + return ((u64)hi << 32) | lo; +} +#endif /* CONFIG_64BIT */ + +#define ARCH_HAS_READ_CURRENT_TIMER +static inline int read_current_timer(unsigned long *timer_val) +{ + *timer_val = get_cycles(); + return 0; +} + +extern void time_init(void); + +#endif /* _ASM_RISCV_TIMEX_H */ diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h new file mode 100644 index 000000000..120bcf2ed --- /dev/null +++ b/arch/riscv/include/asm/tlb.h @@ -0,0 +1,21 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_TLB_H +#define _ASM_RISCV_TLB_H + +struct mmu_gather; + +static void tlb_flush(struct mmu_gather *tlb); + +#define tlb_flush tlb_flush +#include <asm-generic/tlb.h> + +static inline void tlb_flush(struct mmu_gather *tlb) +{ + flush_tlb_mm(tlb->mm); +} + +#endif /* _ASM_RISCV_TLB_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h new file mode 100644 index 000000000..a09196f8d --- /dev/null +++ b/arch/riscv/include/asm/tlbflush.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com> + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _ASM_RISCV_TLBFLUSH_H +#define _ASM_RISCV_TLBFLUSH_H + +#include <linux/mm_types.h> +#include <asm/smp.h> +#include <asm/errata_list.h> + +#ifdef CONFIG_MMU +extern unsigned long asid_mask; + +static inline void local_flush_tlb_all(void) +{ + __asm__ __volatile__ ("sfence.vma" : : : "memory"); +} + +/* Flush one page from local TLB */ +static inline void local_flush_tlb_page(unsigned long addr) +{ + ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory")); +} +#else /* CONFIG_MMU */ +#define local_flush_tlb_all() do { } while (0) +#define local_flush_tlb_page(addr) do { } while (0) +#endif /* CONFIG_MMU */ + +#if defined(CONFIG_SMP) && defined(CONFIG_MMU) +void flush_tlb_all(void); +void flush_tlb_mm(struct mm_struct 
*mm); +void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr); +void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +#ifdef CONFIG_TRANSPARENT_HUGEPAGE +#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE +void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, + unsigned long end); +#endif +#else /* CONFIG_SMP && CONFIG_MMU */ + +#define flush_tlb_all() local_flush_tlb_all() +#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr) + +static inline void flush_tlb_range(struct vm_area_struct *vma, + unsigned long start, unsigned long end) +{ + local_flush_tlb_all(); +} + +#define flush_tlb_mm(mm) flush_tlb_all() +#endif /* !CONFIG_SMP || !CONFIG_MMU */ + +/* Flush a range of kernel pages */ +static inline void flush_tlb_kernel_range(unsigned long start, + unsigned long end) +{ + flush_tlb_all(); +} + +#endif /* _ASM_RISCV_TLBFLUSH_H */ diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h new file mode 100644 index 000000000..ec0cab9fb --- /dev/null +++ b/arch/riscv/include/asm/uaccess.h @@ -0,0 +1,341 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + * + * This file was copied from include/asm-generic/uaccess.h + */ + +#ifndef _ASM_RISCV_UACCESS_H +#define _ASM_RISCV_UACCESS_H + +#include <asm/asm-extable.h> +#include <asm/pgtable.h> /* for TASK_SIZE */ + +/* + * User space memory access functions + */ +#ifdef CONFIG_MMU +#include <linux/errno.h> +#include <linux/compiler.h> +#include <linux/thread_info.h> +#include <asm/byteorder.h> +#include <asm/extable.h> +#include <asm/asm.h> +#include <asm-generic/access_ok.h> + +#define __enable_user_access() \ + __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory") +#define __disable_user_access() \ + __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory") + +/* + * The exception table consists of pairs of addresses: the first is the + * address of an instruction that is allowed to fault, and the second is + * the address at which the program should continue. No registers are + * modified, so it is entirely up to the continuation code to figure out + * what to do. + * + * All the routines below use bits of fixup code that are out of line + * with the main instruction path. This means when everything is well, + * we don't even have to jump over them. Further, they do not intrude + * on our cache or tlb entries. + */ + +#define __LSW 0 +#define __MSW 1 + +/* + * The "__xxx" versions of the user access functions do not verify the address + * space - it must have been done previously with a separate "access_ok()" + * call. 
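As the comment above says, the double-underscore accessors defined next skip the address-space check, so callers pair them with an explicit access_ok(); the expected pattern is roughly this (illustrative sketch, my_read_u32 hypothetical):

/* Illustrative only: __get_user() after a separate access_ok(). */
static long my_read_u32(u32 __user *uptr, u32 *out)
{
	if (!access_ok(uptr, sizeof(*uptr)))
		return -EFAULT;
	return __get_user(*out, uptr);	/* 0 on success, -EFAULT on fault */
}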
+ */ + +#define __get_user_asm(insn, x, ptr, err) \ +do { \ + __typeof__(x) __x; \ + __asm__ __volatile__ ( \ + "1:\n" \ + " " insn " %1, %2\n" \ + "2:\n" \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \ + : "+r" (err), "=&r" (__x) \ + : "m" (*(ptr))); \ + (x) = __x; \ +} while (0) + +#ifdef CONFIG_64BIT +#define __get_user_8(x, ptr, err) \ + __get_user_asm("ld", x, ptr, err) +#else /* !CONFIG_64BIT */ +#define __get_user_8(x, ptr, err) \ +do { \ + u32 __user *__ptr = (u32 __user *)(ptr); \ + u32 __lo, __hi; \ + __asm__ __volatile__ ( \ + "1:\n" \ + " lw %1, %3\n" \ + "2:\n" \ + " lw %2, %4\n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \ + _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \ + : "+r" (err), "=&r" (__lo), "=r" (__hi) \ + : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \ + if (err) \ + __hi = 0; \ + (x) = (__typeof__(x))((__typeof__((x)-(x)))( \ + (((u64)__hi << 32) | __lo))); \ +} while (0) +#endif /* CONFIG_64BIT */ + +#define __get_user_nocheck(x, __gu_ptr, __gu_err) \ +do { \ + switch (sizeof(*__gu_ptr)) { \ + case 1: \ + __get_user_asm("lb", (x), __gu_ptr, __gu_err); \ + break; \ + case 2: \ + __get_user_asm("lh", (x), __gu_ptr, __gu_err); \ + break; \ + case 4: \ + __get_user_asm("lw", (x), __gu_ptr, __gu_err); \ + break; \ + case 8: \ + __get_user_8((x), __gu_ptr, __gu_err); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +} while (0) + +/** + * __get_user: - Get a simple variable from user space, with less checking. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define __get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + long __gu_err = 0; \ + \ + __chk_user_ptr(__gu_ptr); \ + \ + __enable_user_access(); \ + __get_user_nocheck(x, __gu_ptr, __gu_err); \ + __disable_user_access(); \ + \ + __gu_err; \ +}) + +/** + * get_user: - Get a simple variable from user space. + * @x: Variable to store result. + * @ptr: Source address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple variable from user space to kernel + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and the result of + * dereferencing @ptr must be assignable to @x without a cast. + * + * Returns zero on success, or -EFAULT on error. + * On error, the variable @x is set to zero. + */ +#define get_user(x, ptr) \ +({ \ + const __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? 
\ + __get_user((x), __p) : \ + ((x) = (__force __typeof__(x))0, -EFAULT); \ +}) + +#define __put_user_asm(insn, x, ptr, err) \ +do { \ + __typeof__(*(ptr)) __x = x; \ + __asm__ __volatile__ ( \ + "1:\n" \ + " " insn " %z2, %1\n" \ + "2:\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \ + : "+r" (err), "=m" (*(ptr)) \ + : "rJ" (__x)); \ +} while (0) + +#ifdef CONFIG_64BIT +#define __put_user_8(x, ptr, err) \ + __put_user_asm("sd", x, ptr, err) +#else /* !CONFIG_64BIT */ +#define __put_user_8(x, ptr, err) \ +do { \ + u32 __user *__ptr = (u32 __user *)(ptr); \ + u64 __x = (__typeof__((x)-(x)))(x); \ + __asm__ __volatile__ ( \ + "1:\n" \ + " sw %z3, %1\n" \ + "2:\n" \ + " sw %z4, %2\n" \ + "3:\n" \ + _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \ + _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \ + : "+r" (err), \ + "=m" (__ptr[__LSW]), \ + "=m" (__ptr[__MSW]) \ + : "rJ" (__x), "rJ" (__x >> 32)); \ +} while (0) +#endif /* CONFIG_64BIT */ + +#define __put_user_nocheck(x, __gu_ptr, __pu_err) \ +do { \ + switch (sizeof(*__gu_ptr)) { \ + case 1: \ + __put_user_asm("sb", (x), __gu_ptr, __pu_err); \ + break; \ + case 2: \ + __put_user_asm("sh", (x), __gu_ptr, __pu_err); \ + break; \ + case 4: \ + __put_user_asm("sw", (x), __gu_ptr, __pu_err); \ + break; \ + case 8: \ + __put_user_8((x), __gu_ptr, __pu_err); \ + break; \ + default: \ + BUILD_BUG(); \ + } \ +} while (0) + +/** + * __put_user: - Write a simple value into user space, with less checking. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. The value of @x is copied to avoid + * re-ordering where @x is evaluated inside the block that enables user-space + * access (thus bypassing user space protection if @x is a function). + * + * Caller must check the pointer with access_ok() before calling this + * function. + * + * Returns zero on success, or -EFAULT on error. + */ +#define __put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \ + __typeof__(*__gu_ptr) __val = (x); \ + long __pu_err = 0; \ + \ + __chk_user_ptr(__gu_ptr); \ + \ + __enable_user_access(); \ + __put_user_nocheck(__val, __gu_ptr, __pu_err); \ + __disable_user_access(); \ + \ + __pu_err; \ +}) + +/** + * put_user: - Write a simple value into user space. + * @x: Value to copy to user space. + * @ptr: Destination address, in user space. + * + * Context: User context only. This function may sleep. + * + * This macro copies a single simple value from kernel space to user + * space. It supports simple types like char and int, but not larger + * data types like structures or arrays. + * + * @ptr must have pointer-to-simple-variable type, and @x must be assignable + * to the result of dereferencing @ptr. + * + * Returns zero on success, or -EFAULT on error. + */ +#define put_user(x, ptr) \ +({ \ + __typeof__(*(ptr)) __user *__p = (ptr); \ + might_fault(); \ + access_ok(__p, sizeof(*__p)) ? 
\ + __put_user((x), __p) : \ + -EFAULT; \ +}) + + +unsigned long __must_check __asm_copy_to_user(void __user *to, + const void *from, unsigned long n); +unsigned long __must_check __asm_copy_from_user(void *to, + const void __user *from, unsigned long n); + +static inline unsigned long +raw_copy_from_user(void *to, const void __user *from, unsigned long n) +{ + return __asm_copy_from_user(to, from, n); +} + +static inline unsigned long +raw_copy_to_user(void __user *to, const void *from, unsigned long n) +{ + return __asm_copy_to_user(to, from, n); +} + +extern long strncpy_from_user(char *dest, const char __user *src, long count); + +extern long __must_check strnlen_user(const char __user *str, long n); + +extern +unsigned long __must_check __clear_user(void __user *addr, unsigned long n); + +static inline +unsigned long __must_check clear_user(void __user *to, unsigned long n) +{ + might_fault(); + return access_ok(to, n) ? + __clear_user(to, n) : n; +} + +#define __get_kernel_nofault(dst, src, type, err_label) \ +do { \ + long __kr_err; \ + \ + __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \ + if (unlikely(__kr_err)) \ + goto err_label; \ +} while (0) + +#define __put_kernel_nofault(dst, src, type, err_label) \ +do { \ + long __kr_err; \ + \ + __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \ + if (unlikely(__kr_err)) \ + goto err_label; \ +} while (0) + +#else /* CONFIG_MMU */ +#include <asm-generic/uaccess.h> +#endif /* CONFIG_MMU */ +#endif /* _ASM_RISCV_UACCESS_H */ diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h new file mode 100644 index 000000000..221630bdb --- /dev/null +++ b/arch/riscv/include/asm/unistd.h @@ -0,0 +1,26 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +/* + * There is explicitly no include guard here because this file is expected to + * be included multiple times. 
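A minimal sketch of the nofault helpers just defined (my_peek_int is hypothetical): the supplied label is taken when the access faults, instead of returning an error code.

/* Illustrative only: probing a kernel address that may fault. */
static int my_peek_int(const int *kptr, int *out)
{
	int val;

	__get_kernel_nofault(&val, kptr, int, Efault);
	*out = val;
	return 0;
Efault:
	return -EFAULT;
}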
+ */ + +#define __ARCH_WANT_SYS_CLONE + +#ifdef CONFIG_COMPAT +#define __ARCH_WANT_COMPAT_TRUNCATE64 +#define __ARCH_WANT_COMPAT_FTRUNCATE64 +#define __ARCH_WANT_COMPAT_FALLOCATE +#define __ARCH_WANT_COMPAT_PREAD64 +#define __ARCH_WANT_COMPAT_PWRITE64 +#define __ARCH_WANT_COMPAT_SYNC_FILE_RANGE +#define __ARCH_WANT_COMPAT_READAHEAD +#define __ARCH_WANT_COMPAT_FADVISE64_64 +#endif + +#include <uapi/asm/unistd.h> + +#define NR_syscalls (__NR_syscalls) diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h new file mode 100644 index 000000000..f2183e00f --- /dev/null +++ b/arch/riscv/include/asm/uprobes.h @@ -0,0 +1,40 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#ifndef _ASM_RISCV_UPROBES_H +#define _ASM_RISCV_UPROBES_H + +#include <asm/probes.h> +#include <asm/patch.h> +#include <asm/bug.h> + +#define MAX_UINSN_BYTES 8 + +#ifdef CONFIG_RISCV_ISA_C +#define UPROBE_SWBP_INSN __BUG_INSN_16 +#define UPROBE_SWBP_INSN_SIZE 2 +#else +#define UPROBE_SWBP_INSN __BUG_INSN_32 +#define UPROBE_SWBP_INSN_SIZE 4 +#endif +#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES + +typedef u32 uprobe_opcode_t; + +struct arch_uprobe_task { + unsigned long saved_cause; +}; + +struct arch_uprobe { + union { + u8 insn[MAX_UINSN_BYTES]; + u8 ixol[MAX_UINSN_BYTES]; + }; + struct arch_probe_insn api; + unsigned long insn_size; + bool simulate; +}; + +bool uprobe_breakpoint_handler(struct pt_regs *regs); +bool uprobe_single_step_handler(struct pt_regs *regs); + +#endif /* _ASM_RISCV_UPROBES_H */ diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h new file mode 100644 index 000000000..af981426f --- /dev/null +++ b/arch/riscv/include/asm/vdso.h @@ -0,0 +1,37 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 ARM Limited + * Copyright (C) 2014 Regents of the University of California + * Copyright (C) 2017 SiFive + */ + +#ifndef _ASM_RISCV_VDSO_H +#define _ASM_RISCV_VDSO_H + +/* + * All systems with an MMU have a VDSO, but systems without an MMU don't + * support shared libraries and therefore don't have one.
+ */ +#ifdef CONFIG_MMU + +#define __VVAR_PAGES 2 + +#ifndef __ASSEMBLY__ +#include <generated/vdso-offsets.h> + +#define VDSO_SYMBOL(base, name) \ + (void __user *)((unsigned long)(base) + __vdso_##name##_offset) + +#ifdef CONFIG_COMPAT +#include <generated/compat_vdso-offsets.h> + +#define COMPAT_VDSO_SYMBOL(base, name) \ + (void __user *)((unsigned long)(base) + compat__vdso_##name##_offset) + +#endif /* CONFIG_COMPAT */ + +#endif /* !__ASSEMBLY__ */ + +#endif /* CONFIG_MMU */ + +#endif /* _ASM_RISCV_VDSO_H */ diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h new file mode 100644 index 000000000..df6ea65c1 --- /dev/null +++ b/arch/riscv/include/asm/vdso/clocksource.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSOCLOCKSOURCE_H +#define __ASM_VDSOCLOCKSOURCE_H + +#define VDSO_ARCH_CLOCKMODES \ + VDSO_CLOCKMODE_ARCHTIMER + +#endif diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h new file mode 100644 index 000000000..77d9c2f72 --- /dev/null +++ b/arch/riscv/include/asm/vdso/gettimeofday.h @@ -0,0 +1,88 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_GETTIMEOFDAY_H +#define __ASM_VDSO_GETTIMEOFDAY_H + +#ifndef __ASSEMBLY__ + +#include <asm/barrier.h> +#include <asm/unistd.h> +#include <asm/csr.h> +#include <uapi/linux/time.h> + +#define VDSO_HAS_CLOCK_GETRES 1 + +static __always_inline +int gettimeofday_fallback(struct __kernel_old_timeval *_tv, + struct timezone *_tz) +{ + register struct __kernel_old_timeval *tv asm("a0") = _tv; + register struct timezone *tz asm("a1") = _tz; + register long ret asm("a0"); + register long nr asm("a7") = __NR_gettimeofday; + + asm volatile ("ecall\n" + : "=r" (ret) + : "r"(tv), "r"(tz), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm("a7") = __NR_clock_gettime; + + asm volatile ("ecall\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline +int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts) +{ + register clockid_t clkid asm("a0") = _clkid; + register struct __kernel_timespec *ts asm("a1") = _ts; + register long ret asm("a0"); + register long nr asm("a7") = __NR_clock_getres; + + asm volatile ("ecall\n" + : "=r" (ret) + : "r"(clkid), "r"(ts), "r"(nr) + : "memory"); + + return ret; +} + +static __always_inline u64 __arch_get_hw_counter(s32 clock_mode, + const struct vdso_data *vd) +{ + /* + * The purpose of csr_read(CSR_TIME) is to trap the system into + * M-mode to obtain the value of CSR_TIME. 
Hence, unlike other + * architectures, no fence instructions surround the csr_read(). + */ + return csr_read(CSR_TIME); +} + +static __always_inline const struct vdso_data *__arch_get_vdso_data(void) +{ + return _vdso_data; +} + +#ifdef CONFIG_TIME_NS +static __always_inline +const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd) +{ + return _timens_data; +} +#endif +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_GETTIMEOFDAY_H */ diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h new file mode 100644 index 000000000..fa70cfe50 --- /dev/null +++ b/arch/riscv/include/asm/vdso/processor.h @@ -0,0 +1,36 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +#ifndef __ASM_VDSO_PROCESSOR_H +#define __ASM_VDSO_PROCESSOR_H + +#ifndef __ASSEMBLY__ + +#include <linux/jump_label.h> +#include <asm/barrier.h> +#include <asm/hwcap.h> + +static inline void cpu_relax(void) +{ + if (!static_branch_likely(&riscv_isa_ext_keys[RISCV_ISA_EXT_KEY_ZIHINTPAUSE])) { +#ifdef __riscv_muldiv + int dummy; + /* In lieu of a halt instruction, induce a long-latency stall. */ + __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); +#endif + } else { + /* + * Reduce instruction retirement. + * This assumes the PC changes. + */ +#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE + __asm__ __volatile__ ("pause"); +#else + /* Encoding of the pause instruction */ + __asm__ __volatile__ (".4byte 0x100000F"); +#endif + } + barrier(); +} + +#endif /* __ASSEMBLY__ */ + +#endif /* __ASM_VDSO_PROCESSOR_H */ diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h new file mode 100644 index 000000000..82fd5d83b --- /dev/null +++ b/arch/riscv/include/asm/vdso/vsyscall.h @@ -0,0 +1,27 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef __ASM_VDSO_VSYSCALL_H +#define __ASM_VDSO_VSYSCALL_H + +#ifndef __ASSEMBLY__ + +#include <linux/timekeeper_internal.h> +#include <vdso/datapage.h> + +extern struct vdso_data *vdso_data; + +/* + * Update the vDSO data page to keep in sync with kernel timekeeping.
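A quick decode of the raw word in cpu_relax() above, for readers without the Zihintpause spec at hand: PAUSE is defined as a FENCE with fm=0, pred=W, succ=0 and rd=rs1=x0, which assembles to 0x0100000F, exactly the constant emitted by the .4byte fallback when the toolchain cannot encode "pause" itself.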
+ */ +static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void) +{ + return vdso_data; +} + +#define __arch_get_k_vdso_data __riscv_get_k_vdso_data + +/* The asm-generic header needs to be included after the definitions above */ +#include <asm-generic/vdso/vsyscall.h> + +#endif /* !__ASSEMBLY__ */ + +#endif /* __ASM_VDSO_VSYSCALL_H */ diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h new file mode 100644 index 000000000..cb89af3f0 --- /dev/null +++ b/arch/riscv/include/asm/vendorid_list.h @@ -0,0 +1,11 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2021 SiFive + */ +#ifndef ASM_VENDOR_LIST_H +#define ASM_VENDOR_LIST_H + +#define SIFIVE_VENDOR_ID 0x489 +#define THEAD_VENDOR_ID 0x5b7 + +#endif diff --git a/arch/riscv/include/asm/vermagic.h b/arch/riscv/include/asm/vermagic.h new file mode 100644 index 000000000..7b9441a57 --- /dev/null +++ b/arch/riscv/include/asm/vermagic.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* Copyright (C) 2017 Andes Technology Corporation */ + +#ifndef _ASM_VERMAGIC_H +#define _ASM_VERMAGIC_H + +#define MODULE_ARCH_VERMAGIC "riscv" + +#endif /* _ASM_VERMAGIC_H */ diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h new file mode 100644 index 000000000..ff9abc00d --- /dev/null +++ b/arch/riscv/include/asm/vmalloc.h @@ -0,0 +1,4 @@ +#ifndef _ASM_RISCV_VMALLOC_H +#define _ASM_RISCV_VMALLOC_H + +#endif /* _ASM_RISCV_VMALLOC_H */ diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h new file mode 100644 index 000000000..7c086ac6e --- /dev/null +++ b/arch/riscv/include/asm/word-at-a-time.h @@ -0,0 +1,48 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2012 Regents of the University of California + * + * Derived from arch/x86/include/asm/word-at-a-time.h + */ + +#ifndef _ASM_RISCV_WORD_AT_A_TIME_H +#define _ASM_RISCV_WORD_AT_A_TIME_H + + +#include <linux/kernel.h> + +struct word_at_a_time { + const unsigned long one_bits, high_bits; +}; + +#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) } + +static inline unsigned long has_zero(unsigned long val, + unsigned long *bits, const struct word_at_a_time *c) +{ + unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits; + *bits = mask; + return mask; +} + +static inline unsigned long prep_zero_mask(unsigned long val, + unsigned long bits, const struct word_at_a_time *c) +{ + return bits; +} + +static inline unsigned long create_zero_mask(unsigned long bits) +{ + bits = (bits - 1) & ~bits; + return bits >> 7; +} + +static inline unsigned long find_zero(unsigned long mask) +{ + return fls64(mask) >> 3; +} + +/* The mask we created is directly usable as a bytemask */ +#define zero_bytemask(mask) (mask) + +#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */ diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h new file mode 100644 index 000000000..b65bf6306 --- /dev/null +++ b/arch/riscv/include/asm/xip_fixup.h @@ -0,0 +1,31 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * XIP fixup macros, only useful in assembly. 
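A worked example of the has_zero()/create_zero_mask()/find_zero() trio above, assuming a 64-bit little-endian word: for val = 0x0000000000636261 ("abc" followed by a NUL in byte 3), (val - one_bits) & ~val & high_bits sets the 0x80 bit of byte 3 (the NUL) and, via borrow propagation, of every higher byte as well; create_zero_mask() keeps only the bits below the lowest set bit, giving 0x0000000000ffffff, and find_zero() maps that to fls64(0xffffff) >> 3 = 3, the byte index of the first NUL.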
+ */ +#ifndef _ASM_RISCV_XIP_FIXUP_H +#define _ASM_RISCV_XIP_FIXUP_H + +#include <linux/pgtable.h> + +#ifdef CONFIG_XIP_KERNEL +.macro XIP_FIXUP_OFFSET reg + REG_L t0, _xip_fixup + add \reg, \reg, t0 +.endm +.macro XIP_FIXUP_FLASH_OFFSET reg + la t0, __data_loc + REG_L t1, _xip_phys_offset + sub \reg, \reg, t1 + add \reg, \reg, t0 +.endm + +_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET +_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET +#else +.macro XIP_FIXUP_OFFSET reg +.endm +.macro XIP_FIXUP_FLASH_OFFSET reg +.endm +#endif /* CONFIG_XIP_KERNEL */ + +#endif diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild new file mode 100644 index 000000000..f66554cd5 --- /dev/null +++ b/arch/riscv/include/uapi/asm/Kbuild @@ -0,0 +1 @@ +# SPDX-License-Identifier: GPL-2.0 diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h new file mode 100644 index 000000000..fb187a33c --- /dev/null +++ b/arch/riscv/include/uapi/asm/auxvec.h @@ -0,0 +1,39 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_AUXVEC_H +#define _UAPI_ASM_RISCV_AUXVEC_H + +/* vDSO location */ +#define AT_SYSINFO_EHDR 33 + +/* + * The set of entries below represents more extensive information + * about the caches, in the form of two entries per cache type, + * one entry containing the cache size in bytes, and the other + * containing the cache line size in bytes in the bottom 16 bits + * and the cache associativity in the next 16 bits. + * + * The associativity is such that if N is the 16-bit value, the + * cache is N-way set associative. A value of 0xffff means fully + * associative, a value of 1 means directly mapped. + * + * For all these fields, a value of 0 means that the information + * is not known. + */ +#define AT_L1I_CACHESIZE 40 +#define AT_L1I_CACHEGEOMETRY 41 +#define AT_L1D_CACHESIZE 42 +#define AT_L1D_CACHEGEOMETRY 43 +#define AT_L2_CACHESIZE 44 +#define AT_L2_CACHEGEOMETRY 45 +#define AT_L3_CACHESIZE 46 +#define AT_L3_CACHEGEOMETRY 47 + +/* entries in ARCH_DLINFO */ +#define AT_VECTOR_SIZE_ARCH 9 + +#endif /* _UAPI_ASM_RISCV_AUXVEC_H */ diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h new file mode 100644 index 000000000..7d0b32e3b --- /dev/null +++ b/arch/riscv/include/uapi/asm/bitsperlong.h @@ -0,0 +1,14 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd.
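Per the auxvec comment above, a userspace consumer would decode a geometry entry roughly like this (illustrative sketch; getauxval() comes from <sys/auxv.h>, the function name is hypothetical):

#include <sys/auxv.h>

/* Illustrative only: decode an AT_*_CACHEGEOMETRY value. */
static void my_decode_l1d(unsigned int *line_size, unsigned int *ways)
{
	unsigned long geom = getauxval(AT_L1D_CACHEGEOMETRY);

	*line_size = geom & 0xffff;	/* bytes; 0 = unknown */
	*ways = (geom >> 16) & 0xffff;	/* 0xffff = fully associative */
}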
+ * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H +#define _UAPI_ASM_RISCV_BITSPERLONG_H + +#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8) + +#include <asm-generic/bitsperlong.h> + +#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */ diff --git a/arch/riscv/include/uapi/asm/bpf_perf_event.h b/arch/riscv/include/uapi/asm/bpf_perf_event.h new file mode 100644 index 000000000..6cb1c2823 --- /dev/null +++ b/arch/riscv/include/uapi/asm/bpf_perf_event.h @@ -0,0 +1,9 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__ +#define _UAPI__ASM_BPF_PERF_EVENT_H__ + +#include <asm/ptrace.h> + +typedef struct user_regs_struct bpf_user_pt_regs_t; + +#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */ diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h new file mode 100644 index 000000000..f671e16bf --- /dev/null +++ b/arch/riscv/include/uapi/asm/byteorder.h @@ -0,0 +1,12 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2015 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_BYTEORDER_H +#define _UAPI_ASM_RISCV_BYTEORDER_H + +#include <linux/byteorder/little_endian.h> + +#endif /* _UAPI_ASM_RISCV_BYTEORDER_H */ diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h new file mode 100644 index 000000000..d696d6610 --- /dev/null +++ b/arch/riscv/include/uapi/asm/elf.h @@ -0,0 +1,98 @@ +/* + * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com> + * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se> + * Copyright (C) 2012 Regents of the University of California + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + */ + +#ifndef _UAPI_ASM_RISCV_ELF_H +#define _UAPI_ASM_RISCV_ELF_H + +#include <asm/ptrace.h> + +/* ELF register definitions */ +typedef unsigned long elf_greg_t; +typedef struct user_regs_struct elf_gregset_t; +#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t)) + +/* We don't support f without d, or q. 
*/ +typedef __u64 elf_fpreg_t; +typedef union __riscv_fp_state elf_fpregset_t; +#define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t)) + +#if __riscv_xlen == 64 +#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info) +#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info) +#else +#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info) +#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info) +#endif + +/* + * RISC-V relocation types + */ + +/* Relocation types used by the dynamic linker */ +#define R_RISCV_NONE 0 +#define R_RISCV_32 1 +#define R_RISCV_64 2 +#define R_RISCV_RELATIVE 3 +#define R_RISCV_COPY 4 +#define R_RISCV_JUMP_SLOT 5 +#define R_RISCV_TLS_DTPMOD32 6 +#define R_RISCV_TLS_DTPMOD64 7 +#define R_RISCV_TLS_DTPREL32 8 +#define R_RISCV_TLS_DTPREL64 9 +#define R_RISCV_TLS_TPREL32 10 +#define R_RISCV_TLS_TPREL64 11 + +/* Relocation types not used by the dynamic linker */ +#define R_RISCV_BRANCH 16 +#define R_RISCV_JAL 17 +#define R_RISCV_CALL 18 +#define R_RISCV_CALL_PLT 19 +#define R_RISCV_GOT_HI20 20 +#define R_RISCV_TLS_GOT_HI20 21 +#define R_RISCV_TLS_GD_HI20 22 +#define R_RISCV_PCREL_HI20 23 +#define R_RISCV_PCREL_LO12_I 24 +#define R_RISCV_PCREL_LO12_S 25 +#define R_RISCV_HI20 26 +#define R_RISCV_LO12_I 27 +#define R_RISCV_LO12_S 28 +#define R_RISCV_TPREL_HI20 29 +#define R_RISCV_TPREL_LO12_I 30 +#define R_RISCV_TPREL_LO12_S 31 +#define R_RISCV_TPREL_ADD 32 +#define R_RISCV_ADD8 33 +#define R_RISCV_ADD16 34 +#define R_RISCV_ADD32 35 +#define R_RISCV_ADD64 36 +#define R_RISCV_SUB8 37 +#define R_RISCV_SUB16 38 +#define R_RISCV_SUB32 39 +#define R_RISCV_SUB64 40 +#define R_RISCV_GNU_VTINHERIT 41 +#define R_RISCV_GNU_VTENTRY 42 +#define R_RISCV_ALIGN 43 +#define R_RISCV_RVC_BRANCH 44 +#define R_RISCV_RVC_JUMP 45 +#define R_RISCV_LUI 46 +#define R_RISCV_GPREL_I 47 +#define R_RISCV_GPREL_S 48 +#define R_RISCV_TPREL_I 49 +#define R_RISCV_TPREL_S 50 +#define R_RISCV_RELAX 51 +#define R_RISCV_SUB6 52 +#define R_RISCV_SET6 53 +#define R_RISCV_SET8 54 +#define R_RISCV_SET16 55 +#define R_RISCV_SET32 56 +#define R_RISCV_32_PCREL 57 + + +#endif /* _UAPI_ASM_RISCV_ELF_H */ diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h new file mode 100644 index 000000000..46dc3f5ee --- /dev/null +++ b/arch/riscv/include/uapi/asm/hwcap.h @@ -0,0 +1,25 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copied from arch/arm64/include/asm/hwcap.h + * + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2017 SiFive + */ +#ifndef _UAPI_ASM_RISCV_HWCAP_H +#define _UAPI_ASM_RISCV_HWCAP_H + +/* + * Linux saves the floating-point registers according to the ISA Linux is + * executing on, as opposed to the ISA the user program is compiled for. This + * is necessary for a handful of esoteric use cases: for example, userspace + * threading libraries must be able to examine the actual machine state in + * order to fully reconstruct the state of a thread. 
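Since this hwcap header is uapi, the COMPAT_HWCAP_ISA_* letter bits defined just below are what the kernel reports through AT_HWCAP, so userspace can test for an extension along these lines (illustrative sketch; the function name is hypothetical):

#include <sys/auxv.h>

/* Illustrative only: does this system support compressed instructions? */
static int my_have_rvc(void)
{
	return !!(getauxval(AT_HWCAP) & COMPAT_HWCAP_ISA_C); /* bit ('C' - 'A') */
}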
+ */ +#define COMPAT_HWCAP_ISA_I (1 << ('I' - 'A')) +#define COMPAT_HWCAP_ISA_M (1 << ('M' - 'A')) +#define COMPAT_HWCAP_ISA_A (1 << ('A' - 'A')) +#define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A')) +#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A')) +#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A')) + +#endif /* _UAPI_ASM_RISCV_HWCAP_H */ diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h new file mode 100644 index 000000000..8985ff234 --- /dev/null +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -0,0 +1,154 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2019 Western Digital Corporation or its affiliates. + * + * Authors: + * Anup Patel <anup.patel@wdc.com> + */ + +#ifndef __LINUX_KVM_RISCV_H +#define __LINUX_KVM_RISCV_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> +#include <asm/ptrace.h> + +#define __KVM_HAVE_READONLY_MEM + +#define KVM_COALESCED_MMIO_PAGE_OFFSET 1 + +#define KVM_INTERRUPT_SET -1U +#define KVM_INTERRUPT_UNSET -2U + +/* for KVM_GET_REGS and KVM_SET_REGS */ +struct kvm_regs { +}; + +/* for KVM_GET_FPU and KVM_SET_FPU */ +struct kvm_fpu { +}; + +/* KVM Debug exit structure */ +struct kvm_debug_exit_arch { +}; + +/* for KVM_SET_GUEST_DEBUG */ +struct kvm_guest_debug_arch { +}; + +/* definition of registers in kvm_run */ +struct kvm_sync_regs { +}; + +/* for KVM_GET_SREGS and KVM_SET_SREGS */ +struct kvm_sregs { +}; + +/* CONFIG registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_config { + unsigned long isa; + unsigned long zicbom_block_size; +}; + +/* CORE registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_core { + struct user_regs_struct regs; + unsigned long mode; +}; + +/* Possible privilege modes for kvm_riscv_core */ +#define KVM_RISCV_MODE_S 1 +#define KVM_RISCV_MODE_U 0 + +/* CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_csr { + unsigned long sstatus; + unsigned long sie; + unsigned long stvec; + unsigned long sscratch; + unsigned long sepc; + unsigned long scause; + unsigned long stval; + unsigned long sip; + unsigned long satp; + unsigned long scounteren; +}; + +/* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_timer { + __u64 frequency; + __u64 time; + __u64 compare; + __u64 state; +}; + +/* + * ISA extension IDs specific to KVM. This is not the same as the host ISA + * extension IDs as that is internal to the host and should not be exposed + * to the guest. This should always be contiguous to keep the mapping simple + * in KVM implementation. 
+ */ +enum KVM_RISCV_ISA_EXT_ID { + KVM_RISCV_ISA_EXT_A = 0, + KVM_RISCV_ISA_EXT_C, + KVM_RISCV_ISA_EXT_D, + KVM_RISCV_ISA_EXT_F, + KVM_RISCV_ISA_EXT_H, + KVM_RISCV_ISA_EXT_I, + KVM_RISCV_ISA_EXT_M, + KVM_RISCV_ISA_EXT_SVPBMT, + KVM_RISCV_ISA_EXT_SSTC, + KVM_RISCV_ISA_EXT_SVINVAL, + KVM_RISCV_ISA_EXT_ZIHINTPAUSE, + KVM_RISCV_ISA_EXT_ZICBOM, + KVM_RISCV_ISA_EXT_MAX, +}; + +/* Possible states for kvm_riscv_timer */ +#define KVM_RISCV_TIMER_STATE_OFF 0 +#define KVM_RISCV_TIMER_STATE_ON 1 + +#define KVM_REG_SIZE(id) \ + (1U << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT)) + +/* If you need to interpret the index values, here is the key: */ +#define KVM_REG_RISCV_TYPE_MASK 0x00000000FF000000 +#define KVM_REG_RISCV_TYPE_SHIFT 24 + +/* Config registers are mapped as type 1 */ +#define KVM_REG_RISCV_CONFIG (0x01 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CONFIG_REG(name) \ + (offsetof(struct kvm_riscv_config, name) / sizeof(unsigned long)) + +/* Core registers are mapped as type 2 */ +#define KVM_REG_RISCV_CORE (0x02 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CORE_REG(name) \ + (offsetof(struct kvm_riscv_core, name) / sizeof(unsigned long)) + +/* Control and status registers are mapped as type 3 */ +#define KVM_REG_RISCV_CSR (0x03 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_CSR_REG(name) \ + (offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long)) + +/* Timer registers are mapped as type 4 */ +#define KVM_REG_RISCV_TIMER (0x04 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_TIMER_REG(name) \ + (offsetof(struct kvm_riscv_timer, name) / sizeof(__u64)) + +/* F extension registers are mapped as type 5 */ +#define KVM_REG_RISCV_FP_F (0x05 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_FP_F_REG(name) \ + (offsetof(struct __riscv_f_ext_state, name) / sizeof(__u32)) + +/* D extension registers are mapped as type 6 */ +#define KVM_REG_RISCV_FP_D (0x06 << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_FP_D_REG(name) \ + (offsetof(struct __riscv_d_ext_state, name) / sizeof(__u64)) + +/* ISA Extension registers are mapped as type 7 */ +#define KVM_REG_RISCV_ISA_EXT (0x07 << KVM_REG_RISCV_TYPE_SHIFT) + +#endif + +#endif /* __LINUX_KVM_RISCV_H */ diff --git a/arch/riscv/include/uapi/asm/perf_regs.h b/arch/riscv/include/uapi/asm/perf_regs.h new file mode 100644 index 000000000..196f964bf --- /dev/null +++ b/arch/riscv/include/uapi/asm/perf_regs.h @@ -0,0 +1,42 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. 
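For context, the type/offset macros above combine with the generic KVM_REG_RISCV and KVM_REG_SIZE_* constants from <linux/kvm.h> to name a register for the ONE_REG ioctls; a sketch, assuming a 64-bit host (so core registers are U64) and hypothetical my_* naming:

#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Illustrative only: fetch the guest pc via KVM_GET_ONE_REG. */
static int my_get_guest_pc(int vcpu_fd, __u64 *pc_val)
{
	struct kvm_one_reg reg = {
		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		      KVM_REG_RISCV_CORE | KVM_REG_RISCV_CORE_REG(regs.pc),
		.addr = (unsigned long)pc_val,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}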
*/ + +#ifndef _ASM_RISCV_PERF_REGS_H +#define _ASM_RISCV_PERF_REGS_H + +enum perf_event_riscv_regs { + PERF_REG_RISCV_PC, + PERF_REG_RISCV_RA, + PERF_REG_RISCV_SP, + PERF_REG_RISCV_GP, + PERF_REG_RISCV_TP, + PERF_REG_RISCV_T0, + PERF_REG_RISCV_T1, + PERF_REG_RISCV_T2, + PERF_REG_RISCV_S0, + PERF_REG_RISCV_S1, + PERF_REG_RISCV_A0, + PERF_REG_RISCV_A1, + PERF_REG_RISCV_A2, + PERF_REG_RISCV_A3, + PERF_REG_RISCV_A4, + PERF_REG_RISCV_A5, + PERF_REG_RISCV_A6, + PERF_REG_RISCV_A7, + PERF_REG_RISCV_S2, + PERF_REG_RISCV_S3, + PERF_REG_RISCV_S4, + PERF_REG_RISCV_S5, + PERF_REG_RISCV_S6, + PERF_REG_RISCV_S7, + PERF_REG_RISCV_S8, + PERF_REG_RISCV_S9, + PERF_REG_RISCV_S10, + PERF_REG_RISCV_S11, + PERF_REG_RISCV_T3, + PERF_REG_RISCV_T4, + PERF_REG_RISCV_T5, + PERF_REG_RISCV_T6, + PERF_REG_RISCV_MAX, +}; +#endif /* _ASM_RISCV_PERF_REGS_H */ diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h new file mode 100644 index 000000000..882547f6b --- /dev/null +++ b/arch/riscv/include/uapi/asm/ptrace.h @@ -0,0 +1,82 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_PTRACE_H +#define _UAPI_ASM_RISCV_PTRACE_H + +#ifndef __ASSEMBLY__ + +#include <linux/types.h> + +/* + * User-mode register state for core dumps, ptrace, sigcontext + * + * This decouples struct pt_regs from the userspace ABI. + * struct user_regs_struct must form a prefix of struct pt_regs. + */ +struct user_regs_struct { + unsigned long pc; + unsigned long ra; + unsigned long sp; + unsigned long gp; + unsigned long tp; + unsigned long t0; + unsigned long t1; + unsigned long t2; + unsigned long s0; + unsigned long s1; + unsigned long a0; + unsigned long a1; + unsigned long a2; + unsigned long a3; + unsigned long a4; + unsigned long a5; + unsigned long a6; + unsigned long a7; + unsigned long s2; + unsigned long s3; + unsigned long s4; + unsigned long s5; + unsigned long s6; + unsigned long s7; + unsigned long s8; + unsigned long s9; + unsigned long s10; + unsigned long s11; + unsigned long t3; + unsigned long t4; + unsigned long t5; + unsigned long t6; +}; + +struct __riscv_f_ext_state { + __u32 f[32]; + __u32 fcsr; +}; + +struct __riscv_d_ext_state { + __u64 f[32]; + __u32 fcsr; +}; + +struct __riscv_q_ext_state { + __u64 f[64] __attribute__((aligned(16))); + __u32 fcsr; + /* + * Reserved for expansion of sigcontext structure. Currently zeroed + * upon signal, and must be zero upon sigreturn. 
+ */ + __u32 reserved[3]; +}; + +union __riscv_fp_state { + struct __riscv_f_ext_state f; + struct __riscv_d_ext_state d; + struct __riscv_q_ext_state q; +}; + +#endif /* __ASSEMBLY__ */ + +#endif /* _UAPI_ASM_RISCV_PTRACE_H */ diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h new file mode 100644 index 000000000..66b13a522 --- /dev/null +++ b/arch/riscv/include/uapi/asm/setup.h @@ -0,0 +1,8 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ + +#ifndef _UAPI_ASM_RISCV_SETUP_H +#define _UAPI_ASM_RISCV_SETUP_H + +#define COMMAND_LINE_SIZE 1024 + +#endif /* _UAPI_ASM_RISCV_SETUP_H */ diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h new file mode 100644 index 000000000..84f2dfcfd --- /dev/null +++ b/arch/riscv/include/uapi/asm/sigcontext.h @@ -0,0 +1,22 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 Regents of the University of California + */ + +#ifndef _UAPI_ASM_RISCV_SIGCONTEXT_H +#define _UAPI_ASM_RISCV_SIGCONTEXT_H + +#include <asm/ptrace.h> + +/* + * Signal context structure + * + * This contains the context saved before a signal handler is invoked; + * it is restored by sys_sigreturn / sys_rt_sigreturn. + */ +struct sigcontext { + struct user_regs_struct sc_regs; + union __riscv_fp_state sc_fpregs; +}; + +#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */ diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h new file mode 100644 index 000000000..44eb99395 --- /dev/null +++ b/arch/riscv/include/uapi/asm/ucontext.h @@ -0,0 +1,34 @@ +/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */ +/* + * Copyright (C) 2012 ARM Ltd. + * Copyright (C) 2017 SiFive, Inc. + * + * This file was copied from arch/arm64/include/uapi/asm/ucontext.h + */ +#ifndef _UAPI_ASM_RISCV_UCONTEXT_H +#define _UAPI_ASM_RISCV_UCONTEXT_H + +#include <linux/types.h> + +struct ucontext { + unsigned long uc_flags; + struct ucontext *uc_link; + stack_t uc_stack; + sigset_t uc_sigmask; + /* There's some padding here to allow sigset_t to be expanded in the + * future. Though this is unlikely, other architectures put uc_sigmask + * at the end of this structure and explicitly state it can be + * expanded, so we didn't want to box ourselves in here. */ + __u8 __unused[1024 / 8 - sizeof(sigset_t)]; + /* We can't put uc_sigmask at the end of this structure because we need + * to be able to expand sigcontext in the future. For example, the + * vector ISA extension will almost certainly add ISA state. We want + * to ensure all user-visible ISA state can be saved and restored via a + * ucontext, so we're putting this at the end in order to allow for + * infinite extensibility. Since we know this will be extended and we + * assume sigset_t won't be extended an extreme amount, we're + * prioritizing this. */ + struct sigcontext uc_mcontext; +}; + +#endif /* _UAPI_ASM_RISCV_UCONTEXT_H */ diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h new file mode 100644 index 000000000..73d7cdd2e --- /dev/null +++ b/arch/riscv/include/uapi/asm/unistd.h @@ -0,0 +1,45 @@ +/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */ +/* + * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. 
+ * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <https://www.gnu.org/licenses/>. + */ + +#if defined(__LP64__) && !defined(__SYSCALL_COMPAT) +#define __ARCH_WANT_NEW_STAT +#define __ARCH_WANT_SET_GET_RLIMIT +#endif /* __LP64__ */ + +#define __ARCH_WANT_SYS_CLONE3 +#define __ARCH_WANT_MEMFD_SECRET + +#include <asm-generic/unistd.h> + +/* + * Allows the instruction cache to be flushed from userspace. Despite RISC-V + * having a direct 'fence.i' instruction available to userspace (which we + * can't trap!), that's not actually viable when running on Linux because the + * kernel might schedule a process on another hart. There is no way for + * userspace to handle this without invoking the kernel (as it doesn't know the + * thread->hart mappings), so we've defined a RISC-V specific system call to + * flush the instruction cache. + * + * __NR_riscv_flush_icache is defined to flush the instruction cache over an + * address range, with the flush applying to either all threads or just the + * caller. We don't currently do anything with the address range; that's just + * in there for forwards compatibility. + */ +#ifndef __NR_riscv_flush_icache +#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15) +#endif +__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
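
The KVM_REG_RISCV_* type macros in uapi/asm/kvm.h above compose with the generic KVM_REG_* fields from <linux/kvm.h> to form the 64-bit register IDs that KVM_GET_ONE_REG/KVM_SET_ONE_REG consume. A minimal userspace sketch of reading the guest's sepc CSR, assuming an RV64 host (so the unsigned long CSR slots take KVM_REG_SIZE_U64) and a vcpu_fd previously returned by KVM_CREATE_VCPU; read_guest_sepc is an illustrative helper name, not part of this patch:

#include <stddef.h>     /* offsetof, used by KVM_REG_RISCV_CSR_REG */
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

static int read_guest_sepc(int vcpu_fd, uint64_t *val)
{
	struct kvm_one_reg reg = {
		/* arch | size | register class | index into kvm_riscv_csr */
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
		        KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_REG(sepc),
		.addr = (uint64_t)(uintptr_t)val,
	};
	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

The same pattern selects any class defined above; for example, KVM_REG_RISCV_ISA_EXT with an enum KVM_RISCV_ISA_EXT_ID value as the index lets userspace probe or toggle one guest ISA extension at a time.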
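The perf_event_riscv_regs enumerators in uapi/asm/perf_regs.h are bit positions in perf_event_attr.sample_regs_user when PERF_SAMPLE_REGS_USER sampling is requested. A sketch of the attr setup only; the perf_event_open() call and ring-buffer parsing are omitted:

#include <linux/perf_event.h>
#include <asm/perf_regs.h>

static void sample_user_pc_and_sp(struct perf_event_attr *attr)
{
	/* Each PERF_REG_RISCV_* value is a bit in the request mask. */
	attr->sample_type |= PERF_SAMPLE_REGS_USER;
	attr->sample_regs_user = (1ULL << PERF_REG_RISCV_PC) |
				 (1ULL << PERF_REG_RISCV_SP);
}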
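struct user_regs_struct from uapi/asm/ptrace.h is what PTRACE_GETREGSET and PTRACE_SETREGSET transfer for the NT_PRSTATUS regset, so a debugger reads a stopped tracee's general-purpose registers as one block. A sketch, with error handling omitted:

#include <elf.h>        /* NT_PRSTATUS */
#include <sys/ptrace.h>
#include <sys/types.h>
#include <sys/uio.h>
#include <asm/ptrace.h> /* struct user_regs_struct */

static long read_gprs(pid_t pid, struct user_regs_struct *regs)
{
	/* The kernel fills *regs in exactly the layout declared above. */
	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(*regs) };
	return ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov);
}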
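struct sigcontext (sc_regs first, FP state after) is what lands in the uc_mcontext field of struct ucontext above, so an SA_SIGINFO handler can inspect the interrupted register state through its third argument. A sketch assuming glibc, whose mcontext_t mirrors this layout as a __gregs array indexed by REG_PC and friends:

#define _GNU_SOURCE     /* REG_PC et al. from glibc's <sys/ucontext.h> */
#include <signal.h>
#include <stdio.h>
#include <ucontext.h>

static void segv_handler(int sig, siginfo_t *info, void *ucp)
{
	ucontext_t *uc = ucp;

	/* Illustration only: fprintf is not async-signal-safe. */
	fprintf(stderr, "fault pc = %#lx\n",
		(unsigned long)uc->uc_mcontext.__gregs[REG_PC]);
}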
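Userspace reaches riscv_flush_icache through syscall(2), or on RISC-V glibc through the __riscv_flush_icache() wrapper in <sys/cachectl.h>. A raw sketch, assuming <sys/syscall.h> exposes the __NR_riscv_flush_icache number defined above (__NR_arch_specific_syscall + 15, i.e. 259 with the asm-generic table):

#include <sys/syscall.h>
#include <unistd.h>

static long flush_icache(void *start, void *end)
{
	/* flags == 0: flush on behalf of every thread in the process. */
	return syscall(__NR_riscv_flush_icache, start, end, 0UL);
}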