Diffstat (limited to 'arch/riscv/include')
35 files changed, 634 insertions, 122 deletions
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
index d5604d2073..7dad0cf9d7 100644
--- a/arch/riscv/include/asm/acpi.h
+++ b/arch/riscv/include/asm/acpi.h
@@ -66,6 +66,8 @@ int acpi_get_riscv_isa(struct acpi_table_header *table,
 		       unsigned int cpu, const char **isa);
 
 static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+void acpi_get_cbo_block_size(struct acpi_table_header *table, u32 *cbom_size,
+			     u32 *cboz_size, u32 *cbop_size);
 #else
 static inline void acpi_init_rintc_map(void) { }
 static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
@@ -79,6 +81,10 @@ static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
 	return -EINVAL;
 }
 
+static inline void acpi_get_cbo_block_size(struct acpi_table_header *table,
+					   u32 *cbom_size, u32 *cboz_size,
+					   u32 *cbop_size) { }
+
 #endif /* CONFIG_ACPI */
 
 #endif /*_ASM_ACPI_H*/
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
index bfb4c26f11..b0487b39e6 100644
--- a/arch/riscv/include/asm/asm.h
+++ b/arch/riscv/include/asm/asm.h
@@ -104,6 +104,25 @@
 .endm
 #endif /* CONFIG_SMP */
 
+.macro load_per_cpu dst ptr tmp
+	asm_per_cpu \dst \ptr \tmp
+	REG_L \dst, 0(\dst)
+.endm
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+/* gp is used as the shadow call stack pointer instead */
+.macro load_global_pointer
+.endm
+#else
+/* load __global_pointer to gp */
+.macro load_global_pointer
+.option push
+.option norelax
+	la gp, __global_pointer$
+.option pop
+.endm
+#endif /* CONFIG_SHADOW_CALL_STACK */
+
 /* save all GPs except x1 ~ x5 */
 .macro save_from_x6_to_x31
 	REG_S x6,  PT_T1(sp)
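The new load_per_cpu assembler macro pairs an address computation (asm_per_cpu) with a load through the resulting pointer, i.e. it is the assembly-level counterpart of reading a per-CPU pointer variable in C. A rough C analogue, as a sketch only (the helper function is hypothetical; irq_shadow_call_stack_ptr is the per-CPU symbol the scs.h hunk further down loads this way):

    /* Hypothetical C equivalent of "load_per_cpu gp, irq_shadow_call_stack_ptr, t0". */
    DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);

    static inline ulong *current_cpu_irq_scs(void)
    {
            /* this_cpu_ptr() resolves &sym for this CPU (the asm_per_cpu step);
             * the dereference matches the macro's trailing REG_L. */
            return *this_cpu_ptr(&irq_shadow_call_stack_ptr);
    }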
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
index 3540b69094..ce47613e38 100644
--- a/arch/riscv/include/asm/bitops.h
+++ b/arch/riscv/include/asm/bitops.h
@@ -15,13 +15,261 @@
 #include <asm/barrier.h>
 #include <asm/bitsperlong.h>
 
+#if !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE)
 #include <asm-generic/bitops/__ffs.h>
-#include <asm-generic/bitops/ffz.h>
-#include <asm-generic/bitops/fls.h>
 #include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/ffs.h>
+#include <asm-generic/bitops/fls.h>
+
+#else
+#include <asm/alternative-macros.h>
+#include <asm/hwcap.h>
+
+#if (BITS_PER_LONG == 64)
+#define CTZW	"ctzw "
+#define CLZW	"clzw "
+#elif (BITS_PER_LONG == 32)
+#define CTZW	"ctz "
+#define CLZW	"clz "
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+static __always_inline unsigned long variable__ffs(unsigned long word)
+{
+	int num;
+
+	asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+			     RISCV_ISA_EXT_ZBB, 1)
+		 : : : : legacy);
+
+	asm volatile (".option push\n"
+		      ".option arch,+zbb\n"
+		      "ctz %0, %1\n"
+		      ".option pop\n"
+		      : "=r" (word) : "r" (word) :);
+
+	return word;
+
+legacy:
+	num = 0;
+#if BITS_PER_LONG == 64
+	if ((word & 0xffffffff) == 0) {
+		num += 32;
+		word >>= 32;
+	}
+#endif
+	if ((word & 0xffff) == 0) {
+		num += 16;
+		word >>= 16;
+	}
+	if ((word & 0xff) == 0) {
+		num += 8;
+		word >>= 8;
+	}
+	if ((word & 0xf) == 0) {
+		num += 4;
+		word >>= 4;
+	}
+	if ((word & 0x3) == 0) {
+		num += 2;
+		word >>= 2;
+	}
+	if ((word & 0x1) == 0)
+		num += 1;
+	return num;
+}
+
+/**
+ * __ffs - find first set bit in a long word
+ * @word: The word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __ffs(word)				\
+	(__builtin_constant_p(word) ?		\
+	 (unsigned long)__builtin_ctzl(word) :	\
+	 variable__ffs(word))
+
+static __always_inline unsigned long variable__fls(unsigned long word)
+{
+	int num;
+
+	asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+			     RISCV_ISA_EXT_ZBB, 1)
+		 : : : : legacy);
+
+	asm volatile (".option push\n"
+		      ".option arch,+zbb\n"
+		      "clz %0, %1\n"
+		      ".option pop\n"
+		      : "=r" (word) : "r" (word) :);
+
+	return BITS_PER_LONG - 1 - word;
+
+legacy:
+	num = BITS_PER_LONG - 1;
+#if BITS_PER_LONG == 64
+	if (!(word & (~0ul << 32))) {
+		num -= 32;
+		word <<= 32;
+	}
+#endif
+	if (!(word & (~0ul << (BITS_PER_LONG - 16)))) {
+		num -= 16;
+		word <<= 16;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG - 8)))) {
+		num -= 8;
+		word <<= 8;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG - 4)))) {
+		num -= 4;
+		word <<= 4;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG - 2)))) {
+		num -= 2;
+		word <<= 2;
+	}
+	if (!(word & (~0ul << (BITS_PER_LONG - 1))))
+		num -= 1;
+	return num;
+}
+
+/**
+ * __fls - find last set bit in a long word
+ * @word: the word to search
+ *
+ * Undefined if no set bit exists, so code should check against 0 first.
+ */
+#define __fls(word)							\
+	(__builtin_constant_p(word) ?					\
+	 (unsigned long)(BITS_PER_LONG - 1 - __builtin_clzl(word)) :	\
+	 variable__fls(word))
+
+static __always_inline int variable_ffs(int x)
+{
+	int r;
+
+	if (!x)
+		return 0;
+
+	asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+			     RISCV_ISA_EXT_ZBB, 1)
+		 : : : : legacy);
+
+	asm volatile (".option push\n"
+		      ".option arch,+zbb\n"
+		      CTZW "%0, %1\n"
+		      ".option pop\n"
+		      : "=r" (r) : "r" (x) :);
+
+	return r + 1;
+
+legacy:
+	r = 1;
+	if (!(x & 0xffff)) {
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;
+}
+
+/**
+ * ffs - find first set bit in a word
+ * @x: the word to search
+ *
+ * This is defined the same way as the libc and compiler builtin ffs routines.
+ *
+ * ffs(value) returns 0 if value is 0 or the position of the first set bit if
+ * value is nonzero. The first (least significant) bit is at position 1.
+ */
+#define ffs(x) (__builtin_constant_p(x) ? __builtin_ffs(x) : variable_ffs(x))
+
+static __always_inline int variable_fls(unsigned int x)
+{
+	int r;
+
+	if (!x)
+		return 0;
+
+	asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0,
+			     RISCV_ISA_EXT_ZBB, 1)
+		 : : : : legacy);
+
+	asm volatile (".option push\n"
+		      ".option arch,+zbb\n"
+		      CLZW "%0, %1\n"
+		      ".option pop\n"
+		      : "=r" (r) : "r" (x) :);
+
+	return 32 - r;
+
+legacy:
+	r = 32;
+	if (!(x & 0xffff0000u)) {
+		x <<= 16;
+		r -= 16;
+	}
+	if (!(x & 0xff000000u)) {
+		x <<= 8;
+		r -= 8;
+	}
+	if (!(x & 0xf0000000u)) {
+		x <<= 4;
+		r -= 4;
+	}
+	if (!(x & 0xc0000000u)) {
+		x <<= 2;
+		r -= 2;
+	}
+	if (!(x & 0x80000000u)) {
+		x <<= 1;
+		r -= 1;
+	}
+	return r;
+}
+
+/**
+ * fls - find last set bit in a word
+ * @x: the word to search
+ *
+ * This is defined in a similar way as ffs, but returns the position of the most
+ * significant set bit.
+ *
+ * fls(value) returns 0 if value is 0 or the position of the last set bit if
+ * value is nonzero. The last (most significant) bit is at position 32.
+ */
+#define fls(x)							\
+({								\
+	typeof(x) x_ = (x);					\
+	__builtin_constant_p(x_) ?				\
+	 (int)((x_ != 0) ? (32 - __builtin_clz(x_)) : 0)	\
+	 :							\
+	 variable_fls(x_);					\
+})
+
+#endif /* !defined(CONFIG_RISCV_ISA_ZBB) || defined(NO_ALTERNATIVE) */
+
+#include <asm-generic/bitops/ffz.h>
 #include <asm-generic/bitops/fls64.h>
 #include <asm-generic/bitops/sched.h>
-#include <asm-generic/bitops/ffs.h>
 
 #include <asm-generic/bitops/hweight.h>
 
@@ -191,6 +439,18 @@ static inline void __clear_bit_unlock(
 	clear_bit_unlock(nr, addr);
 }
 
+static inline bool xor_unlock_is_negative_byte(unsigned long mask,
+		volatile unsigned long *addr)
+{
+	unsigned long res;
+	__asm__ __volatile__ (
+		__AMO(xor) ".rl %0, %2, %1"
+		: "=r" (res), "+A" (*addr)
+		: "r" (__NOP(mask))
+		: "memory");
+	return (res & BIT(7)) != 0;
+}
+
 #undef __test_and_op_bit
 #undef __op_bit
 #undef __NOP
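The kernel-doc comments above pin down the differing conventions: ffs()/fls() are 1-based and defined for 0, while __ffs()/__fls() are 0-based and undefined for 0, which is exactly why the constant-folding branches can map onto __builtin_ffs()/__builtin_ctzl()/__builtin_clzl(). A minimal userspace check of the expected values (a sketch, assuming a 64-bit long):

    #include <assert.h>

    int main(void)
    {
            /* 0x18 = 0b11000: lowest set bit is bit 3, highest is bit 4. */
            assert(__builtin_ffs(0) == 0);              /* ffs(0)    */
            assert(__builtin_ffs(0x18) == 4);           /* ffs(0x18) */
            assert(__builtin_ctzl(0x18UL) == 3);        /* __ffs()   */
            assert(63 - __builtin_clzl(0x18UL) == 4);   /* __fls()   */
            assert(32 - __builtin_clz(0x18U) == 5);     /* fls(0x18) */
            return 0;
    }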
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
index 3cb53c4df2..a129dac452 100644
--- a/arch/riscv/include/asm/cacheflush.h
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -37,7 +37,8 @@ static inline void flush_dcache_page(struct page *page)
 	flush_icache_mm(vma->vm_mm, 0)
 
 #ifdef CONFIG_64BIT
-#define flush_cache_vmap(start, end)	flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap(start, end)		flush_tlb_kernel_range(start, end)
+#define flush_cache_vmap_early(start, end)	local_flush_tlb_kernel_range(start, end)
 #endif
 
 #ifndef CONFIG_SMP
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
index d0345bd659..aa6548b46a 100644
--- a/arch/riscv/include/asm/cpufeature.h
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -7,7 +7,10 @@
 #define _ASM_CPUFEATURE_H
 
 #include <linux/bitmap.h>
+#include <linux/jump_label.h>
 #include <asm/hwcap.h>
+#include <asm/alternative-macros.h>
+#include <asm/errno.h>
 
 /*
  * These are probed via a device_initcall(), via either the SBI or directly
@@ -30,6 +33,104 @@ DECLARE_PER_CPU(long, misaligned_access_speed);
 
 /* Per-cpu ISA extensions. */
 extern struct riscv_isainfo hart_isa[NR_CPUS];
 
-void check_unaligned_access(int cpu);
+void riscv_user_isa_enable(void);
+
+#ifdef CONFIG_RISCV_MISALIGNED
+bool unaligned_ctl_available(void);
+bool check_unaligned_access_emulated(int cpu);
+void unaligned_emulation_finish(void);
+#else
+static inline bool unaligned_ctl_available(void)
+{
+	return false;
+}
+
+static inline bool check_unaligned_access_emulated(int cpu)
+{
+	return false;
+}
+
+static inline void unaligned_emulation_finish(void) {}
+#endif
+
+unsigned long riscv_get_elf_hwcap(void);
+
+struct riscv_isa_ext_data {
+	const unsigned int id;
+	const char *name;
+	const char *property;
+};
+
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext)	\
+	__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
+static __always_inline bool
+riscv_has_extension_likely(const unsigned long ext)
+{
+	compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+			   "ext must be < RISCV_ISA_EXT_MAX");
+
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+		asm goto(
+		ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+		:
+		: [ext] "i" (ext)
+		:
+		: l_no);
+	} else {
+		if (!__riscv_isa_extension_available(NULL, ext))
+			goto l_no;
+	}
+
+	return true;
+l_no:
+	return false;
+}
+
+static __always_inline bool
+riscv_has_extension_unlikely(const unsigned long ext)
+{
+	compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+			   "ext must be < RISCV_ISA_EXT_MAX");
+
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+		asm goto(
+		ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+		:
+		: [ext] "i" (ext)
+		:
+		: l_yes);
+	} else {
+		if (__riscv_isa_extension_available(NULL, ext))
+			goto l_yes;
+	}
+
+	return false;
+l_yes:
+	return true;
+}
+
+static __always_inline bool riscv_cpu_has_extension_likely(int cpu, const unsigned long ext)
+{
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_likely(ext))
+		return true;
+
+	return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
+
+static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsigned long ext)
+{
+	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE) && riscv_has_extension_unlikely(ext))
+		return true;
+
+	return __riscv_isa_extension_available(hart_isa[cpu].isa, ext);
+}
 
 #endif
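Under CONFIG_RISCV_ALTERNATIVE the riscv_has_extension_likely()/_unlikely() pair compiles down to a single patchable instruction, so the test is essentially free once alternatives are applied; the new riscv_cpu_has_extension_*() variants additionally consult the per-hart bitmap. Typical use looks roughly like this (the surrounding function and its fallback are hypothetical):

    /* Sketch: select a Zbb-accelerated path at run time. */
    static unsigned long first_set_bit(unsigned long word)
    {
            if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
                    return __ffs(word);              /* patched-in fast path */

            return generic_first_set_bit(word);      /* hypothetical fallback */
    }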
diff --git a/arch/riscv/include/asm/crash_core.h b/arch/riscv/include/asm/crash_core.h
new file mode 100644
index 0000000000..e1874b23fe
--- /dev/null
+++ b/arch/riscv/include/asm/crash_core.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef _RISCV_CRASH_CORE_H
+#define _RISCV_CRASH_CORE_H
+
+#define CRASH_ALIGN			PMD_SIZE
+
+#define CRASH_ADDR_LOW_MAX		dma32_phys_limit
+#define CRASH_ADDR_HIGH_MAX		memblock_end_of_DRAM()
+
+extern phys_addr_t memblock_end_of_DRAM(void);
+#endif
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
index 777cb82995..306a19a550 100644
--- a/arch/riscv/include/asm/csr.h
+++ b/arch/riscv/include/asm/csr.h
@@ -203,6 +203,18 @@
 #define ENVCFG_CBIE_INV			_AC(0x3, UL)
 #define ENVCFG_FIOM			_AC(0x1, UL)
 
+/* Smstateen bits */
+#define SMSTATEEN0_AIA_IMSIC_SHIFT	58
+#define SMSTATEEN0_AIA_IMSIC		(_ULL(1) << SMSTATEEN0_AIA_IMSIC_SHIFT)
+#define SMSTATEEN0_AIA_SHIFT		59
+#define SMSTATEEN0_AIA			(_ULL(1) << SMSTATEEN0_AIA_SHIFT)
+#define SMSTATEEN0_AIA_ISEL_SHIFT	60
+#define SMSTATEEN0_AIA_ISEL		(_ULL(1) << SMSTATEEN0_AIA_ISEL_SHIFT)
+#define SMSTATEEN0_HSENVCFG_SHIFT	62
+#define SMSTATEEN0_HSENVCFG		(_ULL(1) << SMSTATEEN0_HSENVCFG_SHIFT)
+#define SMSTATEEN0_SSTATEEN0_SHIFT	63
+#define SMSTATEEN0_SSTATEEN0		(_ULL(1) << SMSTATEEN0_SSTATEEN0_SHIFT)
+
 /* symbolic CSR names: */
 #define CSR_CYCLE		0xc00
 #define CSR_TIME		0xc01
@@ -275,6 +287,8 @@
 #define CSR_SIE			0x104
 #define CSR_STVEC		0x105
 #define CSR_SCOUNTEREN		0x106
+#define CSR_SENVCFG		0x10a
+#define CSR_SSTATEEN0		0x10c
 #define CSR_SSCRATCH		0x140
 #define CSR_SEPC		0x141
 #define CSR_SCAUSE		0x142
@@ -349,6 +363,10 @@
 #define CSR_VSIEH		0x214
 #define CSR_VSIPH		0x254
 
+/* Hypervisor stateen CSRs */
+#define CSR_HSTATEEN0		0x60c
+#define CSR_HSTATEEN0H		0x61c
+
 #define CSR_MSTATUS		0x300
 #define CSR_MISA		0x301
 #define CSR_MIDELEG		0x303
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
index b3b2dfbdf9..06c236bfab 100644
--- a/arch/riscv/include/asm/elf.h
+++ b/arch/riscv/include/asm/elf.h
@@ -14,7 +14,7 @@
 #include <asm/auxvec.h>
 #include <asm/byteorder.h>
 #include <asm/cacheinfo.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 
 /*
  * These are used to set parameters in the core dumps.
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
index 6e4dee49d8..7ab5e34318 100644
--- a/arch/riscv/include/asm/entry-common.h
+++ b/arch/riscv/include/asm/entry-common.h
@@ -8,4 +8,18 @@
 void handle_page_fault(struct pt_regs *regs);
 void handle_break(struct pt_regs *regs);
 
+#ifdef CONFIG_RISCV_MISALIGNED
+int handle_misaligned_load(struct pt_regs *regs);
+int handle_misaligned_store(struct pt_regs *regs);
+#else
+static inline int handle_misaligned_load(struct pt_regs *regs)
+{
+	return -1;
+}
+static inline int handle_misaligned_store(struct pt_regs *regs)
+{
+	return -1;
+}
+#endif
+
 #endif /* _ASM_RISCV_ENTRY_COMMON_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
index b55b434f00..83ed25e435 100644
--- a/arch/riscv/include/asm/errata_list.h
+++ b/arch/riscv/include/asm/errata_list.h
@@ -95,31 +95,31 @@ asm volatile(ALTERNATIVE(						\
 #endif
 
 /*
- * dcache.ipa rs1 (invalidate, physical address)
+ * th.dcache.ipa rs1 (invalidate, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001    01010      rs1       000      00000  0001011
- * dcache.iva rs1 (invalidate, virtual address)
+ * th.dcache.iva rs1 (invalidate, virtual address)
  *   0000001    00110      rs1       000      00000  0001011
 
 *
- * dcache.cpa rs1 (clean, physical address)
+ * th.dcache.cpa rs1 (clean, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001    01001      rs1       000      00000  0001011
- * dcache.cva rs1 (clean, virtual address)
+ * th.dcache.cva rs1 (clean, virtual address)
  *   0000001    00101      rs1       000      00000  0001011
 *
- * dcache.cipa rs1 (clean then invalidate, physical address)
+ * th.dcache.cipa rs1 (clean then invalidate, physical address)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000001    01011      rs1       000      00000  0001011
- * dcache.civa rs1 (... virtual address)
+ * th.dcache.civa rs1 (... virtual address)
  *   0000001    00111      rs1       000      00000  0001011
 *
- * sync.s (make sure all cache operations finished)
+ * th.sync.s (make sure all cache operations finished)
  * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
  *   0000000    11001     00000      000      00000  0001011
  */
-#define THEAD_inval_A0	".long 0x0265000b"
-#define THEAD_clean_A0	".long 0x0255000b"
-#define THEAD_flush_A0	".long 0x0275000b"
+#define THEAD_INVAL_A0	".long 0x0265000b"
+#define THEAD_CLEAN_A0	".long 0x0255000b"
+#define THEAD_FLUSH_A0	".long 0x0275000b"
 #define THEAD_SYNC_S	".long 0x0190000b"
 
 #define ALT_CMO_OP(_op, _start, _size, _cachesize)			\
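The renamed THEAD_* opcode strings can be checked against the bit layout in the comment: packing funct7/rs2-selector/rs1/funct3/rd/opcode for th.dcache.iva with rs1 = a0 (x10) reproduces the 0x0265000b word behind THEAD_INVAL_A0. A standalone sketch of that arithmetic:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* Fields per the layout comment; opcode is custom-0 (0001011). */
            uint32_t insn = (0x01u << 25) |  /* funct7 0000001        */
                            (0x06u << 20) |  /* selector 00110 -> iva */
                            (10u   << 15) |  /* rs1 = a0 (x10)        */
                            (0x0u  << 12) |  /* funct3 000            */
                            (0u    <<  7) |  /* rd = x0               */
                            0x0bu;
            assert(insn == 0x0265000b);      /* THEAD_INVAL_A0 */
            return 0;
    }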
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
index 4c5b0e9298..20f9c3ba23 100644
--- a/arch/riscv/include/asm/hugetlb.h
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -11,6 +11,9 @@ static inline void arch_clear_hugepage_flags(struct page *page)
 }
 #define arch_clear_hugepage_flags arch_clear_hugepage_flags
 
+bool arch_hugetlb_migration_supported(struct hstate *h);
+#define arch_hugetlb_migration_supported arch_hugetlb_migration_supported
+
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
 #define __HAVE_ARCH_HUGE_PTE_CLEAR
 void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
index b7b58258f6..06d30526ef 100644
--- a/arch/riscv/include/asm/hwcap.h
+++ b/arch/riscv/include/asm/hwcap.h
@@ -8,9 +8,6 @@
 #ifndef _ASM_RISCV_HWCAP_H
 #define _ASM_RISCV_HWCAP_H
 
-#include <asm/alternative-macros.h>
-#include <asm/errno.h>
-#include <linux/bits.h>
 #include <uapi/asm/hwcap.h>
 
 #define RISCV_ISA_EXT_a		('a' - 'a')
@@ -58,6 +55,8 @@
 #define RISCV_ISA_EXT_ZICSR		40
 #define RISCV_ISA_EXT_ZIFENCEI		41
 #define RISCV_ISA_EXT_ZIHPM		42
+#define RISCV_ISA_EXT_SMSTATEEN		43
+#define RISCV_ISA_EXT_ZICOND		44
 
 #define RISCV_ISA_EXT_MAX		64
 
@@ -67,76 +66,4 @@
 #define RISCV_ISA_EXT_SxAIA		RISCV_ISA_EXT_SSAIA
 #endif
 
-#ifndef __ASSEMBLY__
-
-#include <linux/jump_label.h>
-
-unsigned long riscv_get_elf_hwcap(void);
-
-struct riscv_isa_ext_data {
-	const unsigned int id;
-	const char *name;
-	const char *property;
-};
-
-extern const struct riscv_isa_ext_data riscv_isa_ext[];
-extern const size_t riscv_isa_ext_count;
-extern bool riscv_isa_fallback;
-
-unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
-
-#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
-
-bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
-#define riscv_isa_extension_available(isa_bitmap, ext)	\
-	__riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
-
-static __always_inline bool
-riscv_has_extension_likely(const unsigned long ext)
-{
-	compiletime_assert(ext < RISCV_ISA_EXT_MAX,
-			   "ext must be < RISCV_ISA_EXT_MAX");
-
-	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
-		asm_volatile_goto(
-		ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
-		:
-		: [ext] "i" (ext)
-		:
-		: l_no);
-	} else {
-		if (!__riscv_isa_extension_available(NULL, ext))
-			goto l_no;
-	}
-
-	return true;
-l_no:
-	return false;
-}
-
-static __always_inline bool
-riscv_has_extension_unlikely(const unsigned long ext)
-{
-	compiletime_assert(ext < RISCV_ISA_EXT_MAX,
-			   "ext must be < RISCV_ISA_EXT_MAX");
-
-	if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
-		asm_volatile_goto(
-		ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
-		:
-		: [ext] "i" (ext)
-		:
-		: l_yes);
-	} else {
-		if (__riscv_isa_extension_available(NULL, ext))
-			goto l_yes;
-	}
-
-	return false;
-l_yes:
-	return true;
-}
-
-#endif
-
 #endif /* _ASM_RISCV_HWCAP_H */
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
index 7cad513538..5c48f48e79 100644
--- a/arch/riscv/include/asm/hwprobe.h
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -8,7 +8,7 @@
 
 #include <uapi/asm/hwprobe.h>
 
-#define RISCV_HWPROBE_MAX_KEY 5
+#define RISCV_HWPROBE_MAX_KEY 6
 
 static inline bool riscv_hwprobe_key_is_valid(__s64 key)
 {
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
index 6960beb75f..e27179b260 100644
--- a/arch/riscv/include/asm/insn-def.h
+++ b/arch/riscv/include/asm/insn-def.h
@@ -180,19 +180,19 @@
 	INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51),		\
 	       __RD(0), RS1(gaddr), RS2(vmid))
 
-#define CBO_inval(base)						\
+#define CBO_INVAL(base)						\
 	INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),		\
 	       RS1(base), SIMM12(0))
 
-#define CBO_clean(base)						\
+#define CBO_CLEAN(base)						\
 	INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),		\
 	       RS1(base), SIMM12(1))
 
-#define CBO_flush(base)						\
+#define CBO_FLUSH(base)						\
 	INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),		\
 	       RS1(base), SIMM12(2))
 
-#define CBO_zero(base)						\
+#define CBO_ZERO(base)						\
 	INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0),		\
 	       RS1(base), SIMM12(4))
diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h
index e4042d2975..6441ded3b0 100644
--- a/arch/riscv/include/asm/irq_stack.h
+++ b/arch/riscv/include/asm/irq_stack.h
@@ -12,6 +12,9 @@
 
 DECLARE_PER_CPU(ulong *, irq_stack_ptr);
 
+asmlinkage void call_on_irq_stack(struct pt_regs *regs,
+				  void (*func)(struct pt_regs *));
+
 #ifdef CONFIG_VMAP_STACK
 /*
  * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
index b53891964a..b27a4d64fc 100644
--- a/arch/riscv/include/asm/irq_work.h
+++ b/arch/riscv/include/asm/irq_work.h
@@ -6,5 +6,5 @@ static inline bool arch_irq_work_has_interrupt(void)
 {
 	return IS_ENABLED(CONFIG_SMP);
 }
-extern void arch_irq_work_raise(void);
+
 #endif /* _ASM_RISCV_IRQ_WORK_H */
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
index 14a5ea8d8e..4a35d787c0 100644
--- a/arch/riscv/include/asm/jump_label.h
+++ b/arch/riscv/include/asm/jump_label.h
@@ -17,7 +17,7 @@ static __always_inline bool arch_static_branch(struct static_key * const key,
 					       const bool branch)
 {
-	asm_volatile_goto(
+	asm goto(
 		"	.align		2		\n\t"
 		"	.option push			\n\t"
 		"	.option norelax			\n\t"
@@ -39,7 +39,7 @@ label:
 static __always_inline bool arch_static_branch_jump(struct static_key * const key,
 						    const bool branch)
 {
-	asm_volatile_goto(
+	asm goto(
 		"	.align		2		\n\t"
 		"	.option push			\n\t"
 		"	.option norelax			\n\t"
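The CBO_* macros all use the same INSN_I scheme: the standard MISC-MEM opcode with funct3 = 2 and rd = x0, the 12-bit immediate selecting the operation (0/1/2/4 for inval/clean/flush/zero). Assembling CBO_ZERO with base a0 by hand should therefore reproduce the Zicboz cbo.zero encoding; the 0x0045200f constant below is my own computation and worth cross-checking against an assembler:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            /* I-type: imm[11:0] | rs1 | funct3 | rd | opcode */
            uint32_t imm = 4, rs1 = 10 /* a0 */, funct3 = 2, rd = 0;
            uint32_t opcode = 0x0f;          /* MISC-MEM */
            uint32_t insn = (imm << 20) | (rs1 << 15) | (funct3 << 12) |
                            (rd << 7) | opcode;
            assert(insn == 0x0045200f);      /* cbo.zero (a0) */
            return 0;
    }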
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
index 1ebf20dfba..0eefd9c991 100644
--- a/arch/riscv/include/asm/kvm_host.h
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -162,6 +162,16 @@ struct kvm_vcpu_csr {
 	unsigned long hvip;
 	unsigned long vsatp;
 	unsigned long scounteren;
+	unsigned long senvcfg;
+};
+
+struct kvm_vcpu_config {
+	u64 henvcfg;
+	u64 hstateen0;
+};
+
+struct kvm_vcpu_smstateen_csr {
+	unsigned long sstateen0;
 };
 
 struct kvm_vcpu_arch {
@@ -183,6 +193,8 @@ struct kvm_vcpu_arch {
 	unsigned long host_sscratch;
 	unsigned long host_stvec;
 	unsigned long host_scounteren;
+	unsigned long host_senvcfg;
+	unsigned long host_sstateen0;
 
 	/* CPU context of Host */
 	struct kvm_cpu_context host_context;
@@ -193,6 +205,9 @@ struct kvm_vcpu_arch {
 	/* CPU CSR context of Guest VCPU */
 	struct kvm_vcpu_csr guest_csr;
 
+	/* CPU Smstateen CSR context of Guest VCPU */
+	struct kvm_vcpu_smstateen_csr smstateen_csr;
+
 	/* CPU context upon Guest VCPU reset */
 	struct kvm_cpu_context guest_reset_context;
 
@@ -244,6 +259,9 @@ struct kvm_vcpu_arch {
 
 	/* Performance monitoring context */
 	struct kvm_pmu pmu_context;
+
+	/* 'static' configurations which are set only once */
+	struct kvm_vcpu_config cfg;
 };
 
 static inline void kvm_arch_sync_events(struct kvm *kvm) {}
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
index cdcf0ff07b..6a453f7f8b 100644
--- a/arch/riscv/include/asm/kvm_vcpu_sbi.h
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -11,7 +11,7 @@
 
 #define KVM_SBI_IMPID 3
 
-#define KVM_SBI_VERSION_MAJOR 1
+#define KVM_SBI_VERSION_MAJOR 2
 #define KVM_SBI_VERSION_MINOR 0
 
 enum kvm_riscv_sbi_ext_status {
@@ -35,6 +35,9 @@ struct kvm_vcpu_sbi_return {
 struct kvm_vcpu_sbi_extension {
 	unsigned long extid_start;
 	unsigned long extid_end;
+
+	bool default_unavail;
+
 	/**
 	 * SBI extension handler. It can be defined for a given extension or group of
 	 * extension. But it should always return linux error codes rather than SBI
@@ -59,6 +62,7 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
 const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
 				struct kvm_vcpu *vcpu, unsigned long extid);
 int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu);
 
 #ifdef CONFIG_RISCV_SBI_V01
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
@@ -69,6 +73,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
index 59ba1fbaf7..00f3369570 100644
--- a/arch/riscv/include/asm/pgtable-32.h
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -33,4 +33,7 @@
 					  _PAGE_WRITE | _PAGE_EXEC |	\
 					  _PAGE_USER | _PAGE_GLOBAL))
 
+static const __maybe_unused int pgtable_l4_enabled;
+static const __maybe_unused int pgtable_l5_enabled;
+
 #endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
index 7a5097202e..9a2c780a11 100644
--- a/arch/riscv/include/asm/pgtable-64.h
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -126,14 +126,18 @@ enum napot_cont_order {
 
 /*
  * [63:59] T-Head Memory Type definitions:
- *
- * 00000 - NC   Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * bit[63] SO - Strong Order
+ * bit[62] C - Cacheable
+ * bit[61] B - Bufferable
+ * bit[60] SH - Shareable
+ * bit[59] Sec - Trustable
+ * 00110 - NC   Weakly-ordered, Non-cacheable, Bufferable, Shareable, Non-trustable
  * 01110 - PMA  Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
- * 10000 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * 10010 - IO   Strongly-ordered, Non-cacheable, Non-bufferable, Shareable, Non-trustable
  */
 #define _PAGE_PMA_THEAD		((1UL << 62) | (1UL << 61) | (1UL << 60))
-#define _PAGE_NOCACHE_THEAD	0UL
-#define _PAGE_IO_THEAD		(1UL << 63)
+#define _PAGE_NOCACHE_THEAD	((1UL << 61) | (1UL << 60))
+#define _PAGE_IO_THEAD		((1UL << 63) | (1UL << 60))
 #define _PAGE_MTMASK_THEAD	(_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
 
 static inline u64 riscv_page_mtmask(void)
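The corrected T-Head attribute macros can be sanity-checked against the new bit-by-bit legend: reading bits [63:59] as SO/C/B/SH/Sec must reproduce the 01110, 00110 and 10010 patterns quoted in the comment. A quick arithmetic check (a sketch, 64-bit only):

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
            uint64_t pma = (1ULL << 62) | (1ULL << 61) | (1ULL << 60);
            uint64_t nc  = (1ULL << 61) | (1ULL << 60);
            uint64_t io  = (1ULL << 63) | (1ULL << 60);

            /* Extract bits [63:59]: SO, C, B, SH, Sec. */
            assert((pma >> 59) == 0x0e);     /* 01110 */
            assert((nc  >> 59) == 0x06);     /* 00110 */
            assert((io  >> 59) == 0x12);     /* 10010 */
            return 0;
    }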
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
index f896708e83..179bd4afec 100644
--- a/arch/riscv/include/asm/pgtable-bits.h
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -16,9 +16,9 @@
 #define _PAGE_GLOBAL    (1 << 5)    /* Global */
 #define _PAGE_ACCESSED  (1 << 6)    /* Set by hardware on any access */
 #define _PAGE_DIRTY     (1 << 7)    /* Set by hardware on any write */
-#define _PAGE_SOFT      (1 << 8)    /* Reserved for software */
+#define _PAGE_SOFT      (3 << 8)    /* Reserved for software */
 
-#define _PAGE_SPECIAL   _PAGE_SOFT
+#define _PAGE_SPECIAL   (1 << 8)    /* RSW: 0x1 */
 #define _PAGE_TABLE     _PAGE_PRESENT
 
 /*
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
index 511cb385be..74ffb2178f 100644
--- a/arch/riscv/include/asm/pgtable.h
+++ b/arch/riscv/include/asm/pgtable.h
@@ -291,6 +291,7 @@ static inline pte_t pud_pte(pud_t pud)
 }
 
 #ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <asm/cpufeature.h>
 
 static __always_inline bool has_svnapot(void)
 {
@@ -811,7 +812,7 @@ extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
  *	bit            5:	_PAGE_PROT_NONE (zero)
  *	bit            6:	exclusive marker
  *	bits      7 to 11:	swap type
- *	bits 11 to XLEN-1:	swap offset
+ *	bits 12 to XLEN-1:	swap offset
  */
 #define __SWP_TYPE_SHIFT	7
 #define __SWP_TYPE_BITS		5
@@ -898,7 +899,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte)
 #define PAGE_KERNEL		__pgprot(0)
 #define swapper_pg_dir		NULL
 #define TASK_SIZE		0xffffffffUL
-#define VMALLOC_START		0
+#define VMALLOC_START		_AC(0, UL)
 #define VMALLOC_END		TASK_SIZE
 
 #endif /* !CONFIG_MMU */
@@ -914,7 +915,6 @@ extern uintptr_t _dtb_early_pa;
 #define dtb_early_pa	_dtb_early_pa
 #endif /* CONFIG_XIP_KERNEL */
 
 extern u64 satp_mode;
-extern bool pgtable_l4_enabled;
 
 void paging_init(void);
 void misc_mem_init(void);
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
index 4f6af8c6cf..e1944ff075 100644
--- a/arch/riscv/include/asm/processor.h
+++ b/arch/riscv/include/asm/processor.h
@@ -8,6 +8,7 @@
 
 #include <linux/const.h>
 #include <linux/cache.h>
+#include <linux/prctl.h>
 
 #include <vdso/processor.h>
 
@@ -82,6 +83,7 @@ struct thread_struct {
 	unsigned long bad_cause;
 	unsigned long vstate_ctrl;
 	struct __riscv_v_ext_state vstate;
+	unsigned long align_ctl;
 };
 
 /* Whitelist the fstate from the task_struct for hardened usercopy */
@@ -94,6 +96,7 @@ static inline void arch_thread_struct_whitelist(unsigned long *offset,
 
 #define INIT_THREAD {					\
 	.sp = sizeof(init_stack) + (long)&init_stack,	\
+	.align_ctl = PR_UNALIGN_NOPRINT,		\
 }
 
 #define task_pt_regs(tsk)						\
@@ -116,6 +119,8 @@ static inline void wait_for_interrupt(void)
 	__asm__ __volatile__ ("wfi");
 }
 
+extern phys_addr_t dma32_phys_limit;
+
 struct device_node;
 int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
 int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
@@ -134,6 +139,12 @@ extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
 extern long riscv_v_vstate_ctrl_get_current(void);
 #endif /* CONFIG_RISCV_ISA_V */
 
+extern int get_unalign_ctl(struct task_struct *tsk, unsigned long addr);
+extern int set_unalign_ctl(struct task_struct *tsk, unsigned int val);
+
+#define GET_UNALIGN_CTL(tsk, addr)	get_unalign_ctl((tsk), (addr))
+#define SET_UNALIGN_CTL(tsk, val)	set_unalign_ctl((tsk), (val))
+
 #endif /* __ASSEMBLY__ */
 
 #endif /* _ASM_RISCV_PROCESSOR_H */
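With GET_UNALIGN_CTL/SET_UNALIGN_CTL wired to get_unalign_ctl()/set_unalign_ctl() (and align_ctl defaulting to PR_UNALIGN_NOPRINT), the generic prctl knobs become usable on riscv. A minimal userspace sketch; the observable effect depends on CONFIG_RISCV_MISALIGNED and on accesses actually being emulated:

    #include <stdio.h>
    #include <sys/prctl.h>

    int main(void)
    {
            unsigned int ctl = 0;

            /* Suppress logging of emulated unaligned accesses for this task. */
            if (prctl(PR_SET_UNALIGN, PR_UNALIGN_NOPRINT) != 0)
                    perror("PR_SET_UNALIGN");
            if (prctl(PR_GET_UNALIGN, &ctl) == 0)
                    printf("unalign ctl: %#x\n", ctl);
            return 0;
    }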
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
index 5b4a1bf5f4..0892f4421b 100644
--- a/arch/riscv/include/asm/sbi.h
+++ b/arch/riscv/include/asm/sbi.h
@@ -30,6 +30,7 @@ enum sbi_ext_id {
 	SBI_EXT_HSM = 0x48534D,
 	SBI_EXT_SRST = 0x53525354,
 	SBI_EXT_PMU = 0x504D55,
+	SBI_EXT_DBCN = 0x4442434E,
 
 	/* Experimentals extensions must lie within this range */
 	SBI_EXT_EXPERIMENTAL_START = 0x08000000,
@@ -236,6 +237,12 @@ enum sbi_pmu_ctr_type {
 /* Flags defined for counter stop function */
 #define SBI_PMU_STOP_FLAG_RESET (1 << 0)
 
+enum sbi_ext_dbcn_fid {
+	SBI_EXT_DBCN_CONSOLE_WRITE = 0,
+	SBI_EXT_DBCN_CONSOLE_READ = 1,
+	SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2,
+};
+
 #define SBI_SPEC_VERSION_DEFAULT	0x1
 #define SBI_SPEC_VERSION_MAJOR_SHIFT	24
 #define SBI_SPEC_VERSION_MAJOR_MASK	0x7f
@@ -273,9 +280,6 @@ void sbi_set_timer(uint64_t stime_value);
 void sbi_shutdown(void);
 void sbi_send_ipi(unsigned int cpu);
 int sbi_remote_fence_i(const struct cpumask *cpu_mask);
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
-			  unsigned long start,
-			  unsigned long size);
 
 int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
 			       unsigned long start,
diff --git a/arch/riscv/include/asm/scs.h b/arch/riscv/include/asm/scs.h
new file mode 100644
index 0000000000..0e45db78b2
--- /dev/null
+++ b/arch/riscv/include/asm/scs.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_SCS_H
+#define _ASM_SCS_H
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+
+/* Load init_shadow_call_stack to gp. */
+.macro scs_load_init_stack
+	la gp, init_shadow_call_stack
+	XIP_FIXUP_OFFSET gp
+.endm
+
+/* Load the per-CPU IRQ shadow call stack to gp. */
+.macro scs_load_irq_stack tmp
+	load_per_cpu gp, irq_shadow_call_stack_ptr, \tmp
+.endm
+
+/* Load task_scs_sp(current) to gp. */
+.macro scs_load_current
+	REG_L gp, TASK_TI_SCS_SP(tp)
+.endm
+
+/* Load task_scs_sp(current) to gp, but only if tp has changed. */
+.macro scs_load_current_if_task_changed prev
+	beq \prev, tp, _skip_scs
+	scs_load_current
+_skip_scs:
+.endm
+
+/* Save gp to task_scs_sp(current). */
+.macro scs_save_current
+	REG_S gp, TASK_TI_SCS_SP(tp)
+.endm
+
+#else /* CONFIG_SHADOW_CALL_STACK */
+
+.macro scs_load_init_stack
+.endm
+.macro scs_load_irq_stack tmp
+.endm
+.macro scs_load_current
+.endm
+.macro scs_load_current_if_task_changed prev
+.endm
+.macro scs_save_current
+.endm
+
+#endif /* CONFIG_SHADOW_CALL_STACK */
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_SCS_H */
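SBI_EXT_DBCN is the SBI v2.0 debug console extension; its write-byte function takes the character in a0. A sketch of a caller, assuming the kernel's existing sbi_ecall() helper with its (ext, fid, a0..a5) argument order:

    /* Sketch: emit one character via the SBI debug console extension. */
    static void sbi_dbcn_console_putc(char c)
    {
            sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE_BYTE,
                      c, 0, 0, 0, 0, 0);
    }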
diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
index f7e8ef2418..b1495a7e06 100644
--- a/arch/riscv/include/asm/stacktrace.h
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -21,4 +21,9 @@ static inline bool on_thread_stack(void)
 	return !(((unsigned long)(current->stack) ^ current_stack_pointer) &
 		 ~(THREAD_SIZE - 1));
 }
+
+#ifdef CONFIG_VMAP_STACK
+DECLARE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack);
+#endif /* CONFIG_VMAP_STACK */
+
 #endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
index a727be723c..f90d8e42f3 100644
--- a/arch/riscv/include/asm/switch_to.h
+++ b/arch/riscv/include/asm/switch_to.h
@@ -9,7 +9,7 @@
 #include <linux/jump_label.h>
 #include <linux/sched/task_stack.h>
 #include <asm/vector.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/processor.h>
 #include <asm/ptrace.h>
 #include <asm/csr.h>
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
index d18ce0113c..574779900b 100644
--- a/arch/riscv/include/asm/thread_info.h
+++ b/arch/riscv/include/asm/thread_info.h
@@ -57,8 +57,20 @@ struct thread_info {
 	long			user_sp;	/* User stack pointer */
 	int			cpu;
 	unsigned long		syscall_work;	/* SYSCALL_WORK_ flags */
+#ifdef CONFIG_SHADOW_CALL_STACK
+	void			*scs_base;
+	void			*scs_sp;
+#endif
 };
 
+#ifdef CONFIG_SHADOW_CALL_STACK
+#define INIT_SCS							\
+	.scs_base	= init_shadow_call_stack,			\
+	.scs_sp		= init_shadow_call_stack,
+#else
+#define INIT_SCS
+#endif
+
 /*
  * macros/functions for gaining access to the thread information structure
  *
@@ -68,6 +80,7 @@ struct thread_info {
 {						\
 	.flags		= 0,			\
 	.preempt_count	= INIT_PREEMPT_COUNT,	\
+	INIT_SCS				\
 }
 
 void arch_release_task_struct(struct task_struct *tsk);
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
index 120bcf2ed8..50b63b5c15 100644
--- a/arch/riscv/include/asm/tlb.h
+++ b/arch/riscv/include/asm/tlb.h
@@ -15,7 +15,13 @@ static void tlb_flush(struct mmu_gather *tlb);
 
 static inline void tlb_flush(struct mmu_gather *tlb)
 {
-	flush_tlb_mm(tlb->mm);
+#ifdef CONFIG_MMU
+	if (tlb->fullmm || tlb->need_flush_all || tlb->freed_tables)
+		flush_tlb_mm(tlb->mm);
+	else
+		flush_tlb_mm_range(tlb->mm, tlb->start, tlb->end,
+				   tlb_get_unmap_size(tlb));
+#endif
 }
 
 #endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
index a09196f8de..51664ae485 100644
--- a/arch/riscv/include/asm/tlbflush.h
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -11,6 +11,9 @@
 #include <asm/smp.h>
 #include <asm/errata_list.h>
 
+#define FLUSH_TLB_MAX_SIZE	((unsigned long)-1)
+#define FLUSH_TLB_NO_ASID	((unsigned long)-1)
+
 #ifdef CONFIG_MMU
 extern unsigned long asid_mask;
 
@@ -32,9 +35,13 @@ static inline void local_flush_tlb_page(unsigned long addr)
 #if defined(CONFIG_SMP) && defined(CONFIG_MMU)
 void flush_tlb_all(void);
 void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_mm_range(struct mm_struct *mm, unsigned long start,
+			unsigned long end, unsigned int page_size);
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		     unsigned long end);
+void flush_tlb_kernel_range(unsigned long start, unsigned long end);
+void local_flush_tlb_kernel_range(unsigned long start, unsigned long end);
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 #define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -51,14 +58,16 @@ static inline void flush_tlb_range(struct vm_area_struct *vma,
 	local_flush_tlb_all();
 }
 
-#define flush_tlb_mm(mm) flush_tlb_all()
-#endif /* !CONFIG_SMP || !CONFIG_MMU */
-
 /* Flush a range of kernel pages */
 static inline void flush_tlb_kernel_range(unsigned long start,
 	unsigned long end)
 {
-	flush_tlb_all();
+	local_flush_tlb_all();
 }
 
+#define flush_tlb_mm(mm) flush_tlb_all()
+#define flush_tlb_mm_range(mm, start, end, page_size) flush_tlb_all()
+#define local_flush_tlb_kernel_range(start, end) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
 #endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
index c5ee07b3df..87aaef6562 100644
--- a/arch/riscv/include/asm/vector.h
+++ b/arch/riscv/include/asm/vector.h
@@ -15,7 +15,7 @@
 #include <linux/sched.h>
 #include <linux/sched/task_stack.h>
 #include <asm/ptrace.h>
-#include <asm/hwcap.h>
+#include <asm/cpufeature.h>
 #include <asm/csr.h>
 #include <asm/asm.h>
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
index d696d66102..11a71b8533 100644
--- a/arch/riscv/include/uapi/asm/elf.h
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -49,6 +49,7 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_TLS_DTPREL64	9
 #define R_RISCV_TLS_TPREL32	10
 #define R_RISCV_TLS_TPREL64	11
+#define R_RISCV_IRELATIVE	58
 
 /* Relocation types not used by the dynamic linker */
 #define R_RISCV_BRANCH		16
@@ -81,7 +82,6 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_ALIGN		43
 #define R_RISCV_RVC_BRANCH	44
 #define R_RISCV_RVC_JUMP	45
-#define R_RISCV_LUI		46
 #define R_RISCV_GPREL_I		47
 #define R_RISCV_GPREL_S		48
 #define R_RISCV_TPREL_I		49
@@ -93,6 +93,9 @@ typedef union __riscv_fp_state elf_fpregset_t;
 #define R_RISCV_SET16		55
 #define R_RISCV_SET32		56
 #define R_RISCV_32_PCREL	57
+#define R_RISCV_PLT32		59
+#define R_RISCV_SET_ULEB128	60
+#define R_RISCV_SUB_ULEB128	61
 
 #endif /* _UAPI_ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h
index 006bfb4834..b659ffcfcd 100644
--- a/arch/riscv/include/uapi/asm/hwprobe.h
+++ b/arch/riscv/include/uapi/asm/hwprobe.h
@@ -10,7 +10,7 @@
 
 /*
  * Interface for probing hardware capabilities from userspace, see
- * Documentation/riscv/hwprobe.rst for more information.
+ * Documentation/arch/riscv/hwprobe.rst for more information.
 */
 struct riscv_hwprobe {
 	__s64 key;
@@ -29,6 +29,7 @@ struct riscv_hwprobe {
 #define		RISCV_HWPROBE_EXT_ZBA		(1 << 3)
 #define		RISCV_HWPROBE_EXT_ZBB		(1 << 4)
 #define		RISCV_HWPROBE_EXT_ZBS		(1 << 5)
+#define		RISCV_HWPROBE_EXT_ZICBOZ	(1 << 6)
 #define RISCV_HWPROBE_KEY_CPUPERF_0	5
 #define		RISCV_HWPROBE_MISALIGNED_UNKNOWN	(0 << 0)
 #define		RISCV_HWPROBE_MISALIGNED_EMULATED	(1 << 0)
@@ -36,6 +37,7 @@ struct riscv_hwprobe {
 #define		RISCV_HWPROBE_MISALIGNED_FAST		(3 << 0)
 #define		RISCV_HWPROBE_MISALIGNED_UNSUPPORTED	(4 << 0)
 #define		RISCV_HWPROBE_MISALIGNED_MASK		(7 << 0)
+#define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE	6
 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. */
 
 #endif
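Paired with the RISCV_HWPROBE_EXT_ZICBOZ flag above, the new ZICBOZ_BLOCK_SIZE key lets userspace size its cbo.zero loops. A sketch of querying it through the raw syscall (no libc wrapper is assumed; on an unrecognized key the kernel reports the pair's key as -1):

    #include <asm/hwprobe.h>
    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    int main(void)
    {
            struct riscv_hwprobe pair = {
                    .key = RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE,
            };

            /* args: pairs, pair_count, cpusetsize, cpus (NULL = all), flags */
            if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0) == 0 &&
                pair.key >= 0)
                    printf("cbo.zero block size: %llu\n",
                           (unsigned long long)pair.value);
            return 0;
    }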
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 992c5e4071..60d3b21dea 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -80,6 +80,7 @@ struct kvm_riscv_csr {
 	unsigned long sip;
 	unsigned long satp;
 	unsigned long scounteren;
+	unsigned long senvcfg;
 };
 
 /* AIA CSR registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
@@ -93,6 +94,11 @@ struct kvm_riscv_aia_csr {
 	unsigned long iprio2h;
 };
 
+/* Smstateen CSR for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
+struct kvm_riscv_smstateen_csr {
+	unsigned long sstateen0;
+};
+
 /* TIMER registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */
 struct kvm_riscv_timer {
 	__u64 frequency;
@@ -131,6 +137,8 @@ enum KVM_RISCV_ISA_EXT_ID {
 	KVM_RISCV_ISA_EXT_ZICSR,
 	KVM_RISCV_ISA_EXT_ZIFENCEI,
 	KVM_RISCV_ISA_EXT_ZIHPM,
+	KVM_RISCV_ISA_EXT_SMSTATEEN,
+	KVM_RISCV_ISA_EXT_ZICOND,
 	KVM_RISCV_ISA_EXT_MAX,
 };
 
@@ -148,6 +156,7 @@ enum KVM_RISCV_SBI_EXT_ID {
 	KVM_RISCV_SBI_EXT_PMU,
 	KVM_RISCV_SBI_EXT_EXPERIMENTAL,
 	KVM_RISCV_SBI_EXT_VENDOR,
+	KVM_RISCV_SBI_EXT_DBCN,
 	KVM_RISCV_SBI_EXT_MAX,
 };
 
@@ -178,10 +187,13 @@ enum KVM_RISCV_SBI_EXT_ID {
 #define KVM_REG_RISCV_CSR		(0x03 << KVM_REG_RISCV_TYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_GENERAL	(0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_AIA		(0x1 << KVM_REG_RISCV_SUBTYPE_SHIFT)
+#define KVM_REG_RISCV_CSR_SMSTATEEN	(0x2 << KVM_REG_RISCV_SUBTYPE_SHIFT)
 #define KVM_REG_RISCV_CSR_REG(name)	\
 		(offsetof(struct kvm_riscv_csr, name) / sizeof(unsigned long))
 #define KVM_REG_RISCV_CSR_AIA_REG(name)	\
 	(offsetof(struct kvm_riscv_aia_csr, name) / sizeof(unsigned long))
+#define KVM_REG_RISCV_CSR_SMSTATEEN_REG(name)	\
+	(offsetof(struct kvm_riscv_smstateen_csr, name) / sizeof(unsigned long))
 
 /* Timer registers are mapped as type 4 */
 #define KVM_REG_RISCV_TIMER		(0x04 << KVM_REG_RISCV_TYPE_SHIFT)
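The new SMSTATEEN subtype composes into a ONE_REG id in the usual way; fetching the guest's sstateen0 with KVM_GET_ONE_REG would use an id built roughly like this (a sketch; KVM_REG_SIZE_U64 is shown for a 64-bit VCPU, where unsigned long is 8 bytes):

    #include <linux/kvm.h>
    #include <stdint.h>

    /* Sketch: ONE_REG id for the guest sstateen0 CSR (64-bit VCPU). */
    static uint64_t kvm_riscv_sstateen0_id(void)
    {
            return KVM_REG_RISCV | KVM_REG_SIZE_U64 |
                   KVM_REG_RISCV_CSR | KVM_REG_RISCV_CSR_SMSTATEEN |
                   KVM_REG_RISCV_CSR_SMSTATEEN_REG(sstateen0);
    }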