author:    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
committer: Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:03 +0000
commit:    01a69402cf9d38ff180345d55c2ee51c7e89fbc7
tree:      b406c5242a088c4f59c6e4b719b783f43aca6ae9 /arch/riscv/kernel
parent:    Adding upstream version 6.7.12.
Adding upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/kernel')
36 files changed, 1581 insertions(+), 556 deletions(-)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 40d054939a..604d6bf7e4 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -12,7 +12,7 @@ endif CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,) -ifdef CONFIG_KEXEC +ifdef CONFIG_KEXEC_CORE AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax) endif @@ -52,6 +52,7 @@ obj-y += setup.o obj-y += signal.o obj-y += syscall_table.o obj-y += sys_riscv.o +obj-y += sys_hwprobe.o obj-y += time.o obj-y += traps.o obj-y += riscv_ksyms.o @@ -65,6 +66,7 @@ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_RISCV_ISA_V) += vector.o +obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += cpu_ops.o @@ -87,6 +89,7 @@ obj-$(CONFIG_SMP) += sbi-ipi.o obj-$(CONFIG_SMP) += cpu_ops_sbi.o endif obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c index 820158d7a2..6ec9dbd729 100644 --- a/arch/riscv/kernel/cfi.c +++ b/arch/riscv/kernel/cfi.c @@ -4,7 +4,7 @@ * * Copyright (C) 2023 Google LLC */ -#include <asm/cfi.h> +#include <linux/cfi.h> #include <asm/insn.h> /* diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c index 457a18efcb..28b58fc5ad 100644 --- a/arch/riscv/kernel/cpu-hotplug.c +++ b/arch/riscv/kernel/cpu-hotplug.c @@ -18,7 +18,7 @@ bool cpu_has_hotplug(unsigned int cpu) { - if (cpu_ops[cpu]->cpu_stop) + if (cpu_ops->cpu_stop) return true; return false; @@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu) */ int __cpu_disable(void) { - int ret = 0; unsigned int cpu = smp_processor_id(); - if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop) + if (!cpu_ops->cpu_stop) return -EOPNOTSUPP; - if (cpu_ops[cpu]->cpu_disable) - ret = cpu_ops[cpu]->cpu_disable(cpu); - - if (ret) - return ret; - remove_cpu_topology(cpu); numa_remove_cpu(cpu); set_cpu_online(cpu, false); riscv_ipi_disable(); irq_migrate_all_off_this_cpu(); - return ret; + return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) pr_notice("CPU%u: off\n", cpu); /* Verify from the firmware if the cpu is really stopped*/ - if (cpu_ops[cpu]->cpu_is_stopped) - ret = cpu_ops[cpu]->cpu_is_stopped(cpu); + if (cpu_ops->cpu_is_stopped) + ret = cpu_ops->cpu_is_stopped(cpu); if (ret) pr_warn("CPU%d may not have stopped: %d\n", cpu, ret); } @@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void) cpuhp_ap_report_dead(); - cpu_ops[smp_processor_id()]->cpu_stop(); + cpu_ops->cpu_stop(); /* It should never reach here */ BUG(); } diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c index eb479a88a9..6a8bd8f4db 100644 --- a/arch/riscv/kernel/cpu_ops.c +++ b/arch/riscv/kernel/cpu_ops.c @@ -13,25 +13,21 @@ #include <asm/sbi.h> #include <asm/smp.h> -const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; +const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait; extern const struct cpu_operations cpu_ops_sbi; #ifndef CONFIG_RISCV_BOOT_SPINWAIT const struct cpu_operations cpu_ops_spinwait = { - .name = "", - .cpu_prepare = NULL, .cpu_start = NULL, }; 
#endif -void __init cpu_set_ops(int cpuid) +void __init cpu_set_ops(void) { #if IS_ENABLED(CONFIG_RISCV_SBI) if (sbi_probe_extension(SBI_EXT_HSM)) { - if (!cpuid) - pr_info("SBI HSM extension detected\n"); - cpu_ops[cpuid] = &cpu_ops_sbi; - } else + pr_info("SBI HSM extension detected\n"); + cpu_ops = &cpu_ops_sbi; + } #endif - cpu_ops[cpuid] = &cpu_ops_spinwait; } diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c index efa0f08166..1cc7df740e 100644 --- a/arch/riscv/kernel/cpu_ops_sbi.c +++ b/arch/riscv/kernel/cpu_ops_sbi.c @@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle) return sbi_hsm_hart_start(hartid, boot_addr, hsm_data); } -static int sbi_cpu_prepare(unsigned int cpuid) -{ - if (!cpu_ops_sbi.cpu_start) { - pr_err("cpu start method not defined for CPU [%d]\n", cpuid); - return -ENODEV; - } - return 0; -} - #ifdef CONFIG_HOTPLUG_CPU -static int sbi_cpu_disable(unsigned int cpuid) -{ - if (!cpu_ops_sbi.cpu_stop) - return -EOPNOTSUPP; - return 0; -} - static void sbi_cpu_stop(void) { int ret; @@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid) #endif const struct cpu_operations cpu_ops_sbi = { - .name = "sbi", - .cpu_prepare = sbi_cpu_prepare, .cpu_start = sbi_cpu_start, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = sbi_cpu_disable, .cpu_stop = sbi_cpu_stop, .cpu_is_stopped = sbi_cpu_is_stopped, #endif diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c index d98d19226b..613872b0a2 100644 --- a/arch/riscv/kernel/cpu_ops_spinwait.c +++ b/arch/riscv/kernel/cpu_ops_spinwait.c @@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid, WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle); } -static int spinwait_cpu_prepare(unsigned int cpuid) -{ - if (!cpu_ops_spinwait.cpu_start) { - pr_err("cpu start method not defined for CPU [%d]\n", cpuid); - return -ENODEV; - } - return 0; -} - static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) { /* @@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) } const struct cpu_operations cpu_ops_spinwait = { - .name = "spinwait", - .cpu_prepare = spinwait_cpu_prepare, .cpu_start = spinwait_cpu_start, }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 8677788133..79a5a35fab 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -8,8 +8,10 @@ #include <linux/acpi.h> #include <linux/bitmap.h> +#include <linux/cpu.h> #include <linux/cpuhotplug.h> #include <linux/ctype.h> +#include <linux/jump_label.h> #include <linux/log2.h> #include <linux/memory.h> #include <linux/module.h> @@ -45,6 +47,8 @@ struct riscv_isainfo hart_isa[NR_CPUS]; /* Performance information */ DEFINE_PER_CPU(long, misaligned_access_speed); +static cpumask_t fast_misaligned_access; + /** * riscv_isa_extension_base() - Get base extension word * @@ -71,7 +75,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); * * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. */ -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit) { const unsigned long *bmap = (isa_bitmap) ? 
isa_bitmap : riscv_isa; @@ -103,17 +107,111 @@ static bool riscv_isa_extension_check(int id) return false; } return true; + case RISCV_ISA_EXT_INVALID: + return false; } return true; } -#define __RISCV_ISA_EXT_DATA(_name, _id) { \ - .name = #_name, \ - .property = #_name, \ - .id = _id, \ +#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) { \ + .name = #_name, \ + .property = #_name, \ + .id = _id, \ + .subset_ext_ids = _subset_exts, \ + .subset_ext_size = _subset_exts_size \ } +#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0) + +/* Used to declare pure "lasso" extension (Zk for instance) */ +#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ + _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts)) + +/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ +#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ + _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts)) + +static const unsigned int riscv_zk_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, + RISCV_ISA_EXT_ZBKX, + RISCV_ISA_EXT_ZKND, + RISCV_ISA_EXT_ZKNE, + RISCV_ISA_EXT_ZKR, + RISCV_ISA_EXT_ZKT, +}; + +static const unsigned int riscv_zkn_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, + RISCV_ISA_EXT_ZBKX, + RISCV_ISA_EXT_ZKND, + RISCV_ISA_EXT_ZKNE, + RISCV_ISA_EXT_ZKNH, +}; + +static const unsigned int riscv_zks_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, + RISCV_ISA_EXT_ZKSED, + RISCV_ISA_EXT_ZKSH +}; + +#define RISCV_ISA_EXT_ZVKN \ + RISCV_ISA_EXT_ZVKNED, \ + RISCV_ISA_EXT_ZVKNHB, \ + RISCV_ISA_EXT_ZVKB, \ + RISCV_ISA_EXT_ZVKT + +static const unsigned int riscv_zvkn_bundled_exts[] = { + RISCV_ISA_EXT_ZVKN +}; + +static const unsigned int riscv_zvknc_bundled_exts[] = { + RISCV_ISA_EXT_ZVKN, + RISCV_ISA_EXT_ZVBC +}; + +static const unsigned int riscv_zvkng_bundled_exts[] = { + RISCV_ISA_EXT_ZVKN, + RISCV_ISA_EXT_ZVKG +}; + +#define RISCV_ISA_EXT_ZVKS \ + RISCV_ISA_EXT_ZVKSED, \ + RISCV_ISA_EXT_ZVKSH, \ + RISCV_ISA_EXT_ZVKB, \ + RISCV_ISA_EXT_ZVKT + +static const unsigned int riscv_zvks_bundled_exts[] = { + RISCV_ISA_EXT_ZVKS +}; + +static const unsigned int riscv_zvksc_bundled_exts[] = { + RISCV_ISA_EXT_ZVKS, + RISCV_ISA_EXT_ZVBC +}; + +static const unsigned int riscv_zvksg_bundled_exts[] = { + RISCV_ISA_EXT_ZVKS, + RISCV_ISA_EXT_ZVKG +}; + +static const unsigned int riscv_zvbb_exts[] = { + RISCV_ISA_EXT_ZVKB +}; + +/* + * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V + * privileged ISA, the existence of the CSRs is implied by any extension which + * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the + * existence of the CSR, and treat it as a subset of those other extensions. + */ +static const unsigned int riscv_xlinuxenvcfg_exts[] = { + RISCV_ISA_EXT_XLINUXENVCFG +}; + /* * The canonical order of ISA extension names in the ISA string is defined in * chapter 27 of the unprivileged specification. 
@@ -161,23 +259,57 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c), - __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b), - __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k), - __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j), - __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p), __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v), __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h), - __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), - __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), + __RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts), + __RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI), + __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL), __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), + __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), + __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), + __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), + __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA), __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), + __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC), + __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB), + __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC), + __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX), __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS), + __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts), + __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts), + __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND), + __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE), + __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH), + __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR), + __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts), + __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT), + __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED), + __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH), + __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO), + __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts), + __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC), + __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), + __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), + __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), + __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG), + __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts), + __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts), + __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED), + __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts), + __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA), + __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB), + __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts), + __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts), + __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED), + __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH), + __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), + __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), @@ -190,6 +322,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); +static void __init match_isa_ext(const struct 
riscv_isa_ext_data *ext, const char *name, + const char *name_end, struct riscv_isainfo *isainfo) +{ + if ((name_end - name == strlen(ext->name)) && + !strncasecmp(name, ext->name, name_end - name)) { + /* + * If this is a bundle, enable all the ISA extensions that + * comprise the bundle. + */ + if (ext->subset_ext_size) { + for (int i = 0; i < ext->subset_ext_size; i++) { + if (riscv_isa_extension_check(ext->subset_ext_ids[i])) + set_bit(ext->subset_ext_ids[i], isainfo->isa); + } + } + + /* + * This is valid even for bundle extensions which use the RISCV_ISA_EXT_INVALID id + * (rejected by riscv_isa_extension_check()). + */ + if (riscv_isa_extension_check(ext->id)) + set_bit(ext->id, isainfo->isa); + } +} + static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo, unsigned long *isa2hwcap, const char *isa) { @@ -322,14 +479,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc if (*isa == '_') ++isa; -#define SET_ISA_EXT_MAP(name, bit) \ - do { \ - if ((ext_end - ext == strlen(name)) && \ - !strncasecmp(ext, name, strlen(name)) && \ - riscv_isa_extension_check(bit)) \ - set_bit(bit, isainfo->isa); \ - } while (false) \ - if (unlikely(ext_err)) continue; if (!ext_long) { @@ -341,10 +490,8 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc } } else { for (int i = 0; i < riscv_isa_ext_count; i++) - SET_ISA_EXT_MAP(riscv_isa_ext[i].name, - riscv_isa_ext[i].id); + match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo); } -#undef SET_ISA_EXT_MAP } } @@ -457,18 +604,26 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) } for (int i = 0; i < riscv_isa_ext_count; i++) { + const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; + if (of_property_match_string(cpu_node, "riscv,isa-extensions", - riscv_isa_ext[i].property) < 0) + ext->property) < 0) continue; - if (!riscv_isa_extension_check(riscv_isa_ext[i].id)) - continue; + if (ext->subset_ext_size) { + for (int j = 0; j < ext->subset_ext_size; j++) { + if (riscv_isa_extension_check(ext->subset_ext_ids[j])) + set_bit(ext->subset_ext_ids[j], isainfo->isa); + } + } - /* Only single letter extensions get set in hwcap */ - if (strnlen(riscv_isa_ext[i].name, 2) == 1) - this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; + if (riscv_isa_extension_check(ext->id)) { + set_bit(ext->id, isainfo->isa); - set_bit(riscv_isa_ext[i].id, isainfo->isa); + /* Only single letter extensions get set in hwcap */ + if (strnlen(riscv_isa_ext[i].name, 2) == 1) + this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; + } } of_node_put(cpu_node); @@ -658,6 +813,16 @@ static int check_unaligned_access(void *param) (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow"); per_cpu(misaligned_access_speed, cpu) = speed; + + /* + * Set the value of fast_misaligned_access of a CPU. These operations + * are atomic to avoid race conditions.
+ */ + if (speed == RISCV_HWPROBE_MISALIGNED_FAST) + cpumask_set_cpu(cpu, &fast_misaligned_access); + else + cpumask_clear_cpu(cpu, &fast_misaligned_access); + return 0; } @@ -670,13 +835,69 @@ static void check_unaligned_access_nonboot_cpu(void *param) check_unaligned_access(pages[cpu]); } +DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key); + +static void modify_unaligned_access_branches(cpumask_t *mask, int weight) +{ + if (cpumask_weight(mask) == weight) + static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key); + else + static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key); +} + +static void set_unaligned_access_static_branches_except_cpu(int cpu) +{ + /* + * Same as set_unaligned_access_static_branches, except excludes the + * given CPU from the result. When a CPU is hotplugged into an offline + * state, this function is called before the CPU is set to offline in + * the cpumask, and thus the CPU needs to be explicitly excluded. + */ + + cpumask_t fast_except_me; + + cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask); + cpumask_clear_cpu(cpu, &fast_except_me); + + modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1); +} + +static void set_unaligned_access_static_branches(void) +{ + /* + * This will be called after check_unaligned_access_all_cpus so the + * result of unaligned access speed for all CPUs will be available. + * + * To avoid the number of online cpus changing between reading + * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be + * held before calling this function. + */ + + cpumask_t fast_and_online; + + cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask); + + modify_unaligned_access_branches(&fast_and_online, num_online_cpus()); +} + +static int lock_and_set_unaligned_access_static_branch(void) +{ + cpus_read_lock(); + set_unaligned_access_static_branches(); + cpus_read_unlock(); + + return 0; +} + +arch_initcall_sync(lock_and_set_unaligned_access_static_branch); + static int riscv_online_cpu(unsigned int cpu) { static struct page *buf; /* We are already set since the last check */ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN) - return 0; + goto exit; buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); if (!buf) { @@ -686,6 +907,17 @@ static int riscv_online_cpu(unsigned int cpu) check_unaligned_access(buf); __free_pages(buf, MISALIGNED_BUFFER_ORDER); + +exit: + set_unaligned_access_static_branches(); + + return 0; +} + +static int riscv_offline_cpu(unsigned int cpu) +{ + set_unaligned_access_static_branches_except_cpu(cpu); + return 0; } @@ -720,9 +952,12 @@ static int check_unaligned_access_all_cpus(void) /* Check core 0. */ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true); - /* Setup hotplug callback for any new CPUs that come online. */ + /* + * Setup hotplug callbacks for any new CPUs that come online or go + * offline. 
+ */ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", - riscv_online_cpu, NULL); + riscv_online_cpu, riscv_offline_cpu); out: unaligned_emulation_finish(); diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c index aa6209a74c..b64bf1624a 100644 --- a/arch/riscv/kernel/efi.c +++ b/arch/riscv/kernel/efi.c @@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { efi_memory_desc_t *md = data; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); unsigned long val; if (md->attribute & EFI_MEMORY_RO) { diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index e60fbd8660..5bd1ec3341 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -216,7 +216,6 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, if (ret) goto out; kernel_start = image->start; - pr_notice("The entry point of kernel at 0x%lx\n", image->start); /* Add the kernel binary to the image */ ret = riscv_kexec_elf_load(image, &ehdr, &elf_info, @@ -252,8 +251,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, image->elf_load_addr = kbuf.mem; image->elf_headers_sz = headers_sz; - pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - image->elf_load_addr, kbuf.bufsz, kbuf.memsz); + kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + image->elf_load_addr, kbuf.bufsz, kbuf.memsz); /* Setup cmdline for kdump kernel case */ modified_cmdline = setup_kdump_cmdline(image, cmdline, @@ -275,6 +274,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, pr_err("Error loading purgatory ret=%d\n", ret); goto out; } + kexec_dprintk("Loaded purgatory at 0x%lx\n", kbuf.mem); + ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry", &kernel_start, sizeof(kernel_start), 0); @@ -293,7 +294,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, if (ret) goto out; initrd_pbase = kbuf.mem; - pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase); + kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_pbase); } /* Add the DTB to the image */ @@ -318,7 +319,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, } /* Cache the fdt buffer address for memory cleanup */ image->arch.fdt = fdt; - pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); + kexec_dprintk("Loaded device tree at 0x%lx\n", kbuf.mem); goto out; out_free_fdt: diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 54ca4564a9..9d1a305d55 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -83,6 +83,10 @@ SYM_CODE_START(handle_exception) /* Load the kernel shadow call stack pointer if coming from userspace */ scs_load_current_if_task_changed s5 +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + move a0, sp + call riscv_v_context_nesting_start +#endif move a0, sp /* pt_regs */ la ra, ret_from_exception @@ -138,6 +142,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception) */ csrw CSR_SCRATCH, tp 1: +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + move a0, sp + call riscv_v_context_nesting_end +#endif REG_L a0, PT_STATUS(sp) /* * The current load reservation is effectively part of the processor's diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 03a6434a8c..f5aa24d9e1 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -178,32 +178,28 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long 
self_addr, } #ifdef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs) +{ + struct pt_regs *regs = arch_ftrace_get_regs(fregs); + unsigned long *parent = (unsigned long *)&regs->ra; + + prepare_ftrace_return(parent, ip, frame_pointer(regs)); +} +#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ extern void ftrace_graph_call(void); -extern void ftrace_graph_regs_call(void); int ftrace_enable_ftrace_graph_caller(void) { - int ret; - - ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, - (unsigned long)&prepare_ftrace_return, true, true); - if (ret) - return ret; - - return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, + return __ftrace_modify_call((unsigned long)&ftrace_graph_call, (unsigned long)&prepare_ftrace_return, true, true); } int ftrace_disable_ftrace_graph_caller(void) { - int ret; - - ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, - (unsigned long)&prepare_ftrace_return, false, true); - if (ret) - return ret; - - return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, + return __ftrace_modify_call((unsigned long)&ftrace_graph_call, (unsigned long)&prepare_ftrace_return, false, true); } +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 663881785b..4236a69c35 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -11,7 +11,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/csr.h> -#include <asm/cpu_ops_sbi.h> #include <asm/hwcap.h> #include <asm/image.h> #include <asm/scs.h> @@ -266,10 +265,12 @@ SYM_CODE_START(_start_kernel) la sp, _end + THREAD_SIZE XIP_FIXUP_OFFSET sp mv s0, a0 + mv s1, a1 call __copy_data - /* Restore a0 copy */ + /* Restore a0 & a1 copy */ mv a0, s0 + mv a1, s1 #endif #ifndef CONFIG_XIP_KERNEL diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c new file mode 100644 index 0000000000..6afe80c7f0 --- /dev/null +++ b/arch/riscv/kernel/kernel_mode_vector.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2021 SiFive + */ +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <linux/percpu.h> +#include <linux/preempt.h> +#include <linux/types.h> + +#include <asm/vector.h> +#include <asm/switch_to.h> +#include <asm/simd.h> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +#include <asm/asm-prototypes.h> +#endif + +static inline void riscv_v_flags_set(u32 flags) +{ + WRITE_ONCE(current->thread.riscv_v_flags, flags); +} + +static inline void riscv_v_start(u32 flags) +{ + int orig; + + orig = riscv_v_flags(); + BUG_ON((orig & flags) != 0); + riscv_v_flags_set(orig | flags); + barrier(); +} + +static inline void riscv_v_stop(u32 flags) +{ + int orig; + + barrier(); + orig = riscv_v_flags(); + BUG_ON((orig & flags) == 0); + riscv_v_flags_set(orig & ~flags); +} + +/* + * Claim ownership of the CPU vector context for use by the calling context. + * + * The caller may freely manipulate the vector context metadata until + * put_cpu_vector_context() is called.
+ */ +void get_cpu_vector_context(void) +{ + /* + * disable softirqs so it is impossible for softirqs to nest + * get_cpu_vector_context() when kernel is actively using Vector. + */ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_disable(); + else + preempt_disable(); + + riscv_v_start(RISCV_KERNEL_MODE_V); +} + +/* + * Release the CPU vector context. + * + * Must be called from a context in which get_cpu_vector_context() was + * previously called, with no call to put_cpu_vector_context() in the + * meantime. + */ +void put_cpu_vector_context(void) +{ + riscv_v_stop(RISCV_KERNEL_MODE_V); + + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_enable(); + else + preempt_enable(); +} + +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +static __always_inline u32 *riscv_v_flags_ptr(void) +{ + return &current->thread.riscv_v_flags; +} + +static inline void riscv_preempt_v_set_dirty(void) +{ + *riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY; +} + +static inline void riscv_preempt_v_reset_flags(void) +{ + *riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY | RISCV_PREEMPT_V_NEED_RESTORE); +} + +static inline void riscv_v_ctx_depth_inc(void) +{ + *riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH; +} + +static inline void riscv_v_ctx_depth_dec(void) +{ + *riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH; +} + +static inline u32 riscv_v_ctx_get_depth(void) +{ + return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK; +} + +static int riscv_v_stop_kernel_context(void) +{ + if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current)) + return 1; + + riscv_preempt_v_clear_dirty(current); + riscv_v_stop(RISCV_PREEMPT_V); + return 0; +} + +static int riscv_v_start_kernel_context(bool *is_nested) +{ + struct __riscv_v_ext_state *kvstate, *uvstate; + + kvstate = &current->thread.kernel_vstate; + if (!kvstate->datap) + return -ENOENT; + + if (riscv_preempt_v_started(current)) { + WARN_ON(riscv_v_ctx_get_depth() == 0); + *is_nested = true; + get_cpu_vector_context(); + if (riscv_preempt_v_dirty(current)) { + __riscv_v_vstate_save(kvstate, kvstate->datap); + riscv_preempt_v_clear_dirty(current); + } + riscv_preempt_v_set_restore(current); + return 0; + } + + /* Transfer the ownership of V from user to kernel, then save */ + riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY); + if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) { + uvstate = &current->thread.vstate; + __riscv_v_vstate_save(uvstate, uvstate->datap); + } + riscv_preempt_v_clear_dirty(current); + return 0; +} + +/* low-level V context handling code, called with irq disabled */ +asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs) +{ + int depth; + + if (!riscv_preempt_v_started(current)) + return; + + depth = riscv_v_ctx_get_depth(); + if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY) + riscv_preempt_v_set_dirty(); + + riscv_v_ctx_depth_inc(); +} + +asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs) +{ + struct __riscv_v_ext_state *vstate = &current->thread.kernel_vstate; + u32 depth; + + WARN_ON(!irqs_disabled()); + + if (!riscv_preempt_v_started(current)) + return; + + riscv_v_ctx_depth_dec(); + depth = riscv_v_ctx_get_depth(); + if (depth == 0) { + if (riscv_preempt_v_restore(current)) { + __riscv_v_vstate_restore(vstate, vstate->datap); + __riscv_v_vstate_clean(regs); + riscv_preempt_v_reset_flags(); + } + } +} +#else +#define riscv_v_start_kernel_context(nested) (-ENOENT) +#define riscv_v_stop_kernel_context() (-ENOENT) +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */ + +/* + * kernel_vector_begin(): obtain the CPU vector registers for use
by the calling + * context + * + * Must not be called unless may_use_simd() returns true. + * Task context in the vector registers is saved back to memory as necessary. + * + * A matching call to kernel_vector_end() must be made before returning from the + * calling context. + * + * The caller may freely use the vector registers until kernel_vector_end() is + * called. + */ +void kernel_vector_begin(void) +{ + bool nested = false; + + if (WARN_ON(!has_vector())) + return; + + BUG_ON(!may_use_simd()); + + if (riscv_v_start_kernel_context(&nested)) { + get_cpu_vector_context(); + riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current)); + } + + if (!nested) + riscv_v_vstate_set_restore(current, task_pt_regs(current)); + + riscv_v_enable(); +} +EXPORT_SYMBOL_GPL(kernel_vector_begin); + +/* + * kernel_vector_end(): give the CPU vector registers back to the current task + * + * Must be called from a context in which kernel_vector_begin() was previously + * called, with no call to kernel_vector_end() in the meantime. + * + * The caller must not use the vector registers after this function is called, + * unless kernel_vector_begin() is called again in the meantime. + */ +void kernel_vector_end(void) +{ + if (WARN_ON(!has_vector())) + return; + + riscv_v_disable(); + + if (riscv_v_stop_kernel_context()) + put_cpu_vector_context(); +} +EXPORT_SYMBOL_GPL(kernel_vector_end);
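
For readers new to the kernel-mode vector API added above, here is a minimal sketch of a caller. Only kernel_vector_begin(), kernel_vector_end() and may_use_simd() come from this patch; the xor routine and its names are hypothetical:

```c
/*
 * Hypothetical user of the kernel-mode vector API above; everything
 * except kernel_vector_begin()/kernel_vector_end()/may_use_simd()
 * is illustrative and not part of this patch.
 */
#include <linux/types.h>
#include <asm/simd.h>
#include <asm/vector.h>

/* Hypothetical V-accelerated body, implemented elsewhere in asm. */
void vector_xor_blocks(u8 *dst, const u8 *src, size_t len);

void xor_blocks_accelerated(u8 *dst, const u8 *src, size_t len)
{
	size_t i;

	if (!may_use_simd()) {
		/* Scalar fallback for contexts where V must not be touched. */
		for (i = 0; i < len; i++)
			dst[i] ^= src[i];
		return;
	}

	kernel_vector_begin();			/* claim V; user state saved as needed */
	vector_xor_blocks(dst, src, len);
	kernel_vector_end();			/* release V; user state restored lazily */
}
```
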
diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c index 2d139b724b..ed9cad20c0 100644 --- a/arch/riscv/kernel/machine_kexec.c +++ b/arch/riscv/kernel/machine_kexec.c @@ -19,30 +19,6 @@ #include <linux/irq.h> /* - * kexec_image_info - Print received image details - */ -static void -kexec_image_info(const struct kimage *image) -{ - unsigned long i; - - pr_debug("Kexec image info:\n"); - pr_debug("\ttype: %d\n", image->type); - pr_debug("\tstart: %lx\n", image->start); - pr_debug("\thead: %lx\n", image->head); - pr_debug("\tnr_segments: %lu\n", image->nr_segments); - - for (i = 0; i < image->nr_segments; i++) { - pr_debug("\t segment[%lu]: %016lx - %016lx", i, - image->segment[i].mem, - image->segment[i].mem + image->segment[i].memsz); - pr_debug("\t\t0x%lx bytes, %lu pages\n", - (unsigned long) image->segment[i].memsz, - (unsigned long) image->segment[i].memsz / PAGE_SIZE); - } -} - -/* * machine_kexec_prepare - Initialize kexec * * This function is called from do_kexec_load, when the user has @@ -60,8 +36,6 @@ machine_kexec_prepare(struct kimage *image) unsigned int control_code_buffer_sz = 0; int i = 0; - kexec_image_info(image); - /* Find the Flattened Device Tree and save its physical address */ for (i = 0; i < image->nr_segments; i++) { if (image->segment[i].memsz <= sizeof(fdt)) diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S index 58dd96a2a1..b7561288e8 100644 --- a/arch/riscv/kernel/mcount-dyn.S +++ b/arch/riscv/kernel/mcount-dyn.S @@ -3,12 +3,12 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/csr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> -#include <asm-generic/export.h> #include <asm/ftrace.h> .text @@ -57,31 +57,150 @@ .endm #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - .macro SAVE_ALL + +/** +* SAVE_ABI_REGS - save regs against the pt_regs struct +* +* @all: tell if saving all the regs +* +* If all is set, all the regs will be saved, otherwise only ABI +* related regs (a0-a7,epc,ra and optional s0) will be saved. +* +* After the stack is established, +* +* 0(sp) stores the PC of the traced function which can be accessed +* by &(fregs)->regs->epc in tracing function. Note that the real +* function entry address should be computed with -FENTRY_RA_OFFSET. +* +* 8(sp) stores the function return address (i.e. parent IP) that +* can be accessed by &(fregs)->regs->ra in tracing function. +* +* The other regs are saved at the respective location and accessed +* by the respective pt_regs member. +* +* Here is the layout of stack for your reference. +* +* PT_SIZE_ON_STACK -> +++++++++ +* + ..... + +* + t3-t6 + +* + s2-s11+ +* + a0-a7 + --++++-> ftrace_caller saved +* + s1 + + +* + s0 + --+ +* + t0-t2 + + +* + tp + + +* + gp + + +* + sp + + +* + ra + --+ // parent IP +* sp -> + epc + --+ // PC +* +++++++++ +**/ .macro SAVE_ABI_REGS, all=0 addi sp, sp, -PT_SIZE_ON_STACK - REG_S t0, PT_EPC(sp) - REG_S x1, PT_RA(sp) - REG_S x2, PT_SP(sp) - REG_S x3, PT_GP(sp) - REG_S x4, PT_TP(sp) - REG_S x5, PT_T0(sp) - save_from_x6_to_x31 + REG_S t0, PT_EPC(sp) + REG_S x1, PT_RA(sp) + + // save the ABI regs + + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + + // save the leftover regs + + .if \all == 1 + REG_S x2, PT_SP(sp) + REG_S x3, PT_GP(sp) + REG_S x4, PT_TP(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + + // save s0 if FP_TEST defined + + .else +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + REG_S x8, PT_S0(sp) +#endif + .endif .endm - .macro RESTORE_ALL - REG_L x1, PT_RA(sp) - REG_L x2, PT_SP(sp) - REG_L x3, PT_GP(sp) - REG_L x4, PT_TP(sp) - /* Restore t0 with PT_EPC */ - REG_L x5, PT_EPC(sp) - restore_from_x6_to_x31 + .macro RESTORE_ABI_REGS, all=0 + REG_L t0, PT_EPC(sp) + REG_L x1, PT_RA(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + .if \all == 1 + REG_L x2, PT_SP(sp) + REG_L x3, PT_GP(sp) + REG_L x4, PT_TP(sp) + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + + .else +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + REG_L x8, PT_S0(sp) +#endif + .endif addi sp, sp, PT_SIZE_ON_STACK .endm + + .macro PREPARE_ARGS + addi a0, t0, -FENTRY_RA_OFFSET + la a1, function_trace_op + REG_L a2, 0(a1) + mv a1, ra + mv a3, sp + .endm + #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS SYM_FUNC_START(ftrace_caller) SAVE_ABI @@ -105,34 +224,39 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) call ftrace_stub #endif RESTORE_ABI - jr t0 + jr t0 SYM_FUNC_END(ftrace_caller) -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ SYM_FUNC_START(ftrace_regs_caller) -
SAVE_ALL - - addi a0, t0, -FENTRY_RA_OFFSET - la a1, function_trace_op - REG_L a2, 0(a1) - mv a1, ra - mv a3, sp + mv t1, zero + SAVE_ABI_REGS 1 + PREPARE_ARGS SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) call ftrace_stub -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - addi a0, sp, PT_RA - REG_L a1, PT_EPC(sp) - addi a1, a1, -FENTRY_RA_OFFSET -#ifdef HAVE_FUNCTION_GRAPH_FP_TEST - mv a2, s0 -#endif -SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL) + RESTORE_ABI_REGS 1 + bnez t1, .Ldirect + jr t0 +.Ldirect: + jr t1 +SYM_FUNC_END(ftrace_regs_caller) + +SYM_FUNC_START(ftrace_caller) + SAVE_ABI_REGS 0 + PREPARE_ARGS + +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) call ftrace_stub -#endif - RESTORE_ALL - jr t0 -SYM_FUNC_END(ftrace_regs_caller) + RESTORE_ABI_REGS 0 + jr t0 +SYM_FUNC_END(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +SYM_CODE_START(ftrace_stub_direct_tramp) + jr t0 +SYM_CODE_END(ftrace_stub_direct_tramp) +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index b4dd9ed684..d7ec69ac69 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -4,12 +4,12 @@ #include <linux/init.h> #include <linux/linkage.h> #include <linux/cfi_types.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/csr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> -#include <asm-generic/export.h> #include <asm/ftrace.h> .text diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index c9d59a5448..5e5a826444 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -783,6 +783,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, Elf_Sym *sym; void *location; unsigned int i, type; + unsigned int j_idx = 0; Elf_Addr v; int res; unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); @@ -833,9 +834,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, v = sym->st_value + rel[i].r_addend; if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) { - unsigned int j; + unsigned int j = j_idx; + bool found = false; - for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { + do { unsigned long hi20_loc = sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; @@ -864,16 +866,26 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, hi20 = (offset + 0x800) & 0xfffff000; lo12 = offset - hi20; v = lo12; + found = true; break; } - } - if (j == sechdrs[relsec].sh_size / sizeof(*rel)) { + + j++; + if (j > sechdrs[relsec].sh_size / sizeof(*rel)) + j = 0; + + } while (j_idx != j); + + if (!found) { pr_err( "%s: Can not find HI20 relocation information\n", me->name); return -EINVAL; } + + /* Record the previous j-loop end index */ + j_idx = j; } if (reloc_handlers[type].accumulate_handler) diff --git a/arch/riscv/kernel/paravirt.c b/arch/riscv/kernel/paravirt.c new file mode 100644 index 0000000000..0d6225fd31 --- /dev/null +++ b/arch/riscv/kernel/paravirt.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Ventana Micro Systems Inc. 
+ */ + +#define pr_fmt(fmt) "riscv-pv: " fmt + +#include <linux/cpuhotplug.h> +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/jump_label.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/percpu-defs.h> +#include <linux/printk.h> +#include <linux/static_call.h> +#include <linux/types.h> + +#include <asm/barrier.h> +#include <asm/page.h> +#include <asm/paravirt.h> +#include <asm/sbi.h> + +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} + +early_param("no-steal-acc", parse_no_stealacc); + +static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64); + +static bool __init has_pv_steal_clock(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_STA) > 0) { + pr_info("SBI STA extension detected\n"); + return true; + } + + return false; +} + +static int sbi_sta_steal_time_set_shmem(unsigned long lo, unsigned long hi, + unsigned long flags) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM, + lo, hi, flags, 0, 0, 0); + if (ret.error) { + if (lo == SBI_STA_SHMEM_DISABLE && hi == SBI_STA_SHMEM_DISABLE) + pr_warn("Failed to disable steal-time shmem"); + else + pr_warn("Failed to set steal-time shmem"); + return sbi_err_map_linux_errno(ret.error); + } + + return 0; +} + +static int pv_time_cpu_online(unsigned int cpu) +{ + struct sbi_sta_struct *st = this_cpu_ptr(&steal_time); + phys_addr_t pa = __pa(st); + unsigned long lo = (unsigned long)pa; + unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0; + + return sbi_sta_steal_time_set_shmem(lo, hi, 0); +} + +static int pv_time_cpu_down_prepare(unsigned int cpu) +{ + return sbi_sta_steal_time_set_shmem(SBI_STA_SHMEM_DISABLE, + SBI_STA_SHMEM_DISABLE, 0); +} + +static u64 pv_time_steal_clock(int cpu) +{ + struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu); + __le32 sequence; + __le64 steal; + + /* + * Check the sequence field before and after reading the steal + * field. Repeat the read if it is different or odd. 
+ */ + do { + sequence = READ_ONCE(st->sequence); + virt_rmb(); + steal = READ_ONCE(st->steal); + virt_rmb(); + } while ((le32_to_cpu(sequence) & 1) || + sequence != READ_ONCE(st->sequence)); + + return le64_to_cpu(steal); +} + +int __init pv_time_init(void) +{ + int ret; + + if (!has_pv_steal_clock()) + return 0; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "riscv/pv_time:online", + pv_time_cpu_online, + pv_time_cpu_down_prepare); + if (ret < 0) + return ret; + + static_call_update(pv_steal_clock, pv_time_steal_clock); + + static_key_slow_inc(&paravirt_steal_enabled); + if (steal_acc) + static_key_slow_inc(&paravirt_steal_rq_enabled); + + pr_info("Computing paravirt steal-time\n"); + + return 0; +}
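
To see where the static call and static keys registered above land, the consumer side lives in asm/paravirt.h and the scheduler core, not in this diff. A simplified sketch of that path, under the assumption it matches the generic paravirt steal-time plumbing:

```c
/* Rough sketch of the consumer path; simplified, not part of this diff. */
static inline u64 paravirt_steal_clock(int cpu)
{
	/* Resolves to pv_time_steal_clock() after static_call_update(). */
	return static_call(pv_steal_clock)(cpu);
}

/*
 * The scheduler only pays for the hook once pv_time_init() has
 * enabled the keys above, roughly:
 *
 *	if (static_key_false(&paravirt_steal_rq_enabled))
 *		steal = paravirt_steal_clock(cpu_of(rq));
 */
```
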
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 37e87fdcf6..30e12b310c 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -80,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len) */ lockdep_assert_held(&text_mutex); + preempt_disable(); + if (across_pages) patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1); @@ -92,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len) if (across_pages) patch_unmap(FIX_TEXT_POKE1); + preempt_enable(); + return 0; } NOKPROBE_SYMBOL(__patch_insn_set); @@ -122,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len) if (!riscv_patch_in_stop_machine) lockdep_assert_held(&text_mutex); + preempt_disable(); + if (across_pages) patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1); @@ -134,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len) if (across_pages) patch_unmap(FIX_TEXT_POKE1); + preempt_enable(); + return ret; } NOKPROBE_SYMBOL(__patch_insn_write); diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 4f21d970a1..e4bc61c4e5 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -27,8 +27,6 @@ #include <asm/vector.h> #include <asm/cpufeature.h> -register unsigned long gp_in_global __asm__("gp"); - #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; @@ -37,7 +35,7 @@ EXPORT_SYMBOL(__stack_chk_guard); extern asmlinkage void ret_from_fork(void); -void arch_cpu_idle(void) +void noinstr arch_cpu_idle(void) { cpu_do_idle(); } @@ -171,6 +169,7 @@ void flush_thread(void) riscv_v_vstate_off(task_pt_regs(current)); kfree(current->thread.vstate.datap); memset(&current->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); + clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE); #endif } @@ -178,7 +177,7 @@ void arch_release_task_struct(struct task_struct *tsk) { /* Free the vector context of datap. */ if (has_vector()) - kfree(tsk->thread.vstate.datap); + riscv_v_thread_free(tsk); } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) @@ -187,6 +186,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) *dst = *src; /* clear entire V context, including datap for a new task */ memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); + memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state)); + clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE); return 0; } @@ -204,7 +205,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) if (unlikely(args->fn)) { /* Kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); - childregs->gp = gp_in_global; /* Supervisor/Machine, irqs on: */ childregs->status = SR_PP | SR_PIE; @@ -221,7 +221,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) childregs->a0 = 0; /* Return value of fork() */ p->thread.s[0] = 0; } + p->thread.riscv_v_flags = 0; + if (has_vector()) + riscv_v_thread_alloc(p); p->thread.ra = (unsigned long)ret_from_fork; p->thread.sp = (unsigned long)childregs; /* kernel sp */ return 0; } + +void __init arch_task_cache_init(void) +{ + riscv_v_setup_ctx_cache(); +} diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 2afe460de1..e8515aa9d8 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -99,8 +99,11 @@ static int riscv_vr_get(struct task_struct *target, * Ensure the vector registers have been saved to the memory before * copying them to membuf. */ - if (target == current) - riscv_v_vstate_save(current, task_pt_regs(current)); + if (target == current) { + get_cpu_vector_context(); + riscv_v_vstate_save(&current->thread.vstate, task_pt_regs(current)); + put_cpu_vector_context(); + } ptrace_vstate.vstart = vstate->vstart; ptrace_vstate.vl = vstate->vl; diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index 5a62ed1da4..e66e0999a8 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -7,6 +7,7 @@ #include <linux/bits.h> #include <linux/init.h> +#include <linux/mm.h> #include <linux/pm.h> #include <linux/reboot.h> #include <asm/sbi.h> @@ -571,6 +572,66 @@ long sbi_get_mimpid(void) } EXPORT_SYMBOL_GPL(sbi_get_mimpid); +bool sbi_debug_console_available; + +int sbi_debug_console_write(const char *bytes, unsigned int num_bytes) +{ + phys_addr_t base_addr; + struct sbiret ret; + + if (!sbi_debug_console_available) + return -EOPNOTSUPP; + + if (is_vmalloc_addr(bytes)) + base_addr = page_to_phys(vmalloc_to_page(bytes)) + + offset_in_page(bytes); + else + base_addr = __pa(bytes); + if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes)) + num_bytes = PAGE_SIZE - offset_in_page(bytes); + + if (IS_ENABLED(CONFIG_32BIT)) + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE, + num_bytes, lower_32_bits(base_addr), + upper_32_bits(base_addr), 0, 0, 0); + else + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE, + num_bytes, base_addr, 0, 0, 0, 0); + + if (ret.error == SBI_ERR_FAILURE) + return -EIO; + return ret.error ?
sbi_err_map_linux_errno(ret.error) : ret.value; +} + +int sbi_debug_console_read(char *bytes, unsigned int num_bytes) +{ + phys_addr_t base_addr; + struct sbiret ret; + + if (!sbi_debug_console_available) + return -EOPNOTSUPP; + + if (is_vmalloc_addr(bytes)) + base_addr = page_to_phys(vmalloc_to_page(bytes)) + + offset_in_page(bytes); + else + base_addr = __pa(bytes); + if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes)) + num_bytes = PAGE_SIZE - offset_in_page(bytes); + + if (IS_ENABLED(CONFIG_32BIT)) + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ, + num_bytes, lower_32_bits(base_addr), + upper_32_bits(base_addr), 0, 0, 0); + else + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ, + num_bytes, base_addr, 0, 0, 0, 0); + + if (ret.error == SBI_ERR_FAILURE) + return -EIO; + return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value; +} + void __init sbi_init(void) { int ret; @@ -612,6 +673,11 @@ void __init sbi_init(void) sbi_srst_reboot_nb.priority = 192; register_restart_handler(&sbi_srst_reboot_nb); } + if ((sbi_spec_version >= sbi_mk_version(2, 0)) && + (sbi_probe_extension(SBI_EXT_DBCN) > 0)) { + pr_info("SBI DBCN extension detected\n"); + sbi_debug_console_available = true; + } } else { __sbi_set_timer = __sbi_set_timer_v01; __sbi_send_ipi = __sbi_send_ipi_v01;
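
Both helpers return the number of bytes actually transferred on success (capped at one page by the code above), so callers must loop on partial transfers. A hedged sketch of a write path, modeled loosely on the HVC/earlycon users of this API; the function name is illustrative:

```c
/* Illustrative caller, not part of this diff: drain a buffer through
 * the SBI debug console, retrying partial writes. */
static void dbcn_write_all(const char *buf, unsigned int len)
{
	int ret;

	while (len) {
		ret = sbi_debug_console_write(buf, len);
		if (ret < 0)		/* -EOPNOTSUPP, -EIO, ... */
			break;
		buf += ret;		/* ret == bytes accepted this call */
		len -= ret;
	}
}
```
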
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 535a837de5..4f73c0ae44 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -26,7 +26,6 @@ #include <asm/alternative.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> -#include <asm/cpu_ops.h> #include <asm/early_ioremap.h> #include <asm/pgtable.h> #include <asm/setup.h> @@ -51,7 +50,6 @@ atomic_t hart_lottery __section(".sdata") #endif ; unsigned long boot_cpu_hartid; -static DEFINE_PER_CPU(struct cpu, cpu_devices); /* * Place kernel memory regions on the resource tree so that @@ -299,23 +297,10 @@ void __init setup_arch(char **cmdline_p) riscv_user_isa_enable(); } -static int __init topology_init(void) +bool arch_cpu_is_hotpluggable(int cpu) { - int i, ret; - - for_each_possible_cpu(i) { - struct cpu *cpu = &per_cpu(cpu_devices, i); - - cpu->hotpluggable = cpu_has_hotplug(i); - ret = register_cpu(cpu, i); - if (unlikely(ret)) - pr_warn("Warning: %s: register_cpu %d failed (%d)\n", - __func__, i, ret); - } - - return 0; + return cpu_has_hotplug(cpu); } -subsys_initcall(topology_init); void free_initmem(void) { diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 88b6220b26..5a2edd7f02 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -86,12 +86,15 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec) /* datap is designed to be 16 byte aligned for better performance */ WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16))); - riscv_v_vstate_save(current, regs); + get_cpu_vector_context(); + riscv_v_vstate_save(&current->thread.vstate, regs); + put_cpu_vector_context(); + /* Copy everything of vstate but datap. */ err = __copy_to_user(&state->v_state, &current->thread.vstate, offsetof(struct __riscv_v_ext_state, datap)); /* Copy the pointer datap itself. */ - err |= __put_user(datap, &state->v_state.datap); + err |= __put_user((__force void *)datap, &state->v_state.datap); /* Copy the whole vector content to user space datap. */ err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize); /* Copy magic to the user space after saving all vector context */ @@ -116,6 +119,13 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) struct __sc_riscv_v_state __user *state = sc_vec; void __user *datap; + /* + * Mark the vstate as clean prior to performing the actual copy, + * to avoid getting the vstate incorrectly clobbered by the + * discarded vector state. + */ + riscv_v_vstate_set_restore(current, regs); + /* Copy everything of __sc_riscv_v_state except datap. */ err = __copy_from_user(&current->thread.vstate, &state->v_state, offsetof(struct __riscv_v_ext_state, datap)); @@ -130,13 +140,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) * Copy the whole vector content from user space datap. Use * copy_from_user to prevent information leak. */ - err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); - if (unlikely(err)) - return err; - - riscv_v_vstate_restore(current, regs); - - return err; + return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); } #else #define save_v_state(task, regs) (0) diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 40420afbb1..45dd403541 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) #ifdef CONFIG_HOTPLUG_CPU if (cpu_has_hotplug(cpu)) - cpu_ops[cpu]->cpu_stop(); + cpu_ops->cpu_stop(); #endif for(;;) diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index d162bf339b..519b6bd946 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void) void __init smp_prepare_cpus(unsigned int max_cpus) { int cpuid; - int ret; unsigned int curr_cpuid; init_cpu_topology(); @@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) for_each_possible_cpu(cpuid) { if (cpuid == curr_cpuid) continue; - if (cpu_ops[cpuid]->cpu_prepare) { - ret = cpu_ops[cpuid]->cpu_prepare(cpuid); - if (ret) - continue; - } set_cpu_present(cpuid, true); numa_store_cpu_info(cpuid); } @@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un static void __init acpi_parse_and_init_cpus(void) { - int cpuid; - - cpu_set_ops(0); - acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0); - - for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) { - if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) { - cpu_set_ops(cpuid); - set_cpu_possible(cpuid, true); - } - } } #else #define acpi_parse_and_init_cpus(...)
do { } while (0) @@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void) int cpuid = 1; int rc; - cpu_set_ops(0); - for_each_of_cpu_node(dn) { rc = riscv_early_of_processor_hartid(dn, &hart); if (rc < 0) @@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void) if (cpuid > nr_cpu_ids) pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n", cpuid, nr_cpu_ids); - - for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) { - if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) { - cpu_set_ops(cpuid); - set_cpu_possible(cpuid, true); - } - } } void __init setup_smp(void) { + int cpuid; + + cpu_set_ops(); + if (acpi_disabled) of_parse_and_init_cpus(); else acpi_parse_and_init_cpus(); + + for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) + if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) + set_cpu_possible(cpuid, true); } static int start_secondary_cpu(int cpu, struct task_struct *tidle) { - if (cpu_ops[cpu]->cpu_start) - return cpu_ops[cpu]->cpu_start(cpu, tidle); + if (cpu_ops->cpu_start) + return cpu_ops->cpu_start(cpu, tidle); return -EOPNOTSUPP; } diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index 3c89b8ec69..299795341e 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c @@ -4,13 +4,19 @@ * Copyright (c) 2022 Ventana Micro Systems Inc. */ +#define pr_fmt(fmt) "suspend: " fmt + #include <linux/ftrace.h> +#include <linux/suspend.h> #include <asm/csr.h> +#include <asm/sbi.h> #include <asm/suspend.h> void suspend_save_csrs(struct suspend_context *context) { context->scratch = csr_read(CSR_SCRATCH); + if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + context->envcfg = csr_read(CSR_ENVCFG); context->tvec = csr_read(CSR_TVEC); context->ie = csr_read(CSR_IE); @@ -32,6 +38,8 @@ void suspend_save_csrs(struct suspend_context *context) void suspend_restore_csrs(struct suspend_context *context) { csr_write(CSR_SCRATCH, context->scratch); + if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + csr_write(CSR_ENVCFG, context->envcfg); csr_write(CSR_TVEC, context->tvec); csr_write(CSR_IE, context->ie); @@ -85,3 +93,43 @@ int cpu_suspend(unsigned long arg, return rc; } + +#ifdef CONFIG_RISCV_SBI +static int sbi_system_suspend(unsigned long sleep_type, + unsigned long resume_addr, + unsigned long opaque) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND, + sleep_type, resume_addr, opaque, 0, 0, 0); + if (ret.error) + return sbi_err_map_linux_errno(ret.error); + + return ret.value; +} + +static int sbi_system_suspend_enter(suspend_state_t state) +{ + return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend); +} + +static const struct platform_suspend_ops sbi_system_suspend_ops = { + .valid = suspend_valid_only_mem, + .enter = sbi_system_suspend_enter, +}; + +static int __init sbi_system_suspend_init(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_SUSP) > 0) { + pr_info("SBI SUSP extension detected\n"); + if (IS_ENABLED(CONFIG_SUSPEND)) + suspend_set_ops(&sbi_system_suspend_ops); + } + + return 0; +} + +arch_initcall(sbi_system_suspend_init); +#endif /* CONFIG_RISCV_SBI */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c new file mode 100644 index 0000000000..a7c56b41ef --- /dev/null +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The hwprobe interface, for allowing userspace to 
probe to see which features + * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for + * more details. + */ +#include <linux/syscalls.h> +#include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/hwprobe.h> +#include <asm/sbi.h> +#include <asm/switch_to.h> +#include <asm/uaccess.h> +#include <asm/unistd.h> +#include <asm/vector.h> +#include <vdso/vsyscall.h> + + +static void hwprobe_arch_id(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + u64 id = -1ULL; + bool first = true; + int cpu; + + for_each_cpu(cpu, cpus) { + u64 cpu_id; + + switch (pair->key) { + case RISCV_HWPROBE_KEY_MVENDORID: + cpu_id = riscv_cached_mvendorid(cpu); + break; + case RISCV_HWPROBE_KEY_MIMPID: + cpu_id = riscv_cached_mimpid(cpu); + break; + case RISCV_HWPROBE_KEY_MARCHID: + cpu_id = riscv_cached_marchid(cpu); + break; + } + + if (first) { + id = cpu_id; + first = false; + } + + /* + * If there's a mismatch for the given set, return -1 in the + * value. + */ + if (id != cpu_id) { + id = -1ULL; + break; + } + } + + pair->value = id; +} + +static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + int cpu; + u64 missing = 0; + + pair->value = 0; + if (has_fpu()) + pair->value |= RISCV_HWPROBE_IMA_FD; + + if (riscv_isa_extension_available(NULL, c)) + pair->value |= RISCV_HWPROBE_IMA_C; + + if (has_vector()) + pair->value |= RISCV_HWPROBE_IMA_V; + + /* + * Loop through and record extensions that 1) anyone has, and 2) anyone + * doesn't have. + */ + for_each_cpu(cpu, cpus) { + struct riscv_isainfo *isainfo = &hart_isa[cpu]; + +#define EXT_KEY(ext) \ + do { \ + if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ + pair->value |= RISCV_HWPROBE_EXT_##ext; \ + else \ + missing |= RISCV_HWPROBE_EXT_##ext; \ + } while (false) + + /* + * Only use EXT_KEY() for extensions which can be exposed to userspace, + * regardless of the kernel's configuration, as no other checks, besides + * presence in the hart_isa bitmap, are made. + */ + EXT_KEY(ZBA); + EXT_KEY(ZBB); + EXT_KEY(ZBS); + EXT_KEY(ZICBOZ); + EXT_KEY(ZBC); + + EXT_KEY(ZBKB); + EXT_KEY(ZBKC); + EXT_KEY(ZBKX); + EXT_KEY(ZKND); + EXT_KEY(ZKNE); + EXT_KEY(ZKNH); + EXT_KEY(ZKSED); + EXT_KEY(ZKSH); + EXT_KEY(ZKT); + EXT_KEY(ZIHINTNTL); + EXT_KEY(ZTSO); + EXT_KEY(ZACAS); + EXT_KEY(ZICOND); + + if (has_vector()) { + EXT_KEY(ZVBB); + EXT_KEY(ZVBC); + EXT_KEY(ZVKB); + EXT_KEY(ZVKG); + EXT_KEY(ZVKNED); + EXT_KEY(ZVKNHA); + EXT_KEY(ZVKNHB); + EXT_KEY(ZVKSED); + EXT_KEY(ZVKSH); + EXT_KEY(ZVKT); + EXT_KEY(ZVFH); + EXT_KEY(ZVFHMIN); + } + + if (has_fpu()) { + EXT_KEY(ZFH); + EXT_KEY(ZFHMIN); + EXT_KEY(ZFA); + } +#undef EXT_KEY + } + + /* Now turn off reporting features if any CPU is missing it. 
*/ + pair->value &= ~missing; +} + +static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) +{ + struct riscv_hwprobe pair; + + hwprobe_isa_ext0(&pair, cpus); + return (pair.value & ext); +} + +static u64 hwprobe_misaligned(const struct cpumask *cpus) +{ + int cpu; + u64 perf = -1ULL; + + for_each_cpu(cpu, cpus) { + int this_perf = per_cpu(misaligned_access_speed, cpu); + + if (perf == -1ULL) + perf = this_perf; + + if (perf != this_perf) { + perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; + break; + } + } + + if (perf == -1ULL) + return RISCV_HWPROBE_MISALIGNED_UNKNOWN; + + return perf; +} + +static void hwprobe_one_pair(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + switch (pair->key) { + case RISCV_HWPROBE_KEY_MVENDORID: + case RISCV_HWPROBE_KEY_MARCHID: + case RISCV_HWPROBE_KEY_MIMPID: + hwprobe_arch_id(pair, cpus); + break; + /* + * The kernel already assumes that the base single-letter ISA + * extensions are supported on all harts, and only supports the + * IMA base, so just cheat a bit here and tell that to + * userspace. + */ + case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: + pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; + break; + + case RISCV_HWPROBE_KEY_IMA_EXT_0: + hwprobe_isa_ext0(pair, cpus); + break; + + case RISCV_HWPROBE_KEY_CPUPERF_0: + pair->value = hwprobe_misaligned(cpus); + break; + + case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: + pair->value = 0; + if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) + pair->value = riscv_cboz_block_size; + break; + + /* + * For forward compatibility, unknown keys don't fail the whole + * call, but get their element key set to -1 and value set to 0 + * indicating they're unrecognized. + */ + default: + pair->key = -1; + pair->value = 0; + break; + } +} + +static int hwprobe_get_values(struct riscv_hwprobe __user *pairs, + size_t pair_count, size_t cpusetsize, + unsigned long __user *cpus_user, + unsigned int flags) +{ + size_t out; + int ret; + cpumask_t cpus; + + /* Check the reserved flags. */ + if (flags != 0) + return -EINVAL; + + /* + * The interface supports taking in a CPU mask, and returns values that + * are consistent across that mask. Allow userspace to specify NULL and + * 0 as a shortcut to all online CPUs. + */ + cpumask_clear(&cpus); + if (!cpusetsize && !cpus_user) { + cpumask_copy(&cpus, cpu_online_mask); + } else { + if (cpusetsize > cpumask_size()) + cpusetsize = cpumask_size(); + + ret = copy_from_user(&cpus, cpus_user, cpusetsize); + if (ret) + return -EFAULT; + + /* + * Userspace must provide at least one online CPU, without that + * there's no way to define what is supported. 
+ */ + cpumask_and(&cpus, &cpus, cpu_online_mask); + if (cpumask_empty(&cpus)) + return -EINVAL; + } + + for (out = 0; out < pair_count; out++, pairs++) { + struct riscv_hwprobe pair; + + if (get_user(pair.key, &pairs->key)) + return -EFAULT; + + pair.value = 0; + hwprobe_one_pair(&pair, &cpus); + ret = put_user(pair.key, &pairs->key); + if (ret == 0) + ret = put_user(pair.value, &pairs->value); + + if (ret) + return -EFAULT; + } + + return 0; +} + +static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs, + size_t pair_count, size_t cpusetsize, + unsigned long __user *cpus_user, + unsigned int flags) +{ + cpumask_t cpus, one_cpu; + bool clear_all = false; + size_t i; + int ret; + + if (flags != RISCV_HWPROBE_WHICH_CPUS) + return -EINVAL; + + if (!cpusetsize || !cpus_user) + return -EINVAL; + + if (cpusetsize > cpumask_size()) + cpusetsize = cpumask_size(); + + ret = copy_from_user(&cpus, cpus_user, cpusetsize); + if (ret) + return -EFAULT; + + if (cpumask_empty(&cpus)) + cpumask_copy(&cpus, cpu_online_mask); + + cpumask_and(&cpus, &cpus, cpu_online_mask); + + cpumask_clear(&one_cpu); + + for (i = 0; i < pair_count; i++) { + struct riscv_hwprobe pair, tmp; + int cpu; + + ret = copy_from_user(&pair, &pairs[i], sizeof(pair)); + if (ret) + return -EFAULT; + + if (!riscv_hwprobe_key_is_valid(pair.key)) { + clear_all = true; + pair = (struct riscv_hwprobe){ .key = -1, }; + ret = copy_to_user(&pairs[i], &pair, sizeof(pair)); + if (ret) + return -EFAULT; + } + + if (clear_all) + continue; + + tmp = (struct riscv_hwprobe){ .key = pair.key, }; + + for_each_cpu(cpu, &cpus) { + cpumask_set_cpu(cpu, &one_cpu); + + hwprobe_one_pair(&tmp, &one_cpu); + + if (!riscv_hwprobe_pair_cmp(&tmp, &pair)) + cpumask_clear_cpu(cpu, &cpus); + + cpumask_clear_cpu(cpu, &one_cpu); + } + } + + if (clear_all) + cpumask_clear(&cpus); + + ret = copy_to_user(cpus_user, &cpus, cpusetsize); + if (ret) + return -EFAULT; + + return 0; +} + +static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, + size_t pair_count, size_t cpusetsize, + unsigned long __user *cpus_user, + unsigned int flags) +{ + if (flags & RISCV_HWPROBE_WHICH_CPUS) + return hwprobe_get_cpus(pairs, pair_count, cpusetsize, + cpus_user, flags); + + return hwprobe_get_values(pairs, pair_count, cpusetsize, + cpus_user, flags); +} + +#ifdef CONFIG_MMU + +static int __init init_hwprobe_vdso_data(void) +{ + struct vdso_data *vd = __arch_get_k_vdso_data(); + struct arch_vdso_data *avd = &vd->arch_data; + u64 id_bitsmash = 0; + struct riscv_hwprobe pair; + int key; + + /* + * Initialize vDSO data with the answers for the "all CPUs" case, to + * save a syscall in the common case. + */ + for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { + pair.key = key; + hwprobe_one_pair(&pair, cpu_online_mask); + + WARN_ON_ONCE(pair.key < 0); + + avd->all_cpu_hwprobe_values[key] = pair.value; + /* + * Smash together the vendor, arch, and impl IDs to see if + * they're all 0 or any negative. + */ + if (key <= RISCV_HWPROBE_KEY_MIMPID) + id_bitsmash |= pair.value; + } + + /* + * If the arch, vendor, and implementation ID are all the same across + * all harts, then assume all CPUs are the same, and allow the vDSO to + * answer queries for arbitrary masks. However if all values are 0 (not + * populated) or any value returns -1 (varies across CPUs), then the + * vDSO should defer to the kernel for exotic cpu masks. 
+ */ + avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; + return 0; +} + +arch_initcall_sync(init_hwprobe_vdso_data); + +#endif /* CONFIG_MMU */ + +SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, + size_t, pair_count, size_t, cpusetsize, unsigned long __user *, + cpus, unsigned int, flags) +{ + return do_riscv_hwprobe(pairs, pair_count, cpusetsize, + cpus, flags); +} diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index a2ca5b7756..f1c1416a9f 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -7,15 +7,7 @@ #include <linux/syscalls.h> #include <asm/cacheflush.h> -#include <asm/cpufeature.h> -#include <asm/hwprobe.h> -#include <asm/sbi.h> -#include <asm/vector.h> -#include <asm/switch_to.h> -#include <asm/uaccess.h> -#include <asm/unistd.h> #include <asm-generic/mman-common.h> -#include <vdso/vsyscall.h> static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -77,283 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, return 0; } -/* - * The hwprobe interface, for allowing userspace to probe to see which features - * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for more - * details. - */ -static void hwprobe_arch_id(struct riscv_hwprobe *pair, - const struct cpumask *cpus) -{ - u64 id = -1ULL; - bool first = true; - int cpu; - - for_each_cpu(cpu, cpus) { - u64 cpu_id; - - switch (pair->key) { - case RISCV_HWPROBE_KEY_MVENDORID: - cpu_id = riscv_cached_mvendorid(cpu); - break; - case RISCV_HWPROBE_KEY_MIMPID: - cpu_id = riscv_cached_mimpid(cpu); - break; - case RISCV_HWPROBE_KEY_MARCHID: - cpu_id = riscv_cached_marchid(cpu); - break; - } - - if (first) { - id = cpu_id; - first = false; - } - - /* - * If there's a mismatch for the given set, return -1 in the - * value. - */ - if (id != cpu_id) { - id = -1ULL; - break; - } - } - - pair->value = id; -} - -static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, - const struct cpumask *cpus) -{ - int cpu; - u64 missing = 0; - - pair->value = 0; - if (has_fpu()) - pair->value |= RISCV_HWPROBE_IMA_FD; - - if (riscv_isa_extension_available(NULL, c)) - pair->value |= RISCV_HWPROBE_IMA_C; - - if (has_vector()) - pair->value |= RISCV_HWPROBE_IMA_V; - - /* - * Loop through and record extensions that 1) anyone has, and 2) anyone - * doesn't have. - */ - for_each_cpu(cpu, cpus) { - struct riscv_isainfo *isainfo = &hart_isa[cpu]; - -#define EXT_KEY(ext) \ - do { \ - if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ - pair->value |= RISCV_HWPROBE_EXT_##ext; \ - else \ - missing |= RISCV_HWPROBE_EXT_##ext; \ - } while (false) - - /* - * Only use EXT_KEY() for extensions which can be exposed to userspace, - * regardless of the kernel's configuration, as no other checks, besides - * presence in the hart_isa bitmap, are made. - */ - EXT_KEY(ZBA); - EXT_KEY(ZBB); - EXT_KEY(ZBS); - EXT_KEY(ZICBOZ); -#undef EXT_KEY - } - - /* Now turn off reporting features if any CPU is missing it. 
*/ - pair->value &= ~missing; -} - -static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext) -{ - struct riscv_hwprobe pair; - - hwprobe_isa_ext0(&pair, cpus); - return (pair.value & ext); -} - -static u64 hwprobe_misaligned(const struct cpumask *cpus) -{ - int cpu; - u64 perf = -1ULL; - - for_each_cpu(cpu, cpus) { - int this_perf = per_cpu(misaligned_access_speed, cpu); - - if (perf == -1ULL) - perf = this_perf; - - if (perf != this_perf) { - perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; - break; - } - } - - if (perf == -1ULL) - return RISCV_HWPROBE_MISALIGNED_UNKNOWN; - - return perf; -} - -static void hwprobe_one_pair(struct riscv_hwprobe *pair, - const struct cpumask *cpus) -{ - switch (pair->key) { - case RISCV_HWPROBE_KEY_MVENDORID: - case RISCV_HWPROBE_KEY_MARCHID: - case RISCV_HWPROBE_KEY_MIMPID: - hwprobe_arch_id(pair, cpus); - break; - /* - * The kernel already assumes that the base single-letter ISA - * extensions are supported on all harts, and only supports the - * IMA base, so just cheat a bit here and tell that to - * userspace. - */ - case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: - pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; - break; - - case RISCV_HWPROBE_KEY_IMA_EXT_0: - hwprobe_isa_ext0(pair, cpus); - break; - - case RISCV_HWPROBE_KEY_CPUPERF_0: - pair->value = hwprobe_misaligned(cpus); - break; - - case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: - pair->value = 0; - if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) - pair->value = riscv_cboz_block_size; - break; - - /* - * For forward compatibility, unknown keys don't fail the whole - * call, but get their element key set to -1 and value set to 0 - * indicating they're unrecognized. - */ - default: - pair->key = -1; - pair->value = 0; - break; - } -} - -static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, - size_t pair_count, size_t cpu_count, - unsigned long __user *cpus_user, - unsigned int flags) -{ - size_t out; - int ret; - cpumask_t cpus; - - /* Check the reserved flags. */ - if (flags != 0) - return -EINVAL; - - /* - * The interface supports taking in a CPU mask, and returns values that - * are consistent across that mask. Allow userspace to specify NULL and - * 0 as a shortcut to all online CPUs. - */ - cpumask_clear(&cpus); - if (!cpu_count && !cpus_user) { - cpumask_copy(&cpus, cpu_online_mask); - } else { - if (cpu_count > cpumask_size()) - cpu_count = cpumask_size(); - - ret = copy_from_user(&cpus, cpus_user, cpu_count); - if (ret) - return -EFAULT; - - /* - * Userspace must provide at least one online CPU, without that - * there's no way to define what is supported. - */ - cpumask_and(&cpus, &cpus, cpu_online_mask); - if (cpumask_empty(&cpus)) - return -EINVAL; - } - - for (out = 0; out < pair_count; out++, pairs++) { - struct riscv_hwprobe pair; - - if (get_user(pair.key, &pairs->key)) - return -EFAULT; - - pair.value = 0; - hwprobe_one_pair(&pair, &cpus); - ret = put_user(pair.key, &pairs->key); - if (ret == 0) - ret = put_user(pair.value, &pairs->value); - - if (ret) - return -EFAULT; - } - - return 0; -} - -#ifdef CONFIG_MMU - -static int __init init_hwprobe_vdso_data(void) -{ - struct vdso_data *vd = __arch_get_k_vdso_data(); - struct arch_vdso_data *avd = &vd->arch_data; - u64 id_bitsmash = 0; - struct riscv_hwprobe pair; - int key; - - /* - * Initialize vDSO data with the answers for the "all CPUs" case, to - * save a syscall in the common case. 
- */ - for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { - pair.key = key; - hwprobe_one_pair(&pair, cpu_online_mask); - - WARN_ON_ONCE(pair.key < 0); - - avd->all_cpu_hwprobe_values[key] = pair.value; - /* - * Smash together the vendor, arch, and impl IDs to see if - * they're all 0 or any negative. - */ - if (key <= RISCV_HWPROBE_KEY_MIMPID) - id_bitsmash |= pair.value; - } - - /* - * If the arch, vendor, and implementation ID are all the same across - * all harts, then assume all CPUs are the same, and allow the vDSO to - * answer queries for arbitrary masks. However if all values are 0 (not - * populated) or any value returns -1 (varies across CPUs), then the - * vDSO should defer to the kernel for exotic cpu masks. - */ - avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; - return 0; -} - -arch_initcall_sync(init_hwprobe_vdso_data); - -#endif /* CONFIG_MMU */ - -SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, - size_t, pair_count, size_t, cpu_count, unsigned long __user *, - cpus, unsigned int, flags) -{ - return do_riscv_hwprobe(pairs, pair_count, cpu_count, - cpus, flags); -} - /* Not defined using SYSCALL_DEFINE0 to avoid error injection */ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused) { diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 23641e82a9..ba34771977 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -12,6 +12,7 @@ #include <asm/sbi.h> #include <asm/processor.h> #include <asm/timex.h> +#include <asm/paravirt.h> unsigned long riscv_timebase __ro_after_init; EXPORT_SYMBOL_GPL(riscv_timebase); @@ -45,4 +46,6 @@ void __init time_init(void) timer_probe(); tick_setup_hrtimer_broadcast(); + + pv_time_init(); } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index a1b9be3c43..142f5f5168 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -121,7 +121,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); pr_cont("\n"); __show_regs(regs); - dump_instr(KERN_EMERG, regs); + dump_instr(KERN_INFO, regs); } force_sig_fault(signo, code, (void __user *)addr); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 1ed769c87a..c2ed4e689b 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -319,7 +319,7 @@ static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn) static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) { if (user_mode(regs)) { - return __get_user(*r_val, addr); + return __get_user(*r_val, (u8 __user *)addr); } else { *r_val = *addr; return 0; @@ -329,7 +329,7 @@ static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) { if (user_mode(regs)) { - return __put_user(val, addr); + return __put_user(val, (u8 __user *)addr); } else { *addr = val; return 0; @@ -343,7 +343,7 @@ static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) if (user_mode(regs)) { \ __ret = __get_user(insn, insn_addr); \ } else { \ - insn = *insn_addr; \ + insn = *(__force u16 *)insn_addr; \ __ret = 0; \ } \ \ diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 9b517fe1b8..272c431ac5 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -37,6 +37,7 @@ endif # Disable -pg to prevent insert call site 
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) +CFLAGS_REMOVE_hwprobe.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) # Disable profiling and instrumentation for VDSO code GCOV_PROFILE := n diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c index cadf725ef7..1e926e4b58 100644 --- a/arch/riscv/kernel/vdso/hwprobe.c +++ b/arch/riscv/kernel/vdso/hwprobe.c @@ -3,26 +3,22 @@ * Copyright 2023 Rivos, Inc */ +#include <linux/string.h> #include <linux/types.h> #include <vdso/datapage.h> #include <vdso/helpers.h> extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, - size_t cpu_count, unsigned long *cpus, + size_t cpusetsize, unsigned long *cpus, unsigned int flags); -/* Add a prototype to avoid -Wmissing-prototypes warning. */ -int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, - size_t cpu_count, unsigned long *cpus, - unsigned int flags); - -int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, - size_t cpu_count, unsigned long *cpus, - unsigned int flags) +static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags) { const struct vdso_data *vd = __arch_get_vdso_data(); const struct arch_vdso_data *avd = &vd->arch_data; - bool all_cpus = !cpu_count && !cpus; + bool all_cpus = !cpusetsize && !cpus; struct riscv_hwprobe *p = pairs; struct riscv_hwprobe *end = pairs + pair_count; @@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, * masks. */ if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) - return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); + return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); /* This is something we can handle, fill out the pairs. */ while (p < end) { @@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, return 0; } + +static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags) +{ + const struct vdso_data *vd = __arch_get_vdso_data(); + const struct arch_vdso_data *avd = &vd->arch_data; + struct riscv_hwprobe *p = pairs; + struct riscv_hwprobe *end = pairs + pair_count; + unsigned char *c = (unsigned char *)cpus; + bool empty_cpus = true; + bool clear_all = false; + int i; + + if (!cpusetsize || !cpus) + return -EINVAL; + + for (i = 0; i < cpusetsize; i++) { + if (c[i]) { + empty_cpus = false; + break; + } + } + + if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus) + return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); + + while (p < end) { + if (riscv_hwprobe_key_is_valid(p->key)) { + struct riscv_hwprobe t = { + .key = p->key, + .value = avd->all_cpu_hwprobe_values[p->key], + }; + + if (!riscv_hwprobe_pair_cmp(&t, p)) + clear_all = true; + } else { + clear_all = true; + p->key = -1; + p->value = 0; + } + p++; + } + + if (clear_all) { + for (i = 0; i < cpusetsize; i++) + c[i] = 0; + } + + return 0; +} + +/* Add a prototype to avoid -Wmissing-prototypes warning. 
*/ +int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags); + +int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags) +{ + if (flags & RISCV_HWPROBE_WHICH_CPUS) + return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize, + cpus, flags); + + return riscv_vdso_get_values(pairs, pair_count, cpusetsize, + cpus, flags); +} diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c index cc0d80699c..b350578025 100644 --- a/arch/riscv/kernel/vdso/vgettimeofday.c +++ b/arch/riscv/kernel/vdso/vgettimeofday.c @@ -8,23 +8,18 @@ #include <linux/time.h> #include <linux/types.h> +#include <vdso/gettime.h> -extern -int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) { return __cvdso_clock_gettime(clock, ts); } -extern -int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } -extern -int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { return __cvdso_clock_getres(clock_id, res); diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 578b629248..6727d1d3b8 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -21,6 +21,10 @@ #include <asm/bug.h> static bool riscv_v_implicit_uacc = IS_ENABLED(CONFIG_RISCV_ISA_V_DEFAULT_ENABLE); +static struct kmem_cache *riscv_v_user_cachep; +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +static struct kmem_cache *riscv_v_kernel_cachep; +#endif unsigned long riscv_v_vsize __read_mostly; EXPORT_SYMBOL_GPL(riscv_v_vsize); @@ -47,6 +51,21 @@ int riscv_v_setup_vsize(void) return 0; } +void __init riscv_v_setup_ctx_cache(void) +{ + if (!has_vector()) + return; + + riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx", + riscv_v_vsize, 16, SLAB_PANIC, + 0, riscv_v_vsize, NULL); +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + riscv_v_kernel_cachep = kmem_cache_create("riscv_vector_kctx", + riscv_v_vsize, 16, + SLAB_PANIC, NULL); +#endif +} + static bool insn_is_vector(u32 insn_buf) { u32 opcode = insn_buf & __INSN_OPCODE_MASK; @@ -80,20 +99,37 @@ static bool insn_is_vector(u32 insn_buf) return false; } -static int riscv_v_thread_zalloc(void) +static int riscv_v_thread_zalloc(struct kmem_cache *cache, + struct __riscv_v_ext_state *ctx) { void *datap; - datap = kzalloc(riscv_v_vsize, GFP_KERNEL); + datap = kmem_cache_zalloc(cache, GFP_KERNEL); if (!datap) return -ENOMEM; - current->thread.vstate.datap = datap; - memset(&current->thread.vstate, 0, offsetof(struct __riscv_v_ext_state, - datap)); + ctx->datap = datap; + memset(ctx, 0, offsetof(struct __riscv_v_ext_state, datap)); return 0; } +void riscv_v_thread_alloc(struct task_struct *tsk) +{ +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + riscv_v_thread_zalloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate); +#endif +} + +void riscv_v_thread_free(struct task_struct *tsk) +{ + if (tsk->thread.vstate.datap) + kmem_cache_free(riscv_v_user_cachep, tsk->thread.vstate.datap); +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + if (tsk->thread.kernel_vstate.datap) + kmem_cache_free(riscv_v_kernel_cachep, tsk->thread.kernel_vstate.datap); +#endif +} + #define VSTATE_CTRL_GET_CUR(x) ((x) & 
PR_RISCV_V_VSTATE_CTRL_CUR_MASK) #define VSTATE_CTRL_GET_NEXT(x) (((x) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2) #define VSTATE_CTRL_MAKE_NEXT(x) (((x) << 2) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) @@ -122,7 +158,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt, ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt); if (inherit) ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT; - tsk->thread.vstate_ctrl = ctrl; + tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK; + tsk->thread.vstate_ctrl |= ctrl; } bool riscv_v_vstate_ctrl_user_allowed(void) @@ -162,12 +199,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs) * context where VS has been off. So, try to allocate the user's V * context and resume execution. */ - if (riscv_v_thread_zalloc()) { + if (riscv_v_thread_zalloc(riscv_v_user_cachep, &current->thread.vstate)) { force_sig(SIGBUS); return true; } riscv_v_vstate_on(regs); - riscv_v_vstate_restore(current, regs); + riscv_v_vstate_set_restore(current, regs); return true; }
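
A usage note for readers of this diff (illustrative only, not part of the patch): the sys_hwprobe.c and vDSO code above is reached from userspace through the riscv_hwprobe(2) syscall. The sketch below assumes the uapi definitions this kernel installs (struct riscv_hwprobe and the RISCV_HWPROBE_* constants from <asm/hwprobe.h>, __NR_riscv_hwprobe via <sys/syscall.h>); older toolchain headers may lack them.

#define _GNU_SOURCE
#include <sched.h>		/* cpu_set_t, CPU_ZERO, CPU_COUNT */
#include <stdio.h>
#include <unistd.h>
#include <sys/syscall.h>	/* syscall(), __NR_riscv_hwprobe */
#include <asm/hwprobe.h>	/* struct riscv_hwprobe, RISCV_HWPROBE_* */

int main(void)
{
	struct riscv_hwprobe pairs[] = {
		{ .key = RISCV_HWPROBE_KEY_MVENDORID },
		{ .key = RISCV_HWPROBE_KEY_IMA_EXT_0 },
	};
	cpu_set_t cpus;

	/* cpusetsize == 0 with a NULL mask means "all online CPUs",
	 * per hwprobe_get_values() above. */
	if (syscall(__NR_riscv_hwprobe, pairs, 2, 0, NULL, 0))
		return 1;

	/* -1 here means the vendor ID varies across the given CPUs. */
	printf("mvendorid: 0x%llx\n", (unsigned long long)pairs[0].value);
	if (pairs[1].value & RISCV_HWPROBE_EXT_ZBA)
		printf("Zba is present on every online hart\n");

	/*
	 * The RISCV_HWPROBE_WHICH_CPUS flag added above reverses the
	 * query: pass in a required key/value pair and get back the
	 * set of CPUs satisfying it. An empty input set is shorthand
	 * for all online CPUs, per hwprobe_get_cpus().
	 */
	struct riscv_hwprobe want = {
		.key = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBA,
	};

	CPU_ZERO(&cpus);
	if (syscall(__NR_riscv_hwprobe, &want, 1, sizeof(cpus),
		    (unsigned long *)&cpus, RISCV_HWPROBE_WHICH_CPUS))
		return 1;

	printf("%d CPU(s) report Zba\n", CPU_COUNT(&cpus));
	return 0;
}

Note that a raw syscall(2) always traps into the kernel. To benefit from the fast path in vdso/hwprobe.c, call the __vdso_riscv_hwprobe entry point instead (recent glibc wraps it as __riscv_hwprobe); once init_hwprobe_vdso_data() has populated the vDSO data page, homogeneous-system queries are answered from avd->all_cpu_hwprobe_values[] without entering the kernel.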