author      Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:36 +0000
committer   Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 18:50:36 +0000
commit      50ba0232fd5312410f1b65247e774244f89a628e (patch)
tree        fd8f2fc78e9e548af0ff9590276602ee6125be00 /arch/riscv
parent      Releasing progress-linux version 6.7.12-1~progress7.99u1. (diff)
download    linux-50ba0232fd5312410f1b65247e774244f89a628e.tar.xz
            linux-50ba0232fd5312410f1b65247e774244f89a628e.zip
Merging upstream version 6.8.9.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv')
127 files changed, 4564 insertions, 1172 deletions
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig index f6df30e8d1..e3142ce531 100644 --- a/arch/riscv/Kconfig +++ b/arch/riscv/Kconfig @@ -53,24 +53,28 @@ config RISCV select ARCH_USE_MEMTEST select ARCH_USE_QUEUED_RWLOCKS select ARCH_USES_CFI_TRAPS if CFI_CLANG + select ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH if SMP && MMU select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU select ARCH_WANT_FRAME_POINTERS select ARCH_WANT_GENERAL_HUGETLB if !RISCV_ISA_SVNAPOT select ARCH_WANT_HUGE_PMD_SHARE if 64BIT select ARCH_WANT_LD_ORPHAN_WARN if !XIP_KERNEL select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP + select ARCH_WANTS_NO_INSTR select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU select BUILDTIME_TABLE_SORT if MMU select CLINT_TIMER if !MMU select CLONE_BACKWARDS select COMMON_CLK - select CPU_PM if CPU_IDLE || HIBERNATION + select CPU_PM if CPU_IDLE || HIBERNATION || SUSPEND select EDAC_SUPPORT select FRAME_POINTER if PERF_EVENTS || (FUNCTION_TRACER && !DYNAMIC_FTRACE) + select FTRACE_MCOUNT_USE_PATCHABLE_FUNCTION_ENTRY if DYNAMIC_FTRACE select GENERIC_ARCH_TOPOLOGY select GENERIC_ATOMIC64 if !64BIT select GENERIC_CLOCKEVENTS_BROADCAST if SMP + select GENERIC_CPU_DEVICES select GENERIC_EARLY_IOREMAP select GENERIC_ENTRY select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO @@ -113,6 +117,7 @@ config RISCV select HAVE_DEBUG_KMEMLEAK select HAVE_DMA_CONTIGUOUS if MMU select HAVE_DYNAMIC_FTRACE if !XIP_KERNEL && MMU && (CLANG_SUPPORTS_DYNAMIC_FTRACE || GCC_SUPPORTS_DYNAMIC_FTRACE) + select HAVE_DYNAMIC_FTRACE_WITH_DIRECT_CALLS select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE select HAVE_FTRACE_MCOUNT_RECORD if !XIP_KERNEL select HAVE_FUNCTION_GRAPH_TRACER @@ -140,6 +145,8 @@ config RISCV select HAVE_REGS_AND_STACK_ACCESS_API select HAVE_RETHOOK if !XIP_KERNEL select HAVE_RSEQ + select HAVE_SAMPLE_FTRACE_DIRECT + select HAVE_SAMPLE_FTRACE_DIRECT_MULTI select HAVE_STACKPROTECTOR select HAVE_SYSCALL_TRACEPOINTS select HOTPLUG_CORE_SYNC_DEAD if HOTPLUG_CPU @@ -181,6 +188,20 @@ config HAVE_SHADOW_CALL_STACK # https://github.com/riscv-non-isa/riscv-elf-psabi-doc/commit/a484e843e6eeb51f0cb7b8819e50da6d2444d769 depends on $(ld-option,--no-relax-gp) +config RISCV_USE_LINKER_RELAXATION + def_bool y + # https://github.com/llvm/llvm-project/commit/6611d58f5bbcbec77262d392e2923e1d680f6985 + depends on !LD_IS_LLD || LLD_VERSION >= 150000 + +# https://github.com/llvm/llvm-project/commit/bbc0f99f3bc96f1db16f649fc21dd18e5b0918f6 +config ARCH_HAS_BROKEN_DWARF5 + def_bool y + depends on RISCV_USE_LINKER_RELAXATION + # https://github.com/llvm/llvm-project/commit/1df5ea29b43690b6622db2cad7b745607ca4de6a + depends on AS_IS_LLVM && AS_VERSION < 180000 + # https://github.com/llvm/llvm-project/commit/7ffabb61a5569444b5ac9322e22e5471cc5e4a77 + depends on LD_IS_LLD && LLD_VERSION < 180000 + config ARCH_MMAP_RND_BITS_MIN default 18 if 64BIT default 8 @@ -413,7 +434,9 @@ config NUMA depends on SMP && MMU select ARCH_SUPPORTS_NUMA_BALANCING select GENERIC_ARCH_NUMA + select HAVE_SETUP_PER_CPU_AREA select NEED_PER_CPU_EMBED_FIRST_CHUNK + select NEED_PER_CPU_PAGE_FIRST_CHUNK select OF_NUMA select USE_PERCPU_NUMA_NODE_ID help @@ -524,6 +547,28 @@ config RISCV_ISA_V_DEFAULT_ENABLE If you don't know what to do here, say Y. +config RISCV_ISA_V_UCOPY_THRESHOLD + int "Threshold size for vectorized user copies" + depends on RISCV_ISA_V + default 768 + help + Prefer using vectorized copy_to_user()/copy_from_user() when the + workload size exceeds this value. 
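The threshold above exists because kernel-mode Vector has a fixed setup cost (enabling the unit, then saving and restoring vector state) that small copies cannot amortize; the asm-prototypes.h hunk later in this diff declares the real entry point, enter_vector_usercopy(). Below is a minimal sketch of the size-gated dispatch the option implies, using hypothetical stand-in helpers rather than the kernel's actual routines:

#include <stddef.h>
#include <string.h>

#define UCOPY_THRESHOLD 768	/* the Kconfig default above */

/* Hypothetical stand-in: a real vector path would enable V, stream the
 * bytes with vector loads/stores, and save/restore vector state. That
 * fixed overhead is what the threshold amortizes. */
static void vector_copy(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
}

/* Hypothetical stand-in for the plain scalar path. */
static void scalar_copy(void *to, const void *from, size_t n)
{
	memcpy(to, from, n);
}

static void copy_dispatch(void *to, const void *from, size_t n)
{
	if (n > UCOPY_THRESHOLD)	/* "workload size exceeds this value" */
		vector_copy(to, from, n);
	else
		scalar_copy(to, from, n);
}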
+ +config RISCV_ISA_V_PREEMPTIVE + bool "Run kernel-mode Vector with kernel preemption" + depends on PREEMPTION + depends on RISCV_ISA_V + default y + help + Usually, in-kernel SIMD routines are run with preemption disabled. + Functions which invoke long-running SIMD thus must yield the core's + vector unit to prevent blocking other tasks for too long. + + This config allows the kernel to run SIMD without explicitly disabling + preemption. Enabling this config will result in higher memory + consumption due to the allocation of per-task kernel Vector context. + config TOOLCHAIN_HAS_ZBB bool default y @@ -650,6 +695,20 @@ config RISCV_MISALIGNED load/store for both kernel and userspace. When disabled, misaligned accesses will generate SIGBUS in userspace and a panic in the kernel. +config RISCV_EFFICIENT_UNALIGNED_ACCESS + bool "Assume the CPU supports fast unaligned memory accesses" + depends on NONPORTABLE + select DCACHE_WORD_ACCESS if MMU + select HAVE_EFFICIENT_UNALIGNED_ACCESS + help + Say Y here if you want the kernel to assume that the CPU supports + efficient unaligned memory accesses. When enabled, this option + improves the performance of the kernel on such CPUs. However, the + kernel will run much more slowly, or will not be able to run at all, + on CPUs that do not support efficient unaligned memory accesses. + + If unsure what to do here, say N. + endmenu # "Platform type" menu "Kernel features" @@ -721,6 +780,25 @@ config COMPAT If you want to execute 32-bit userspace applications, say Y. +config PARAVIRT + bool "Enable paravirtualization code" + depends on RISCV_SBI + help + This changes the kernel so it can modify itself when it is run + under a hypervisor, potentially improving performance significantly + over full virtualization. + +config PARAVIRT_TIME_ACCOUNTING + bool "Paravirtual steal time accounting" + depends on PARAVIRT + help + Select this option to enable fine granularity task steal time + accounting. Time spent executing other tasks in parallel with + the current vCPU is discounted from the vCPU power. To account for + that, there can be a small performance impact. + + If in doubt, say N here. + config RELOCATABLE bool "Build a relocatable kernel" depends on MMU && 64BIT && !XIP_KERNEL @@ -901,13 +979,13 @@ config RISCV_ISA_FALLBACK on the replacement properties, "riscv,isa-base" and "riscv,isa-extensions". -endmenu # "Boot options" - config BUILTIN_DTB - bool + bool "Built-in device tree" depends on OF && NONPORTABLE default y if XIP_KERNEL +endmenu # "Boot options" + config PORTABLE bool default !NONPORTABLE diff --git a/arch/riscv/Kconfig.errata b/arch/riscv/Kconfig.errata index e2c731cfed..910ba8837a 100644 --- a/arch/riscv/Kconfig.errata +++ b/arch/riscv/Kconfig.errata @@ -53,6 +53,25 @@ config ERRATA_SIFIVE_CIP_1200 If you don't know what to do here, say "Y". +config ERRATA_STARFIVE_JH7100 + bool "StarFive JH7100 support" + depends on ARCH_STARFIVE + depends on !DMA_DIRECT_REMAP + depends on NONPORTABLE + select DMA_GLOBAL_POOL + select RISCV_DMA_NONCOHERENT + select RISCV_NONSTANDARD_CACHE_OPS + select SIFIVE_CCACHE + default n + help + The StarFive JH7100 was a test chip for the JH7110 and has + caches that are non-coherent with respect to peripheral DMAs. + It was designed before the Zicbom extension so needs non-standard + cache operations through the SiFive cache controller. + + Say "Y" if you want to support the BeagleV Starlight and/or + StarFive VisionFive V1 boards.
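The JH7100 entry above is the standard non-coherent DMA discipline: write back dirty CPU cache lines before a device reads a buffer, and invalidate stale lines before the CPU reads data the device wrote. Here is a self-contained sketch of that ordering, using no-op stand-ins for the SoC-specific operations that RISCV_NONSTANDARD_CACHE_OPS plugs in (the thead_errata_cmo_ops later in this diff registers the same .wback/.inv pair):

#include <stddef.h>

/* Hypothetical stand-ins: on the JH7100 these would drive the SiFive
 * cache controller, since the chip predates the Zicbom instructions. */
static void cache_wback(void *buf, size_t len) { (void)buf; (void)len; }
static void cache_inv(void *buf, size_t len)   { (void)buf; (void)len; }

/* CPU -> device: clean the cache so the device's DMA read observes the
 * CPU's latest writes instead of stale memory contents. */
static void dma_sync_for_device(void *buf, size_t len)
{
	cache_wback(buf, len);
}

/* Device -> CPU: invalidate so the CPU's next loads fetch the freshly
 * DMA-written data from memory instead of stale cache lines. */
static void dma_sync_for_cpu(void *buf, size_t len)
{
	cache_inv(buf, len);
}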
+ config ERRATA_THEAD bool "T-HEAD errata" depends on RISCV_ALTERNATIVE @@ -79,6 +98,7 @@ config ERRATA_THEAD_CMO depends on ERRATA_THEAD && MMU select DMA_DIRECT_REMAP select RISCV_DMA_NONCOHERENT + select RISCV_NONSTANDARD_CACHE_OPS default y help This will apply the cache management errata to handle the diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile index a74be78678..0b7d109258 100644 --- a/arch/riscv/Makefile +++ b/arch/riscv/Makefile @@ -43,8 +43,7 @@ else KBUILD_LDFLAGS += -melf32lriscv endif -ifeq ($(CONFIG_LD_IS_LLD),y) -ifeq ($(call test-lt, $(CONFIG_LLD_VERSION), 150000),y) +ifndef CONFIG_RISCV_USE_LINKER_RELAXATION KBUILD_CFLAGS += -mno-relax KBUILD_AFLAGS += -mno-relax ifndef CONFIG_AS_IS_LLVM @@ -52,7 +51,6 @@ ifndef CONFIG_AS_IS_LLVM KBUILD_AFLAGS += -Wa,-mno-relax endif endif -endif ifeq ($(CONFIG_SHADOW_CALL_STACK),y) KBUILD_LDFLAGS += --no-relax-gp @@ -108,7 +106,9 @@ KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax) # unaligned accesses. While unaligned accesses are explicitly allowed in the # RISC-V ISA, they're emulated by machine mode traps on all extant # architectures. It's faster to have GCC emit only aligned accesses. +ifneq ($(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS),y) KBUILD_CFLAGS += $(call cc-option,-mstrict-align) +endif ifeq ($(CONFIG_STACKPROTECTOR_PER_TASK),y) prepare: stack_protector_prepare @@ -163,6 +163,8 @@ BOOT_TARGETS := Image Image.gz loader loader.bin xipImage vmlinuz.efi all: $(notdir $(KBUILD_IMAGE)) +loader.bin: loader +Image.gz loader vmlinuz.efi: Image $(BOOT_TARGETS): vmlinux $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@ @$(kecho) ' Kernel: $(boot)/$@ is ready' diff --git a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts index dce96f27cc..222a39d90f 100644 --- a/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts +++ b/arch/riscv/boot/dts/microchip/mpfs-icicle-kit.dts @@ -192,6 +192,27 @@ status = "okay"; }; +&syscontroller_qspi { + /* + * The flash *is* there, but on Icicle kits that have engineering sample + * silicon, (write?) access to this flash is non-functional. The system + * controller itself can actually access it, but the MSS cannot write + * an image there. Instantiating a coreQSPI in the fabric & connecting + * it to the flash instead should work though. Pre-production or later + * silicon does not have this issue.
+ */ + status = "disabled"; + + sys_ctrl_flash: flash@0 { // MT25QL01GBBB8ESF-0SIT + compatible = "jedec,spi-nor"; + #address-cells = <1>; + #size-cells = <1>; + spi-max-frequency = <20000000>; + spi-rx-bus-width = <1>; + reg = <0>; + }; +}; + &usb { status = "okay"; dr_mode = "host"; diff --git a/arch/riscv/boot/dts/microchip/mpfs.dtsi b/arch/riscv/boot/dts/microchip/mpfs.dtsi index 266489d439..59fd2d4ea5 100644 --- a/arch/riscv/boot/dts/microchip/mpfs.dtsi +++ b/arch/riscv/boot/dts/microchip/mpfs.dtsi @@ -194,6 +194,12 @@ mboxes = <&mbox 0>; }; + scbclk: mssclkclk { + compatible = "fixed-clock"; + #clock-cells = <0>; + clock-frequency = <80000000>; + }; + soc { #address-cells = <2>; #size-cells = <2>; @@ -524,5 +530,16 @@ #mbox-cells = <1>; status = "disabled"; }; + + syscontroller_qspi: spi@37020100 { + compatible = "microchip,mpfs-qspi", "microchip,coreqspi-rtl-v2"; + #address-cells = <1>; + #size-cells = <0>; + reg = <0x0 0x37020100 0x0 0x100>; + interrupt-parent = <&plic>; + interrupts = <110>; + clocks = <&scbclk>; + status = "disabled"; + }; }; }; diff --git a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi index b0796015e3..a92cfcfc02 100644 --- a/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi +++ b/arch/riscv/boot/dts/renesas/r9a07g043f.dtsi @@ -24,6 +24,10 @@ reg = <0x0>; status = "okay"; riscv,isa = "rv64imafdc"; + riscv,isa-base = "rv64i"; + riscv,isa-extensions = "i", "m", "a", "f", "d", "c", + "zicntr", "zicsr", "zifencei", + "zihpm"; mmu-type = "riscv,sv39"; i-cache-size = <0x8000>; i-cache-line-size = <0x40>; diff --git a/arch/riscv/boot/dts/sophgo/Makefile b/arch/riscv/boot/dts/sophgo/Makefile index 3fb65512c6..57ad82a61e 100644 --- a/arch/riscv/boot/dts/sophgo/Makefile +++ b/arch/riscv/boot/dts/sophgo/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 dtb-$(CONFIG_ARCH_SOPHGO) += cv1800b-milkv-duo.dtb +dtb-$(CONFIG_ARCH_SOPHGO) += cv1812h-huashan-pi.dtb dtb-$(CONFIG_ARCH_SOPHGO) += sg2042-milkv-pioneer.dtb diff --git a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi index aec6401a46..165e9e320a 100644 --- a/arch/riscv/boot/dts/sophgo/cv1800b.dtsi +++ b/arch/riscv/boot/dts/sophgo/cv1800b.dtsi @@ -3,120 +3,16 @@ * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org> */ -#include <dt-bindings/interrupt-controller/irq.h> +#include "cv18xx.dtsi" / { compatible = "sophgo,cv1800b"; - #address-cells = <1>; - #size-cells = <1>; - - cpus: cpus { - #address-cells = <1>; - #size-cells = <0>; - timebase-frequency = <25000000>; - - cpu0: cpu@0 { - compatible = "thead,c906", "riscv"; - device_type = "cpu"; - reg = <0>; - d-cache-block-size = <64>; - d-cache-sets = <512>; - d-cache-size = <65536>; - i-cache-block-size = <64>; - i-cache-sets = <128>; - i-cache-size = <32768>; - mmu-type = "riscv,sv39"; - riscv,isa = "rv64imafdc"; - riscv,isa-base = "rv64i"; - riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr", - "zifencei", "zihpm"; - - cpu0_intc: interrupt-controller { - compatible = "riscv,cpu-intc"; - interrupt-controller; - #interrupt-cells = <1>; - }; - }; - }; - - osc: oscillator { - compatible = "fixed-clock"; - clock-output-names = "osc_25m"; - #clock-cells = <0>; - }; - - soc { - compatible = "simple-bus"; - interrupt-parent = <&plic>; - #address-cells = <1>; - #size-cells = <1>; - dma-noncoherent; - ranges; - - uart0: serial@4140000 { - compatible = "snps,dw-apb-uart"; - reg = <0x04140000 0x100>; - interrupts = <44 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&osc>; - reg-shift = <2>; - 
reg-io-width = <4>; - status = "disabled"; - }; - - uart1: serial@4150000 { - compatible = "snps,dw-apb-uart"; - reg = <0x04150000 0x100>; - interrupts = <45 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&osc>; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uart2: serial@4160000 { - compatible = "snps,dw-apb-uart"; - reg = <0x04160000 0x100>; - interrupts = <46 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&osc>; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uart3: serial@4170000 { - compatible = "snps,dw-apb-uart"; - reg = <0x04170000 0x100>; - interrupts = <47 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&osc>; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; - - uart4: serial@41c0000 { - compatible = "snps,dw-apb-uart"; - reg = <0x041c0000 0x100>; - interrupts = <48 IRQ_TYPE_LEVEL_HIGH>; - clocks = <&osc>; - reg-shift = <2>; - reg-io-width = <4>; - status = "disabled"; - }; +}; - plic: interrupt-controller@70000000 { - compatible = "sophgo,cv1800b-plic", "thead,c900-plic"; - reg = <0x70000000 0x4000000>; - interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>; - interrupt-controller; - #address-cells = <0>; - #interrupt-cells = <2>; - riscv,ndev = <101>; - }; +&plic { + compatible = "sophgo,cv1800b-plic", "thead,c900-plic"; +}; - clint: timer@74000000 { - compatible = "sophgo,cv1800b-clint", "thead,c900-clint"; - reg = <0x74000000 0x10000>; - interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>; - }; - }; +&clint { + compatible = "sophgo,cv1800b-clint", "thead,c900-clint"; }; diff --git a/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts b/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts new file mode 100644 index 0000000000..aa361f3a86 --- /dev/null +++ b/arch/riscv/boot/dts/sophgo/cv1812h-huashan-pi.dts @@ -0,0 +1,48 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com> + */ + +/dts-v1/; + +#include "cv1812h.dtsi" + +/ { + model = "Huashan Pi"; + compatible = "sophgo,huashan-pi", "sophgo,cv1812h"; + + aliases { + gpio0 = &gpio0; + gpio1 = &gpio1; + gpio2 = &gpio2; + gpio3 = &gpio3; + serial0 = &uart0; + serial1 = &uart1; + serial2 = &uart2; + serial3 = &uart3; + serial4 = &uart4; + }; + + chosen { + stdout-path = "serial0:115200n8"; + }; + + reserved-memory { + #address-cells = <1>; + #size-cells = <1>; + ranges; + + coprocessor_rtos: region@8fe00000 { + reg = <0x8fe00000 0x200000>; + no-map; + }; + }; +}; + +&osc { + clock-frequency = <25000000>; +}; + +&uart0 { + status = "okay"; +}; diff --git a/arch/riscv/boot/dts/sophgo/cv1812h.dtsi b/arch/riscv/boot/dts/sophgo/cv1812h.dtsi new file mode 100644 index 0000000000..3e7a942f5c --- /dev/null +++ b/arch/riscv/boot/dts/sophgo/cv1812h.dtsi @@ -0,0 +1,24 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com> + */ + +#include <dt-bindings/interrupt-controller/irq.h> +#include "cv18xx.dtsi" + +/ { + compatible = "sophgo,cv1812h"; + + memory@80000000 { + device_type = "memory"; + reg = <0x80000000 0x10000000>; + }; +}; + +&plic { + compatible = "sophgo,cv1812h-plic", "thead,c900-plic"; +}; + +&clint { + compatible = "sophgo,cv1812h-clint", "thead,c900-clint"; +}; diff --git a/arch/riscv/boot/dts/sophgo/cv18xx.dtsi b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi new file mode 100644 index 0000000000..2d6f4a4b1e --- /dev/null +++ b/arch/riscv/boot/dts/sophgo/cv18xx.dtsi @@ -0,0 +1,192 @@ +// SPDX-License-Identifier: (GPL-2.0 OR MIT) +/* + * Copyright (C) 2023 Jisheng Zhang <jszhang@kernel.org> + 
* Copyright (C) 2023 Inochi Amaoto <inochiama@outlook.com> + */ + +#include <dt-bindings/interrupt-controller/irq.h> + +/ { + #address-cells = <1>; + #size-cells = <1>; + + cpus: cpus { + #address-cells = <1>; + #size-cells = <0>; + timebase-frequency = <25000000>; + + cpu0: cpu@0 { + compatible = "thead,c906", "riscv"; + device_type = "cpu"; + reg = <0>; + d-cache-block-size = <64>; + d-cache-sets = <512>; + d-cache-size = <65536>; + i-cache-block-size = <64>; + i-cache-sets = <128>; + i-cache-size = <32768>; + mmu-type = "riscv,sv39"; + riscv,isa = "rv64imafdc"; + riscv,isa-base = "rv64i"; + riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr", + "zifencei", "zihpm"; + + cpu0_intc: interrupt-controller { + compatible = "riscv,cpu-intc"; + interrupt-controller; + #interrupt-cells = <1>; + }; + }; + }; + + osc: oscillator { + compatible = "fixed-clock"; + clock-output-names = "osc_25m"; + #clock-cells = <0>; + }; + + soc { + compatible = "simple-bus"; + interrupt-parent = <&plic>; + #address-cells = <1>; + #size-cells = <1>; + dma-noncoherent; + ranges; + + gpio0: gpio@3020000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x3020000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + porta: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <60 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + gpio1: gpio@3021000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x3021000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + portb: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <61 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + gpio2: gpio@3022000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x3022000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + portc: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <62 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + gpio3: gpio@3023000 { + compatible = "snps,dw-apb-gpio"; + reg = <0x3023000 0x1000>; + #address-cells = <1>; + #size-cells = <0>; + + portd: gpio-controller@0 { + compatible = "snps,dw-apb-gpio-port"; + gpio-controller; + #gpio-cells = <2>; + ngpios = <32>; + reg = <0>; + interrupt-controller; + #interrupt-cells = <2>; + interrupts = <63 IRQ_TYPE_LEVEL_HIGH>; + }; + }; + + uart0: serial@4140000 { + compatible = "snps,dw-apb-uart"; + reg = <0x04140000 0x100>; + interrupts = <44 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&osc>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart1: serial@4150000 { + compatible = "snps,dw-apb-uart"; + reg = <0x04150000 0x100>; + interrupts = <45 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&osc>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart2: serial@4160000 { + compatible = "snps,dw-apb-uart"; + reg = <0x04160000 0x100>; + interrupts = <46 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&osc>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart3: serial@4170000 { + compatible = "snps,dw-apb-uart"; + reg = <0x04170000 0x100>; + interrupts = <47 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&osc>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + uart4: serial@41c0000 { + compatible = "snps,dw-apb-uart"; + reg = <0x041c0000 
0x100>; + interrupts = <48 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&osc>; + reg-shift = <2>; + reg-io-width = <4>; + status = "disabled"; + }; + + plic: interrupt-controller@70000000 { + reg = <0x70000000 0x4000000>; + interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>; + interrupt-controller; + #address-cells = <0>; + #interrupt-cells = <2>; + riscv,ndev = <101>; + }; + + clint: timer@74000000 { + reg = <0x74000000 0x10000>; + interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>; + }; + }; +}; diff --git a/arch/riscv/boot/dts/starfive/jh7100-common.dtsi b/arch/riscv/boot/dts/starfive/jh7100-common.dtsi index b93ce351a9..42fb61c360 100644 --- a/arch/riscv/boot/dts/starfive/jh7100-common.dtsi +++ b/arch/riscv/boot/dts/starfive/jh7100-common.dtsi @@ -12,6 +12,8 @@ / { aliases { + mmc0 = &sdio0; + mmc1 = &sdio1; serial0 = &uart3; }; @@ -39,6 +41,35 @@ label = "ack"; }; }; + + reserved-memory { + #address-cells = <2>; + #size-cells = <2>; + ranges; + + dma-reserved@fa000000 { + reg = <0x0 0xfa000000 0x0 0x1000000>; + no-map; + }; + + linux,dma@107a000000 { + compatible = "shared-dma-pool"; + reg = <0x10 0x7a000000 0x0 0x1000000>; + no-map; + linux,dma-default; + }; + }; + + soc { + dma-ranges = <0x00 0x80000000 0x00 0x80000000 0x00 0x7a000000>, + <0x00 0xfa000000 0x10 0x7a000000 0x00 0x01000000>, + <0x00 0xfb000000 0x00 0xfb000000 0x07 0x85000000>; + }; + + wifi_pwrseq: wifi-pwrseq { + compatible = "mmc-pwrseq-simple"; + reset-gpios = <&gpio 37 GPIO_ACTIVE_LOW>; + }; }; &gpio { @@ -84,6 +115,78 @@ }; }; + sdio0_pins: sdio0-0 { + clk-pins { + pinmux = <GPIOMUX(54, GPO_SDIO0_PAD_CCLK_OUT, + GPO_ENABLE, GPI_NONE)>; + bias-disable; + input-disable; + input-schmitt-disable; + }; + sdio-pins { + pinmux = <GPIOMUX(55, GPO_LOW, GPO_DISABLE, + GPI_SDIO0_PAD_CARD_DETECT_N)>, + <GPIOMUX(53, + GPO_SDIO0_PAD_CCMD_OUT, + GPO_SDIO0_PAD_CCMD_OEN, + GPI_SDIO0_PAD_CCMD_IN)>, + <GPIOMUX(49, + GPO_SDIO0_PAD_CDATA_OUT_BIT0, + GPO_SDIO0_PAD_CDATA_OEN_BIT0, + GPI_SDIO0_PAD_CDATA_IN_BIT0)>, + <GPIOMUX(50, + GPO_SDIO0_PAD_CDATA_OUT_BIT1, + GPO_SDIO0_PAD_CDATA_OEN_BIT1, + GPI_SDIO0_PAD_CDATA_IN_BIT1)>, + <GPIOMUX(51, + GPO_SDIO0_PAD_CDATA_OUT_BIT2, + GPO_SDIO0_PAD_CDATA_OEN_BIT2, + GPI_SDIO0_PAD_CDATA_IN_BIT2)>, + <GPIOMUX(52, + GPO_SDIO0_PAD_CDATA_OUT_BIT3, + GPO_SDIO0_PAD_CDATA_OEN_BIT3, + GPI_SDIO0_PAD_CDATA_IN_BIT3)>; + bias-pull-up; + input-enable; + input-schmitt-enable; + }; + }; + + sdio1_pins: sdio1-0 { + clk-pins { + pinmux = <GPIOMUX(33, GPO_SDIO1_PAD_CCLK_OUT, + GPO_ENABLE, GPI_NONE)>; + bias-disable; + input-disable; + input-schmitt-disable; + }; + sdio-pins { + pinmux = <GPIOMUX(29, + GPO_SDIO1_PAD_CCMD_OUT, + GPO_SDIO1_PAD_CCMD_OEN, + GPI_SDIO1_PAD_CCMD_IN)>, + <GPIOMUX(36, + GPO_SDIO1_PAD_CDATA_OUT_BIT0, + GPO_SDIO1_PAD_CDATA_OEN_BIT0, + GPI_SDIO1_PAD_CDATA_IN_BIT0)>, + <GPIOMUX(30, + GPO_SDIO1_PAD_CDATA_OUT_BIT1, + GPO_SDIO1_PAD_CDATA_OEN_BIT1, + GPI_SDIO1_PAD_CDATA_IN_BIT1)>, + <GPIOMUX(34, + GPO_SDIO1_PAD_CDATA_OUT_BIT2, + GPO_SDIO1_PAD_CDATA_OEN_BIT2, + GPI_SDIO1_PAD_CDATA_IN_BIT2)>, + <GPIOMUX(31, + GPO_SDIO1_PAD_CDATA_OUT_BIT3, + GPO_SDIO1_PAD_CDATA_OEN_BIT3, + GPI_SDIO1_PAD_CDATA_IN_BIT3)>; + bias-pull-up; + input-enable; + input-schmitt-enable; + }; + }; + uart3_pins: uart3-0 { rx-pins { pinmux = <GPIOMUX(13, GPO_LOW, GPO_DISABLE, @@ -154,6 +257,34 @@ clock-frequency = <27000000>; }; +&sdio0 { + broken-cd; + bus-width = <4>; + cap-sd-highspeed; + pinctrl-names = "default"; + pinctrl-0 = <&sdio0_pins>; + status = "okay"; +}; + +&sdio1 { + #address-cells = <1>; + #size-cells = <0>; + bus-width = <4>; 
+ cap-sd-highspeed; + cap-sdio-irq; + cap-power-off-card; + mmc-pwrseq = <&wifi_pwrseq>; + non-removable; + pinctrl-names = "default"; + pinctrl-0 = <&sdio1_pins>; + status = "okay"; + + wifi@1 { + compatible = "brcm,bcm4329-fmac"; + reg = <1>; + }; +}; + &uart3 { pinctrl-names = "default"; pinctrl-0 = <&uart3_pins>; diff --git a/arch/riscv/boot/dts/starfive/jh7100.dtsi b/arch/riscv/boot/dts/starfive/jh7100.dtsi index e68cafe754..5d499d8aa8 100644 --- a/arch/riscv/boot/dts/starfive/jh7100.dtsi +++ b/arch/riscv/boot/dts/starfive/jh7100.dtsi @@ -32,6 +32,7 @@ i-tlb-sets = <1>; i-tlb-size = <32>; mmu-type = "riscv,sv39"; + next-level-cache = <&ccache>; riscv,isa = "rv64imafdc"; riscv,isa-base = "rv64i"; riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr", @@ -60,6 +61,7 @@ i-tlb-sets = <1>; i-tlb-size = <32>; mmu-type = "riscv,sv39"; + next-level-cache = <&ccache>; riscv,isa = "rv64imafdc"; riscv,isa-base = "rv64i"; riscv,isa-extensions = "i", "m", "a", "f", "d", "c", "zicntr", "zicsr", @@ -94,14 +96,14 @@ thermal-sensors = <&sfctemp>; trips { - cpu_alert0 { + cpu-alert0 { /* milliCelsius */ temperature = <75000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit { + cpu-crit { /* milliCelsius */ temperature = <90000>; hysteresis = <2000>; @@ -111,30 +113,34 @@ }; }; - osc_sys: osc_sys { + osc_sys: osc-sys { compatible = "fixed-clock"; #clock-cells = <0>; + clock-output-names = "osc_sys"; /* This value must be overridden by the board */ clock-frequency = <0>; }; - osc_aud: osc_aud { + osc_aud: osc-aud { compatible = "fixed-clock"; #clock-cells = <0>; + clock-output-names = "osc_aud"; /* This value must be overridden by the board */ clock-frequency = <0>; }; - gmac_rmii_ref: gmac_rmii_ref { + gmac_rmii_ref: gmac-rmii-ref { compatible = "fixed-clock"; #clock-cells = <0>; + clock-output-names = "gmac_rmii_ref"; /* Should be overridden by the board when needed */ clock-frequency = <0>; }; - gmac_gr_mii_rxclk: gmac_gr_mii_rxclk { + gmac_gr_mii_rxclk: gmac-gr-mii-rxclk { compatible = "fixed-clock"; #clock-cells = <0>; + clock-output-names = "gmac_gr_mii_rxclk"; /* Should be overridden by the board when needed */ clock-frequency = <0>; }; @@ -144,26 +150,64 @@ interrupt-parent = <&plic>; #address-cells = <2>; #size-cells = <2>; + dma-noncoherent; ranges; clint: clint@2000000 { compatible = "starfive,jh7100-clint", "sifive,clint0"; reg = <0x0 0x2000000 0x0 0x10000>; - interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7 - &cpu1_intc 3 &cpu1_intc 7>; + interrupts-extended = <&cpu0_intc 3>, <&cpu0_intc 7>, + <&cpu1_intc 3>, <&cpu1_intc 7>; + }; + + ccache: cache-controller@2010000 { + compatible = "starfive,jh7100-ccache", "sifive,ccache0", "cache"; + reg = <0x0 0x2010000 0x0 0x1000>; + interrupts = <128>, <130>, <131>, <129>; + cache-block-size = <64>; + cache-level = <2>; + cache-sets = <2048>; + cache-size = <2097152>; + cache-unified; }; plic: interrupt-controller@c000000 { compatible = "starfive,jh7100-plic", "sifive,plic-1.0.0"; reg = <0x0 0xc000000 0x0 0x4000000>; - interrupts-extended = <&cpu0_intc 11 &cpu0_intc 9 - &cpu1_intc 11 &cpu1_intc 9>; + interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>, + <&cpu1_intc 11>, <&cpu1_intc 9>; interrupt-controller; #address-cells = <0>; #interrupt-cells = <1>; riscv,ndev = <133>; }; + sdio0: mmc@10000000 { + compatible = "snps,dw-mshc"; + reg = <0x0 0x10000000 0x0 0x10000>; + clocks = <&clkgen JH7100_CLK_SDIO0_AHB>, + <&clkgen JH7100_CLK_SDIO0_CCLKINT_INV>; + clock-names = "biu", "ciu"; + interrupts = <4>; + data-addr = <0>; + 
fifo-depth = <32>; + fifo-watermark-aligned; + status = "disabled"; + }; + + sdio1: mmc@10010000 { + compatible = "snps,dw-mshc"; + reg = <0x0 0x10010000 0x0 0x10000>; + clocks = <&clkgen JH7100_CLK_SDIO1_AHB>, + <&clkgen JH7100_CLK_SDIO1_CCLKINT_INV>; + clock-names = "biu", "ciu"; + interrupts = <5>; + data-addr = <0>; + fifo-depth = <32>; + fifo-watermark-aligned; + status = "disabled"; + }; + clkgen: clock-controller@11800000 { compatible = "starfive,jh7100-clkgen"; reg = <0x0 0x11800000 0x0 0x10000>; diff --git a/arch/riscv/boot/dts/starfive/jh7110.dtsi b/arch/riscv/boot/dts/starfive/jh7110.dtsi index 45213cdf50..74ed3b9264 100644 --- a/arch/riscv/boot/dts/starfive/jh7110.dtsi +++ b/arch/riscv/boot/dts/starfive/jh7110.dtsi @@ -237,14 +237,14 @@ }; trips { - cpu_alert0: cpu_alert0 { + cpu_alert0: cpu-alert0 { /* milliCelsius */ temperature = <85000>; hysteresis = <2000>; type = "passive"; }; - cpu_crit { + cpu-crit { /* milliCelsius */ temperature = <100000>; hysteresis = <2000>; diff --git a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts index 70e8042c83..d9b4de9e47 100644 --- a/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts +++ b/arch/riscv/boot/dts/thead/th1520-beaglev-ahead.dts @@ -48,6 +48,10 @@ clock-frequency = <62500000>; }; +&sdhci_clk { + clock-frequency = <198000000>; +}; + &uart_sclk { clock-frequency = <100000000>; }; @@ -56,6 +60,22 @@ status = "okay"; }; +&emmc { + bus-width = <8>; + max-frequency = <198000000>; + mmc-hs400-1_8v; + non-removable; + no-sdio; + no-sd; + status = "okay"; +}; + +&sdio0 { + bus-width = <4>; + max-frequency = <198000000>; + status = "okay"; +}; + &uart0 { status = "okay"; }; diff --git a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi index a802ab1104..1365d3a512 100644 --- a/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi +++ b/arch/riscv/boot/dts/thead/th1520-lichee-module-4a.dtsi @@ -29,6 +29,10 @@ clock-frequency = <62500000>; }; +&sdhci_clk { + clock-frequency = <198000000>; +}; + &uart_sclk { clock-frequency = <100000000>; }; @@ -36,3 +40,19 @@ &dmac0 { status = "okay"; }; + +&emmc { + bus-width = <8>; + max-frequency = <198000000>; + mmc-hs400-1_8v; + non-removable; + no-sdio; + no-sd; + status = "okay"; +}; + +&sdio0 { + bus-width = <4>; + max-frequency = <198000000>; + status = "okay"; +}; diff --git a/arch/riscv/boot/dts/thead/th1520.dtsi b/arch/riscv/boot/dts/thead/th1520.dtsi index ba4d2c673a..8b915e206f 100644 --- a/arch/riscv/boot/dts/thead/th1520.dtsi +++ b/arch/riscv/boot/dts/thead/th1520.dtsi @@ -146,6 +146,13 @@ #clock-cells = <0>; }; + sdhci_clk: sdhci-clock { + compatible = "fixed-clock"; + clock-frequency = <198000000>; + clock-output-names = "sdhci_clk"; + #clock-cells = <0>; + }; + soc { compatible = "simple-bus"; interrupt-parent = <&plic>; @@ -304,6 +311,33 @@ status = "disabled"; }; + emmc: mmc@ffe7080000 { + compatible = "thead,th1520-dwcmshc"; + reg = <0xff 0xe7080000 0x0 0x10000>; + interrupts = <62 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&sdhci_clk>; + clock-names = "core"; + status = "disabled"; + }; + + sdio0: mmc@ffe7090000 { + compatible = "thead,th1520-dwcmshc"; + reg = <0xff 0xe7090000 0x0 0x10000>; + interrupts = <64 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&sdhci_clk>; + clock-names = "core"; + status = "disabled"; + }; + + sdio1: mmc@ffe70a0000 { + compatible = "thead,th1520-dwcmshc"; + reg = <0xff 0xe70a0000 0x0 0x10000>; + interrupts = <71 IRQ_TYPE_LEVEL_HIGH>; + clocks = <&sdhci_clk>; + 
clock-names = "core"; + status = "disabled"; + }; + timer0: timer@ffefc32000 { compatible = "snps,dw-apb-timer"; reg = <0xff 0xefc32000 0x0 0x14>; diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig index 905881282a..eaf34e871e 100644 --- a/arch/riscv/configs/defconfig +++ b/arch/riscv/configs/defconfig @@ -149,6 +149,7 @@ CONFIG_SERIAL_8250_CONSOLE=y CONFIG_SERIAL_8250_DW=y CONFIG_SERIAL_OF_PLATFORM=y CONFIG_SERIAL_SH_SCI=y +CONFIG_SERIAL_EARLYCON_RISCV_SBI=y CONFIG_VIRTIO_CONSOLE=y CONFIG_HW_RANDOM=y CONFIG_HW_RANDOM_VIRTIO=y diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig deleted file mode 100644 index 89b601e253..0000000000 --- a/arch/riscv/configs/rv32_defconfig +++ /dev/null @@ -1,139 +0,0 @@ -CONFIG_SYSVIPC=y -CONFIG_POSIX_MQUEUE=y -CONFIG_NO_HZ_IDLE=y -CONFIG_HIGH_RES_TIMERS=y -CONFIG_BPF_SYSCALL=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_CGROUPS=y -CONFIG_CGROUP_SCHED=y -CONFIG_CFS_BANDWIDTH=y -CONFIG_CGROUP_BPF=y -CONFIG_NAMESPACES=y -CONFIG_USER_NS=y -CONFIG_CHECKPOINT_RESTORE=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_EXPERT=y -# CONFIG_SYSFS_SYSCALL is not set -CONFIG_PROFILING=y -CONFIG_SOC_SIFIVE=y -CONFIG_SOC_VIRT=y -CONFIG_NONPORTABLE=y -CONFIG_ARCH_RV32I=y -CONFIG_SMP=y -CONFIG_HOTPLUG_CPU=y -CONFIG_PM=y -CONFIG_CPU_IDLE=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=m -CONFIG_JUMP_LABEL=y -CONFIG_MODULES=y -CONFIG_MODULE_UNLOAD=y -CONFIG_NET=y -CONFIG_PACKET=y -CONFIG_UNIX=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_PNP=y -CONFIG_IP_PNP_DHCP=y -CONFIG_IP_PNP_BOOTP=y -CONFIG_IP_PNP_RARP=y -CONFIG_NETLINK_DIAG=y -CONFIG_NET_9P=y -CONFIG_NET_9P_VIRTIO=y -CONFIG_PCI=y -CONFIG_PCIEPORTBUS=y -CONFIG_PCI_HOST_GENERIC=y -CONFIG_PCIE_XILINX=y -CONFIG_DEVTMPFS=y -CONFIG_DEVTMPFS_MOUNT=y -CONFIG_BLK_DEV_LOOP=y -CONFIG_VIRTIO_BLK=y -CONFIG_BLK_DEV_SD=y -CONFIG_BLK_DEV_SR=y -CONFIG_SCSI_VIRTIO=y -CONFIG_ATA=y -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_NETDEVICES=y -CONFIG_VIRTIO_NET=y -CONFIG_MACB=y -CONFIG_E1000E=y -CONFIG_R8169=y -CONFIG_MICROSEMI_PHY=y -CONFIG_INPUT_MOUSEDEV=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_OF_PLATFORM=y -CONFIG_VIRTIO_CONSOLE=y -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_VIRTIO=y -CONFIG_SPI=y -CONFIG_SPI_SIFIVE=y -# CONFIG_PTP_1588_CLOCK is not set -CONFIG_DRM=y -CONFIG_DRM_RADEON=y -CONFIG_DRM_VIRTIO_GPU=y -CONFIG_FB=y -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_USB=y -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_PLATFORM=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_HCD_PLATFORM=y -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PLATFORM=y -CONFIG_USB_STORAGE=y -CONFIG_USB_UAS=y -CONFIG_MMC=y -CONFIG_MMC_SPI=y -CONFIG_RTC_CLASS=y -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_INPUT=y -CONFIG_VIRTIO_MMIO=y -CONFIG_RPMSG_CHAR=y -CONFIG_RPMSG_CTRL=y -CONFIG_RPMSG_VIRTIO=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_AUTOFS_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_HUGETLBFS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V4=y -CONFIG_NFS_V4_1=y -CONFIG_NFS_V4_2=y -CONFIG_ROOT_NFS=y -CONFIG_9P_FS=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_DEV_VIRTIO=y -CONFIG_PRINTK_TIME=y -CONFIG_DEBUG_FS=y -CONFIG_DEBUG_PAGEALLOC=y -CONFIG_SCHED_STACK_END_CHECK=y -CONFIG_DEBUG_VM=y -CONFIG_DEBUG_VM_PGFLAGS=y -CONFIG_DEBUG_MEMORY_INIT=y -CONFIG_DEBUG_PER_CPU_MAPS=y -CONFIG_SOFTLOCKUP_DETECTOR=y -CONFIG_WQ_WATCHDOG=y -CONFIG_DEBUG_TIMEKEEPING=y -CONFIG_DEBUG_RT_MUTEXES=y -CONFIG_DEBUG_SPINLOCK=y 
-CONFIG_DEBUG_MUTEXES=y -CONFIG_DEBUG_RWSEMS=y -CONFIG_DEBUG_ATOMIC_SLEEP=y -CONFIG_STACKTRACE=y -CONFIG_DEBUG_LIST=y -CONFIG_DEBUG_PLIST=y -CONFIG_DEBUG_SG=y -# CONFIG_RCU_TRACE is not set -CONFIG_RCU_EQS_DEBUG=y -# CONFIG_FTRACE is not set -# CONFIG_RUNTIME_TESTING_MENU is not set -CONFIG_MEMTEST=y diff --git a/arch/riscv/errata/thead/errata.c b/arch/riscv/errata/thead/errata.c index 0554ed4bf0..b1c410bbc1 100644 --- a/arch/riscv/errata/thead/errata.c +++ b/arch/riscv/errata/thead/errata.c @@ -12,8 +12,10 @@ #include <asm/alternative.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> +#include <asm/dma-noncoherent.h> #include <asm/errata_list.h> #include <asm/hwprobe.h> +#include <asm/io.h> #include <asm/patch.h> #include <asm/vendorid_list.h> @@ -33,6 +35,69 @@ static bool errata_probe_pbmt(unsigned int stage, return false; } +/* + * th.dcache.ipa rs1 (invalidate, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01010 rs1 000 00000 0001011 + * th.dcache.iva rs1 (invalidate, virtual address) + * 0000001 00110 rs1 000 00000 0001011 + * + * th.dcache.cpa rs1 (clean, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01001 rs1 000 00000 0001011 + * th.dcache.cva rs1 (clean, virtual address) + * 0000001 00101 rs1 000 00000 0001011 + * + * th.dcache.cipa rs1 (clean then invalidate, physical address) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000001 01011 rs1 000 00000 0001011 + * th.dcache.civa rs1 (clean then invalidate, virtual address) + * 0000001 00111 rs1 000 00000 0001011 + * + * th.sync.s (make sure all cache operations finished) + * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | + * 0000000 11001 00000 000 00000 0001011 + */ +#define THEAD_INVAL_A0 ".long 0x02a5000b" +#define THEAD_CLEAN_A0 ".long 0x0295000b" +#define THEAD_FLUSH_A0 ".long 0x02b5000b" +#define THEAD_SYNC_S ".long 0x0190000b" + +#define THEAD_CMO_OP(_op, _start, _size, _cachesize) \ +asm volatile("mv a0, %1\n\t" \ + "j 2f\n\t" \ + "3:\n\t" \ + THEAD_##_op##_A0 "\n\t" \ + "add a0, a0, %0\n\t" \ + "2:\n\t" \ + "bltu a0, %2, 3b\n\t" \ + THEAD_SYNC_S \ + : : "r"(_cachesize), \ + "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \ + "r"((unsigned long)(_start) + (_size)) \ + : "a0") + +static void thead_errata_cache_inv(phys_addr_t paddr, size_t size) +{ + THEAD_CMO_OP(INVAL, paddr, size, riscv_cbom_block_size); +} + +static void thead_errata_cache_wback(phys_addr_t paddr, size_t size) +{ + THEAD_CMO_OP(CLEAN, paddr, size, riscv_cbom_block_size); +} + +static void thead_errata_cache_wback_inv(phys_addr_t paddr, size_t size) +{ + THEAD_CMO_OP(FLUSH, paddr, size, riscv_cbom_block_size); +} + +static const struct riscv_nonstd_cache_ops thead_errata_cmo_ops = { + .wback = &thead_errata_cache_wback, + .inv = &thead_errata_cache_inv, + .wback_inv = &thead_errata_cache_wback_inv, +}; + static bool errata_probe_cmo(unsigned int stage, unsigned long arch_id, unsigned long impid) { @@ -48,6 +113,7 @@ static bool errata_probe_cmo(unsigned int stage, if (stage == RISCV_ALTERNATIVES_BOOT) { riscv_cbom_block_size = L1_CACHE_BYTES; riscv_noncoherent_supported(); + riscv_noncoherent_register_cache_ops(&thead_errata_cmo_ops); } return true; @@ -77,8 +143,7 @@ static u32 thead_errata_probe(unsigned int stage, if (errata_probe_pbmt(stage, archid, impid)) cpu_req_errata |= BIT(ERRATA_THEAD_PBMT); - if (errata_probe_cmo(stage, archid, impid)) - cpu_req_errata |= BIT(ERRATA_THEAD_CMO); + errata_probe_cmo(stage, 
archid, impid); if (errata_probe_pmu(stage, archid, impid)) cpu_req_errata |= BIT(ERRATA_THEAD_PMU); diff --git a/arch/riscv/include/asm/arch_hweight.h b/arch/riscv/include/asm/arch_hweight.h new file mode 100644 index 0000000000..85b2c44382 --- /dev/null +++ b/arch/riscv/include/asm/arch_hweight.h @@ -0,0 +1,78 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Based on arch/x86/include/asm/arch_hweight.h + */ + +#ifndef _ASM_RISCV_HWEIGHT_H +#define _ASM_RISCV_HWEIGHT_H + +#include <asm/alternative-macros.h> +#include <asm/hwcap.h> + +#if (BITS_PER_LONG == 64) +#define CPOPW "cpopw " +#elif (BITS_PER_LONG == 32) +#define CPOPW "cpop " +#else +#error "Unexpected BITS_PER_LONG" +#endif + +static __always_inline unsigned int __arch_hweight32(unsigned int w) +{ +#ifdef CONFIG_RISCV_ISA_ZBB + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm (".option push\n" + ".option arch,+zbb\n" + CPOPW "%0, %0\n" + ".option pop\n" + : "+r" (w) : :); + + return w; + +legacy: +#endif + return __sw_hweight32(w); +} + +static inline unsigned int __arch_hweight16(unsigned int w) +{ + return __arch_hweight32(w & 0xffff); +} + +static inline unsigned int __arch_hweight8(unsigned int w) +{ + return __arch_hweight32(w & 0xff); +} + +#if BITS_PER_LONG == 64 +static __always_inline unsigned long __arch_hweight64(__u64 w) +{ +# ifdef CONFIG_RISCV_ISA_ZBB + asm goto(ALTERNATIVE("j %l[legacy]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : : : : legacy); + + asm (".option push\n" + ".option arch,+zbb\n" + "cpop %0, %0\n" + ".option pop\n" + : "+r" (w) : :); + + return w; + +legacy: +# endif + return __sw_hweight64(w); +} +#else /* BITS_PER_LONG == 64 */ +static inline unsigned long __arch_hweight64(__u64 w) +{ + return __arch_hweight32((u32)w) + + __arch_hweight32((u32)(w >> 32)); +} +#endif /* !(BITS_PER_LONG == 64) */ + +#endif /* _ASM_RISCV_HWEIGHT_H */ diff --git a/arch/riscv/include/asm/archrandom.h b/arch/riscv/include/asm/archrandom.h new file mode 100644 index 0000000000..5345360adf --- /dev/null +++ b/arch/riscv/include/asm/archrandom.h @@ -0,0 +1,72 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Kernel interface for the RISCV arch_random_* functions + * + * Copyright (c) 2023 Rivos Inc. + * + */ + +#ifndef ASM_RISCV_ARCHRANDOM_H +#define ASM_RISCV_ARCHRANDOM_H + +#include <asm/csr.h> +#include <asm/processor.h> + +#define SEED_RETRY_LOOPS 100 + +static inline bool __must_check csr_seed_long(unsigned long *v) +{ + unsigned int retry = SEED_RETRY_LOOPS, valid_seeds = 0; + const int needed_seeds = sizeof(long) / sizeof(u16); + u16 *entropy = (u16 *)v; + + do { + /* + * The SEED CSR must be accessed with a read-write instruction. + */ + unsigned long csr_seed = csr_swap(CSR_SEED, 0); + unsigned long opst = csr_seed & SEED_OPST_MASK; + + switch (opst) { + case SEED_OPST_ES16: + entropy[valid_seeds++] = csr_seed & SEED_ENTROPY_MASK; + if (valid_seeds == needed_seeds) + return true; + break; + + case SEED_OPST_DEAD: + pr_err_once("archrandom: Unrecoverable error\n"); + return false; + + case SEED_OPST_BIST: + case SEED_OPST_WAIT: + default: + cpu_relax(); + continue; + } + } while (--retry); + + return false; +} + +static inline size_t __must_check arch_get_random_longs(unsigned long *v, size_t max_longs) +{ + return 0; +} + +static inline size_t __must_check arch_get_random_seed_longs(unsigned long *v, size_t max_longs) +{ + if (!max_longs) + return 0; + + /* + * If Zkr is supported and csr_seed_long succeeds, we return one long + * worth of entropy. 
+ */ + if (riscv_has_extension_likely(RISCV_ISA_EXT_ZKR) && csr_seed_long(v)) + return 1; + + return 0; +} + +#endif /* ASM_RISCV_ARCHRANDOM_H */ diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h index 00a96e7a96..0c8bfd54fc 100644 --- a/arch/riscv/include/asm/asm-extable.h +++ b/arch/riscv/include/asm/asm-extable.h @@ -6,6 +6,7 @@ #define EX_TYPE_FIXUP 1 #define EX_TYPE_BPF 2 #define EX_TYPE_UACCESS_ERR_ZERO 3 +#define EX_TYPE_LOAD_UNALIGNED_ZEROPAD 4 #ifdef CONFIG_MMU @@ -47,6 +48,11 @@ #define EX_DATA_REG_ZERO_SHIFT 5 #define EX_DATA_REG_ZERO GENMASK(9, 5) +#define EX_DATA_REG_DATA_SHIFT 0 +#define EX_DATA_REG_DATA GENMASK(4, 0) +#define EX_DATA_REG_ADDR_SHIFT 5 +#define EX_DATA_REG_ADDR GENMASK(9, 5) + #define EX_DATA_REG(reg, gpr) \ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")" @@ -62,6 +68,15 @@ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) +#define _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(insn, fixup, data, addr) \ + __DEFINE_ASM_GPR_NUMS \ + __ASM_EXTABLE_RAW(#insn, #fixup, \ + __stringify(EX_TYPE_LOAD_UNALIGNED_ZEROPAD), \ + "(" \ + EX_DATA_REG(DATA, data) " | " \ + EX_DATA_REG(ADDR, addr) \ + ")") + #endif /* __ASSEMBLY__ */ #else /* CONFIG_MMU */ diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h index 36b955c762..cd627ec289 100644 --- a/arch/riscv/include/asm/asm-prototypes.h +++ b/arch/riscv/include/asm/asm-prototypes.h @@ -9,6 +9,33 @@ long long __lshrti3(long long a, int b); long long __ashrti3(long long a, int b); long long __ashlti3(long long a, int b); +#ifdef CONFIG_RISCV_ISA_V + +#ifdef CONFIG_MMU +asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n); +#endif /* CONFIG_MMU */ + +void xor_regs_2_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2); +void xor_regs_3_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3); +void xor_regs_4_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4); +void xor_regs_5_(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5); + +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs); +asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs); +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */ + +#endif /* CONFIG_RISCV_ISA_V */ #define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs) diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h index ce47613e38..329d8244a9 100644 --- a/arch/riscv/include/asm/bitops.h +++ b/arch/riscv/include/asm/bitops.h @@ -271,7 +271,9 @@ legacy: #include <asm-generic/bitops/fls64.h> #include <asm-generic/bitops/sched.h> -#include <asm-generic/bitops/hweight.h> +#include <asm/arch_hweight.h> + +#include <asm-generic/bitops/const_hweight.h> #if (BITS_PER_LONG == 64) #define __AMO(op) "amo" #op ".d" diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h index 56bf9d69d5..8f7a622570 100644 --- a/arch/riscv/include/asm/cfi.h +++ b/arch/riscv/include/asm/cfi.h @@ -7,8 +7,9 @@ * * Copyright (C) 2023 Google LLC */ +#include <linux/bug.h> -#include 
<linux/cfi.h> +struct pt_regs; #ifdef CONFIG_CFI_CLANG enum bug_trap_type handle_cfi_failure(struct pt_regs *regs); diff --git a/arch/riscv/include/asm/checksum.h b/arch/riscv/include/asm/checksum.h new file mode 100644 index 0000000000..88e6f1499e --- /dev/null +++ b/arch/riscv/include/asm/checksum.h @@ -0,0 +1,93 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Checksum routines + * + * Copyright (C) 2023 Rivos Inc. + */ +#ifndef __ASM_RISCV_CHECKSUM_H +#define __ASM_RISCV_CHECKSUM_H + +#include <linux/in6.h> +#include <linux/uaccess.h> + +#define ip_fast_csum ip_fast_csum + +extern unsigned int do_csum(const unsigned char *buff, int len); +#define do_csum do_csum + +/* Default version is sufficient for 32 bit */ +#ifndef CONFIG_32BIT +#define _HAVE_ARCH_IPV6_CSUM +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum sum); +#endif + +/* Define riscv versions of functions before importing asm-generic/checksum.h */ +#include <asm-generic/checksum.h> + +/** + * Quickly compute an IP checksum with the assumption that IPv4 headers will + * always be in multiples of 32-bits, and have an ihl of at least 5. + * + * @ihl: the number of 32 bit segments and must be greater than or equal to 5. + * @iph: assumed to be word aligned given that NET_IP_ALIGN is set to 2 on + * riscv, defining IP headers to be aligned. + */ +static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl) +{ + unsigned long csum = 0; + int pos = 0; + + do { + csum += ((const unsigned int *)iph)[pos]; + if (IS_ENABLED(CONFIG_32BIT)) + csum += csum < ((const unsigned int *)iph)[pos]; + } while (++pos < ihl); + + /* + * ZBB only saves three instructions on 32-bit and five on 64-bit so not + * worth checking if supported without Alternatives. + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && + IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + unsigned long fold_temp; + + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : + : + : + : no_zbb); + + if (IS_ENABLED(CONFIG_32BIT)) { + asm(".option push \n\ + .option arch,+zbb \n\ + not %[fold_temp], %[csum] \n\ + rori %[csum], %[csum], 16 \n\ + sub %[csum], %[fold_temp], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)); + } else { + asm(".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[csum], 32 \n\ + add %[csum], %[fold_temp], %[csum] \n\ + srli %[csum], %[csum], 32 \n\ + not %[fold_temp], %[csum] \n\ + roriw %[csum], %[csum], 16 \n\ + subw %[csum], %[fold_temp], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp)); + } + return (__force __sum16)(csum >> 16); + } +no_zbb: +#ifndef CONFIG_32BIT + csum += ror64(csum, 32); + csum >>= 32; +#endif + return csum_fold((__force __wsum)csum); +} + +#endif /* __ASM_RISCV_CHECKSUM_H */ diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h index aa128466c4..176b570ef9 100644 --- a/arch/riscv/include/asm/cpu_ops.h +++ b/arch/riscv/include/asm/cpu_ops.h @@ -13,33 +13,23 @@ /** * struct cpu_operations - Callback operations for hotplugging CPUs. * - * @name: Name of the boot protocol. - * @cpu_prepare: Early one-time preparation step for a cpu. If there - * is a mechanism for doing so, tests whether it is - * possible to boot the given HART. * @cpu_start: Boots a cpu into the kernel. - * @cpu_disable: Prepares a cpu to die. May fail for some - * mechanism-specific reason, which will cause the hot - * unplug to be aborted. Called from the cpu to be killed. 
* @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from * the cpu being stopped. * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another * cpu. */ struct cpu_operations { - const char *name; - int (*cpu_prepare)(unsigned int cpu); int (*cpu_start)(unsigned int cpu, struct task_struct *tidle); #ifdef CONFIG_HOTPLUG_CPU - int (*cpu_disable)(unsigned int cpu); void (*cpu_stop)(void); int (*cpu_is_stopped)(unsigned int cpu); #endif }; extern const struct cpu_operations cpu_ops_spinwait; -extern const struct cpu_operations *cpu_ops[NR_CPUS]; -void __init cpu_set_ops(int cpu); +extern const struct cpu_operations *cpu_ops; +void __init cpu_set_ops(void); #endif /* ifndef __ASM_CPU_OPS_H */ diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h index aa6548b46a..0bd11862b7 100644 --- a/arch/riscv/include/asm/cpufeature.h +++ b/arch/riscv/include/asm/cpufeature.h @@ -59,6 +59,8 @@ struct riscv_isa_ext_data { const unsigned int id; const char *name; const char *property; + const unsigned int *subset_ext_ids; + const unsigned int subset_ext_size; }; extern const struct riscv_isa_ext_data riscv_isa_ext[]; @@ -67,7 +69,7 @@ extern bool riscv_isa_fallback; unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap); -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit); +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit); #define riscv_isa_extension_available(isa_bitmap, ext) \ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext) @@ -133,4 +135,6 @@ static __always_inline bool riscv_cpu_has_extension_unlikely(int cpu, const unsi return __riscv_isa_extension_available(hart_isa[cpu].isa, ext); } +DECLARE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key); + #endif diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h index 0f2e68f4e8..2468c55933 100644 --- a/arch/riscv/include/asm/csr.h +++ b/arch/riscv/include/asm/csr.h @@ -411,6 +411,15 @@ #define CSR_VTYPE 0xc21 #define CSR_VLENB 0xc22 +/* Scalar Crypto Extension - Entropy */ +#define CSR_SEED 0x015 +#define SEED_OPST_MASK _AC(0xC0000000, UL) +#define SEED_OPST_BIST _AC(0x00000000, UL) +#define SEED_OPST_WAIT _AC(0x40000000, UL) +#define SEED_OPST_ES16 _AC(0x80000000, UL) +#define SEED_OPST_DEAD _AC(0xC0000000, UL) +#define SEED_ENTROPY_MASK _AC(0xFFFF, UL) + #ifdef CONFIG_RISCV_M_MODE # define CSR_STATUS CSR_MSTATUS # define CSR_IE CSR_MIE diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h index 7ab5e34318..2293e535f8 100644 --- a/arch/riscv/include/asm/entry-common.h +++ b/arch/riscv/include/asm/entry-common.h @@ -4,6 +4,23 @@ #define _ASM_RISCV_ENTRY_COMMON_H #include <asm/stacktrace.h> +#include <asm/thread_info.h> +#include <asm/vector.h> + +static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs, + unsigned long ti_work) +{ + if (ti_work & _TIF_RISCV_V_DEFER_RESTORE) { + clear_thread_flag(TIF_RISCV_V_DEFER_RESTORE); + /* + * We are already called with irq disabled, so go without + * keeping track of riscv_v_flags. 
+ */ + riscv_v_vstate_restore(¤t->thread.vstate, regs); + } +} + +#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare void handle_page_fault(struct pt_regs *regs); void handle_break(struct pt_regs *regs); diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h index 83ed25e435..ea33288f8a 100644 --- a/arch/riscv/include/asm/errata_list.h +++ b/arch/riscv/include/asm/errata_list.h @@ -24,9 +24,8 @@ #ifdef CONFIG_ERRATA_THEAD #define ERRATA_THEAD_PBMT 0 -#define ERRATA_THEAD_CMO 1 -#define ERRATA_THEAD_PMU 2 -#define ERRATA_THEAD_NUMBER 3 +#define ERRATA_THEAD_PMU 1 +#define ERRATA_THEAD_NUMBER 2 #endif #ifdef __ASSEMBLY__ @@ -94,54 +93,17 @@ asm volatile(ALTERNATIVE( \ #define ALT_THEAD_PMA(_val) #endif -/* - * th.dcache.ipa rs1 (invalidate, physical address) - * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | - * 0000001 01010 rs1 000 00000 0001011 - * th.dache.iva rs1 (invalida, virtual address) - * 0000001 00110 rs1 000 00000 0001011 - * - * th.dcache.cpa rs1 (clean, physical address) - * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | - * 0000001 01001 rs1 000 00000 0001011 - * th.dcache.cva rs1 (clean, virtual address) - * 0000001 00101 rs1 000 00000 0001011 - * - * th.dcache.cipa rs1 (clean then invalidate, physical address) - * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | - * 0000001 01011 rs1 000 00000 0001011 - * th.dcache.civa rs1 (... virtual address) - * 0000001 00111 rs1 000 00000 0001011 - * - * th.sync.s (make sure all cache operations finished) - * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 | - * 0000000 11001 00000 000 00000 0001011 - */ -#define THEAD_INVAL_A0 ".long 0x0265000b" -#define THEAD_CLEAN_A0 ".long 0x0255000b" -#define THEAD_FLUSH_A0 ".long 0x0275000b" -#define THEAD_SYNC_S ".long 0x0190000b" - #define ALT_CMO_OP(_op, _start, _size, _cachesize) \ -asm volatile(ALTERNATIVE_2( \ - __nops(6), \ +asm volatile(ALTERNATIVE( \ + __nops(5), \ "mv a0, %1\n\t" \ "j 2f\n\t" \ "3:\n\t" \ CBO_##_op(a0) \ "add a0, a0, %0\n\t" \ "2:\n\t" \ - "bltu a0, %2, 3b\n\t" \ - "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \ - "mv a0, %1\n\t" \ - "j 2f\n\t" \ - "3:\n\t" \ - THEAD_##_op##_A0 "\n\t" \ - "add a0, a0, %0\n\t" \ - "2:\n\t" \ - "bltu a0, %2, 3b\n\t" \ - THEAD_SYNC_S, THEAD_VENDOR_ID, \ - ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \ + "bltu a0, %2, 3b\n\t", \ + 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM) \ : : "r"(_cachesize), \ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \ "r"((unsigned long)(_start) + (_size)) \ diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h index 42777f91a9..15055f9df4 100644 --- a/arch/riscv/include/asm/ftrace.h +++ b/arch/riscv/include/asm/ftrace.h @@ -133,7 +133,23 @@ do { \ struct dyn_ftrace; int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec); #define ftrace_init_nop ftrace_init_nop -#endif + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +struct ftrace_ops; +struct ftrace_regs; +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs); +#define ftrace_graph_func ftrace_graph_func + +static inline void __arch_ftrace_set_direct_caller(struct pt_regs *regs, unsigned long addr) +{ + regs->t1 = addr; +} +#define arch_ftrace_set_direct_caller(fregs, addr) \ + __arch_ftrace_set_direct_caller(&(fregs)->regs, addr) +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + +#endif /* __ASSEMBLY__ */ #endif /* CONFIG_DYNAMIC_FTRACE */ diff --git 
a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h index 06d30526ef..1f2d2599c6 100644 --- a/arch/riscv/include/asm/hwcap.h +++ b/arch/riscv/include/asm/hwcap.h @@ -11,19 +11,13 @@ #include <uapi/asm/hwcap.h> #define RISCV_ISA_EXT_a ('a' - 'a') -#define RISCV_ISA_EXT_b ('b' - 'a') #define RISCV_ISA_EXT_c ('c' - 'a') #define RISCV_ISA_EXT_d ('d' - 'a') #define RISCV_ISA_EXT_f ('f' - 'a') #define RISCV_ISA_EXT_h ('h' - 'a') #define RISCV_ISA_EXT_i ('i' - 'a') -#define RISCV_ISA_EXT_j ('j' - 'a') -#define RISCV_ISA_EXT_k ('k' - 'a') #define RISCV_ISA_EXT_m ('m' - 'a') -#define RISCV_ISA_EXT_p ('p' - 'a') #define RISCV_ISA_EXT_q ('q' - 'a') -#define RISCV_ISA_EXT_s ('s' - 'a') -#define RISCV_ISA_EXT_u ('u' - 'a') #define RISCV_ISA_EXT_v ('v' - 'a') /* @@ -57,8 +51,40 @@ #define RISCV_ISA_EXT_ZIHPM 42 #define RISCV_ISA_EXT_SMSTATEEN 43 #define RISCV_ISA_EXT_ZICOND 44 +#define RISCV_ISA_EXT_ZBC 45 +#define RISCV_ISA_EXT_ZBKB 46 +#define RISCV_ISA_EXT_ZBKC 47 +#define RISCV_ISA_EXT_ZBKX 48 +#define RISCV_ISA_EXT_ZKND 49 +#define RISCV_ISA_EXT_ZKNE 50 +#define RISCV_ISA_EXT_ZKNH 51 +#define RISCV_ISA_EXT_ZKR 52 +#define RISCV_ISA_EXT_ZKSED 53 +#define RISCV_ISA_EXT_ZKSH 54 +#define RISCV_ISA_EXT_ZKT 55 +#define RISCV_ISA_EXT_ZVBB 56 +#define RISCV_ISA_EXT_ZVBC 57 +#define RISCV_ISA_EXT_ZVKB 58 +#define RISCV_ISA_EXT_ZVKG 59 +#define RISCV_ISA_EXT_ZVKNED 60 +#define RISCV_ISA_EXT_ZVKNHA 61 +#define RISCV_ISA_EXT_ZVKNHB 62 +#define RISCV_ISA_EXT_ZVKSED 63 +#define RISCV_ISA_EXT_ZVKSH 64 +#define RISCV_ISA_EXT_ZVKT 65 +#define RISCV_ISA_EXT_ZFH 66 +#define RISCV_ISA_EXT_ZFHMIN 67 +#define RISCV_ISA_EXT_ZIHINTNTL 68 +#define RISCV_ISA_EXT_ZVFH 69 +#define RISCV_ISA_EXT_ZVFHMIN 70 +#define RISCV_ISA_EXT_ZFA 71 +#define RISCV_ISA_EXT_ZTSO 72 +#define RISCV_ISA_EXT_ZACAS 73 -#define RISCV_ISA_EXT_MAX 64 +#define RISCV_ISA_EXT_XLINUXENVCFG 127 + +#define RISCV_ISA_EXT_MAX 128 +#define RISCV_ISA_EXT_INVALID U32_MAX #ifdef CONFIG_RISCV_M_MODE #define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h index 5c48f48e79..630507dff5 100644 --- a/arch/riscv/include/asm/hwprobe.h +++ b/arch/riscv/include/asm/hwprobe.h @@ -15,4 +15,28 @@ static inline bool riscv_hwprobe_key_is_valid(__s64 key) return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY; } +static inline bool hwprobe_key_is_bitmask(__s64 key) +{ + switch (key) { + case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: + case RISCV_HWPROBE_KEY_IMA_EXT_0: + case RISCV_HWPROBE_KEY_CPUPERF_0: + return true; + } + + return false; +} + +static inline bool riscv_hwprobe_pair_cmp(struct riscv_hwprobe *pair, + struct riscv_hwprobe *other_pair) +{ + if (pair->key != other_pair->key) + return false; + + if (hwprobe_key_is_bitmask(pair->key)) + return (pair->value & other_pair->value) == other_pair->value; + + return pair->value == other_pair->value; +} + #endif diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h index 0bbffd5280..7388edd889 100644 --- a/arch/riscv/include/asm/kfence.h +++ b/arch/riscv/include/asm/kfence.h @@ -18,9 +18,9 @@ static inline bool kfence_protect_page(unsigned long addr, bool protect) pte_t *pte = virt_to_kpte(addr); if (protect) - set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(ptep_get(pte)) & ~_PAGE_PRESENT)); else - set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT)); + set_pte(pte, __pte(pte_val(ptep_get(pte)) | _PAGE_PRESENT)); flush_tlb_kernel_range(addr, addr + PAGE_SIZE); diff --git 
a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h index 0eefd9c991..484d04a92f 100644 --- a/arch/riscv/include/asm/kvm_host.h +++ b/arch/riscv/include/asm/kvm_host.h @@ -41,6 +41,7 @@ KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) #define KVM_REQ_HFENCE \ KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP) +#define KVM_REQ_STEAL_UPDATE KVM_ARCH_REQ(6) enum kvm_riscv_hfence_type { KVM_RISCV_HFENCE_UNKNOWN = 0, @@ -262,13 +263,17 @@ struct kvm_vcpu_arch { /* 'static' configurations which are set only once */ struct kvm_vcpu_config cfg; + + /* SBI steal-time accounting */ + struct { + gpa_t shmem; + u64 last_steal; + } sta; }; static inline void kvm_arch_sync_events(struct kvm *kvm) {} static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {} -#define KVM_ARCH_WANT_MMU_NOTIFIER - #define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12 void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid, @@ -372,4 +377,7 @@ bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask); void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu); void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu); +void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu); + #endif /* __RISCV_KVM_HOST_H__ */ diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h index 6a453f7f8b..b96705258c 100644 --- a/arch/riscv/include/asm/kvm_vcpu_sbi.h +++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h @@ -15,9 +15,10 @@ #define KVM_SBI_VERSION_MINOR 0 enum kvm_riscv_sbi_ext_status { - KVM_RISCV_SBI_EXT_UNINITIALIZED, - KVM_RISCV_SBI_EXT_AVAILABLE, - KVM_RISCV_SBI_EXT_UNAVAILABLE, + KVM_RISCV_SBI_EXT_STATUS_UNINITIALIZED, + KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE, + KVM_RISCV_SBI_EXT_STATUS_ENABLED, + KVM_RISCV_SBI_EXT_STATUS_DISABLED, }; struct kvm_vcpu_sbi_context { @@ -36,7 +37,7 @@ struct kvm_vcpu_sbi_extension { unsigned long extid_start; unsigned long extid_end; - bool default_unavail; + bool default_disabled; /** * SBI extension handler. 
It can be defined for a given extension or group of @@ -59,11 +60,21 @@ int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); +int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg); const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( struct kvm_vcpu *vcpu, unsigned long extid); +bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx); int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run); void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu); +int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long *reg_val); +int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, unsigned long reg_num, + unsigned long reg_val); + #ifdef CONFIG_RISCV_SBI_V01 extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01; #endif @@ -74,6 +85,7 @@ extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn; +extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental; extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor; diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h index 57e887bfa3..94b3d6930f 100644 --- a/arch/riscv/include/asm/page.h +++ b/arch/riscv/include/asm/page.h @@ -89,7 +89,7 @@ typedef struct page *pgtable_t; #define PTE_FMT "%08lx" #endif -#ifdef CONFIG_64BIT +#if defined(CONFIG_64BIT) && defined(CONFIG_MMU) /* * We override this value as its generic definition uses __pa too early in * the boot process (before kernel_map.va_pa_offset is set). 
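The KVM hunks above add per-vCPU steal-time bookkeeping (sta.shmem, sta.last_steal) and ONE_REG accessors for the SBI STA state. For orientation, a minimal guest-side sketch of how such a shared-memory area is registered with the hypervisor, using only the sbi_ecall() prototype and the SBI_EXT_STA / SBI_EXT_STA_STEAL_TIME_SET_SHMEM / SBI_STA_SHMEM_DISABLE definitions that this patch adds to sbi.h further down; the helper name and error handling here are illustrative, not the kernel's exact code:

static int sta_register_shmem(phys_addr_t pa)
{
	struct sbiret ret;

	/*
	 * arg0/arg1 carry the low/high halves of the shmem physical
	 * address and arg2 (flags) must be zero; passing
	 * SBI_STA_SHMEM_DISABLE in both halves disables the area.
	 */
	ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM,
			lower_32_bits(pa), upper_32_bits(pa), 0, 0, 0, 0);

	return ret.error ? sbi_err_map_linux_errno(ret.error) : 0;
}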
diff --git a/arch/riscv/include/asm/paravirt.h b/arch/riscv/include/asm/paravirt.h new file mode 100644 index 0000000000..c0abde70fc --- /dev/null +++ b/arch/riscv/include/asm/paravirt.h @@ -0,0 +1,28 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +#ifndef _ASM_RISCV_PARAVIRT_H +#define _ASM_RISCV_PARAVIRT_H + +#ifdef CONFIG_PARAVIRT +#include <linux/static_call_types.h> + +struct static_key; +extern struct static_key paravirt_steal_enabled; +extern struct static_key paravirt_steal_rq_enabled; + +u64 dummy_steal_clock(int cpu); + +DECLARE_STATIC_CALL(pv_steal_clock, dummy_steal_clock); + +static inline u64 paravirt_steal_clock(int cpu) +{ + return static_call(pv_steal_clock)(cpu); +} + +int __init pv_time_init(void); + +#else + +#define pv_time_init() do {} while (0) + +#endif /* CONFIG_PARAVIRT */ +#endif /* _ASM_RISCV_PARAVIRT_H */ diff --git a/arch/riscv/include/asm/paravirt_api_clock.h b/arch/riscv/include/asm/paravirt_api_clock.h new file mode 100644 index 0000000000..65ac7cee0d --- /dev/null +++ b/arch/riscv/include/asm/paravirt_api_clock.h @@ -0,0 +1 @@ +#include <asm/paravirt.h> diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h index 783837bbd8..b99bd66107 100644 --- a/arch/riscv/include/asm/pgtable-64.h +++ b/arch/riscv/include/asm/pgtable-64.h @@ -202,7 +202,7 @@ static inline int pud_user(pud_t pud) static inline void set_pud(pud_t *pudp, pud_t pud) { - *pudp = pud; + WRITE_ONCE(*pudp, pud); } static inline void pud_clear(pud_t *pudp) @@ -278,7 +278,7 @@ static inline unsigned long _pmd_pfn(pmd_t pmd) static inline void set_p4d(p4d_t *p4dp, p4d_t p4d) { if (pgtable_l4_enabled) - *p4dp = p4d; + WRITE_ONCE(*p4dp, p4d); else set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) }); } @@ -340,18 +340,12 @@ static inline struct page *p4d_page(p4d_t p4d) #define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1)) #define pud_offset pud_offset -static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) -{ - if (pgtable_l4_enabled) - return p4d_pgtable(*p4d) + pud_index(address); - - return (pud_t *)p4d; -} +pud_t *pud_offset(p4d_t *p4d, unsigned long address); static inline void set_pgd(pgd_t *pgdp, pgd_t pgd) { if (pgtable_l5_enabled) - *pgdp = pgd; + WRITE_ONCE(*pgdp, pgd); else set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) }); } @@ -404,12 +398,6 @@ static inline struct page *pgd_page(pgd_t pgd) #define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1)) #define p4d_offset p4d_offset -static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) -{ - if (pgtable_l5_enabled) - return pgd_pgtable(*pgd) + p4d_index(address); - - return (p4d_t *)pgd; -} +p4d_t *p4d_offset(pgd_t *pgd, unsigned long address); #endif /* _ASM_RISCV_PGTABLE_64_H */ diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h index a66845e31a..39c6bb8254 100644 --- a/arch/riscv/include/asm/pgtable.h +++ b/arch/riscv/include/asm/pgtable.h @@ -248,7 +248,7 @@ static inline int pmd_leaf(pmd_t pmd) static inline void set_pmd(pmd_t *pmdp, pmd_t pmd) { - *pmdp = pmd; + WRITE_ONCE(*pmdp, pmd); } static inline void pmd_clear(pmd_t *pmdp) @@ -516,7 +516,7 @@ static inline int pte_same(pte_t pte_a, pte_t pte_b) */ static inline void set_pte(pte_t *ptep, pte_t pteval) { - *ptep = pteval; + WRITE_ONCE(*ptep, pteval); } void flush_icache_pte(pte_t pte); @@ -550,19 +550,12 @@ static inline void pte_clear(struct mm_struct *mm, __set_pte_at(ptep, __pte(0)); } -#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS -static inline int ptep_set_access_flags(struct 
vm_area_struct *vma, - unsigned long address, pte_t *ptep, - pte_t entry, int dirty) -{ - if (!pte_same(*ptep, entry)) - __set_pte_at(ptep, entry); - /* - * update_mmu_cache will unconditionally execute, handling both - * the case that the PTE changed and the spurious fault case. - */ - return true; -} +#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS /* defined in mm/pgtable.c */ +extern int ptep_set_access_flags(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep, pte_t entry, int dirty); +#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG /* defined in mm/pgtable.c */ +extern int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned long address, + pte_t *ptep); #define __HAVE_ARCH_PTEP_GET_AND_CLEAR static inline pte_t ptep_get_and_clear(struct mm_struct *mm, @@ -575,16 +568,6 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, return pte; } -#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG -static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, - unsigned long address, - pte_t *ptep) -{ - if (!pte_young(*ptep)) - return 0; - return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep)); -} - #define __HAVE_ARCH_PTEP_SET_WRPROTECT static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long address, pte_t *ptep) @@ -679,6 +662,7 @@ static inline int pmd_write(pmd_t pmd) return pte_write(pmd_pte(pmd)); } +#define pmd_dirty pmd_dirty static inline int pmd_dirty(pmd_t pmd) { return pte_dirty(pmd_pte(pmd)); @@ -904,7 +888,7 @@ static inline pte_t pte_swp_clear_exclusive(pte_t pte) #define PAGE_SHARED __pgprot(0) #define PAGE_KERNEL __pgprot(0) #define swapper_pg_dir NULL -#define TASK_SIZE 0xffffffffUL +#define TASK_SIZE _AC(-1, UL) #define VMALLOC_START _AC(0, UL) #define VMALLOC_END TASK_SIZE diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h index e1944ff075..a8509cc31a 100644 --- a/arch/riscv/include/asm/processor.h +++ b/arch/riscv/include/asm/processor.h @@ -73,6 +73,43 @@ struct task_struct; struct pt_regs; +/* + * We use a flag to track in-kernel Vector context. Currently the flag has the + * following meaning: + * + * - bit 0: indicates whether the in-kernel Vector context is active. The + * activation of this state disables preemption. On a non-RT kernel, it + * also disables bh. + * - bit 8: is used for tracking preemptible kernel-mode Vector, when + * RISCV_ISA_V_PREEMPTIVE is enabled. Calling kernel_vector_begin() does not + * disable preemption if the thread's kernel_vstate.datap is allocated. + * Instead, the kernel sets this bit field. Then the trap entry/exit code + * knows if we are entering/exiting the context that owns preempt_v. + * - 0: the task is not using preempt_v + * - 1: the task is actively using preempt_v. But whether the task owns + * the preempt_v context is decided by bits in RISCV_V_CTX_DEPTH_MASK. + * - bits 16-23 are RISCV_V_CTX_DEPTH_MASK, used by the context tracking routine + * when preempt_v starts: + * - 0: the task is actively using, and owns, the preempt_v context. + * - non-zero: the task was using preempt_v, but then took a trap within. + * Thus, the task does not own preempt_v. Any use of Vector will have to + * save preempt_v, if dirty, and fall back to non-preemptible kernel-mode + * Vector. + * - bit 30: The in-kernel preempt_v context is saved, and needs to be + * restored when returning to the context that owns the preempt_v. + * - bit 31: The in-kernel preempt_v context is dirty, as signaled by the + * trap entry code.
Any context switch out of the current task needs to save + * it to the task's in-kernel V context. Also, any traps nesting on top of + * preempt_v that request to use V need a save. + */ +#define RISCV_V_CTX_DEPTH_MASK 0x00ff0000 + +#define RISCV_V_CTX_UNIT_DEPTH 0x00010000 +#define RISCV_KERNEL_MODE_V 0x00000001 +#define RISCV_PREEMPT_V 0x00000100 +#define RISCV_PREEMPT_V_DIRTY 0x80000000 +#define RISCV_PREEMPT_V_NEED_RESTORE 0x40000000 + /* CPU-specific state of a task */ struct thread_struct { /* Callee-saved registers */ @@ -81,9 +118,11 @@ struct thread_struct { unsigned long s[12]; /* s[0]: frame pointer */ struct __riscv_d_ext_state fstate; unsigned long bad_cause; - unsigned long vstate_ctrl; + u32 riscv_v_flags; + u32 vstate_ctrl; struct __riscv_v_ext_state vstate; unsigned long align_ctl; + struct __riscv_v_ext_state kernel_vstate; }; /* Whitelist the fstate from the task_struct for hardened usercopy */ diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h index 0892f4421b..6e68f8dff7 100644 --- a/arch/riscv/include/asm/sbi.h +++ b/arch/riscv/include/asm/sbi.h @@ -29,8 +29,10 @@ enum sbi_ext_id { SBI_EXT_RFENCE = 0x52464E43, SBI_EXT_HSM = 0x48534D, SBI_EXT_SRST = 0x53525354, + SBI_EXT_SUSP = 0x53555350, SBI_EXT_PMU = 0x504D55, SBI_EXT_DBCN = 0x4442434E, + SBI_EXT_STA = 0x535441, /* Experimentals extensions must lie within this range */ SBI_EXT_EXPERIMENTAL_START = 0x08000000, @@ -114,6 +116,14 @@ enum sbi_srst_reset_reason { SBI_SRST_RESET_REASON_SYS_FAILURE, }; +enum sbi_ext_susp_fid { + SBI_EXT_SUSP_SYSTEM_SUSPEND = 0, +}; + +enum sbi_ext_susp_sleep_type { + SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM = 0, +}; + enum sbi_ext_pmu_fid { SBI_EXT_PMU_NUM_COUNTERS = 0, SBI_EXT_PMU_COUNTER_GET_INFO, @@ -243,6 +253,22 @@ enum sbi_ext_dbcn_fid { SBI_EXT_DBCN_CONSOLE_WRITE_BYTE = 2, }; +/* SBI STA (steal-time accounting) extension */ +enum sbi_ext_sta_fid { + SBI_EXT_STA_STEAL_TIME_SET_SHMEM = 0, +}; + +struct sbi_sta_struct { + __le32 sequence; + __le32 flags; + __le64 steal; + u8 preempted; + u8 pad[47]; +} __packed; + +#define SBI_STA_SHMEM_DISABLE -1 + +/* SBI spec version fields */ #define SBI_SPEC_VERSION_DEFAULT 0x1 #define SBI_SPEC_VERSION_MAJOR_SHIFT 24 #define SBI_SPEC_VERSION_MAJOR_MASK 0x7f @@ -271,8 +297,13 @@ struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0, unsigned long arg3, unsigned long arg4, unsigned long arg5); +#ifdef CONFIG_RISCV_SBI_V01 void sbi_console_putchar(int ch); int sbi_console_getchar(void); +#else +static inline void sbi_console_putchar(int ch) { } +static inline int sbi_console_getchar(void) { return -ENOENT; } +#endif long sbi_get_mvendorid(void); long sbi_get_marchid(void); long sbi_get_mimpid(void); @@ -329,6 +360,11 @@ static inline unsigned long sbi_mk_version(unsigned long major, } int sbi_err_map_linux_errno(int err); + +extern bool sbi_debug_console_available; +int sbi_debug_console_write(const char *bytes, unsigned int num_bytes); +int sbi_debug_console_read(char *bytes, unsigned int num_bytes); + #else /* CONFIG_RISCV_SBI */ static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; } static inline void sbi_init(void) {} diff --git a/arch/riscv/include/asm/simd.h b/arch/riscv/include/asm/simd.h new file mode 100644 index 0000000000..54efbf523d --- /dev/null +++ b/arch/riscv/include/asm/simd.h @@ -0,0 +1,64 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2017 Linaro Ltd.
<ard.biesheuvel@linaro.org> + * Copyright (C) 2023 SiFive + */ + +#ifndef __ASM_SIMD_H +#define __ASM_SIMD_H + +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <linux/percpu.h> +#include <linux/preempt.h> +#include <linux/types.h> +#include <linux/thread_info.h> + +#include <asm/vector.h> + +#ifdef CONFIG_RISCV_ISA_V +/* + * may_use_simd - whether it is allowable at this time to issue vector + * instructions or access the vector register file + * + * Callers must not assume that the result remains true beyond the next + * preempt_enable() or return from softirq context. + */ +static __must_check inline bool may_use_simd(void) +{ + /* + * RISCV_KERNEL_MODE_V is only set while preemption is disabled, + * and is clear whenever preemption is enabled. + */ + if (in_hardirq() || in_nmi()) + return false; + + /* + * Nesting is achieved in preempt_v by spreading the control for + * preemptible and non-preemptible kernel-mode Vector into two fields. + * Always try to match with preempt_v if a kernel V-context exists. Then, + * fall back to checking non-preempt_v if nesting happens, or if the config + * is not set. + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_V_PREEMPTIVE) && current->thread.kernel_vstate.datap) { + if (!riscv_preempt_v_started(current)) + return true; + } + /* + * Non-preemptible kernel-mode Vector temporarily disables bh. So we + * must not return true on irqs_disabled(). Otherwise we would fail the + * lockdep check calling local_bh_enable(). + */ + return !irqs_disabled() && !(riscv_v_flags() & RISCV_KERNEL_MODE_V); +} + +#else /* ! CONFIG_RISCV_ISA_V */ + +static __must_check inline bool may_use_simd(void) +{ + return false; +} + +#endif /* ! CONFIG_RISCV_ISA_V */ + +#endif diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h index 02f8786738..491296a335 100644 --- a/arch/riscv/include/asm/suspend.h +++ b/arch/riscv/include/asm/suspend.h @@ -14,6 +14,7 @@ struct suspend_context { struct pt_regs regs; /* Saved and restored by high-level functions */ unsigned long scratch; + unsigned long envcfg; unsigned long tvec; unsigned long ie; #ifdef CONFIG_MMU diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h index f90d8e42f3..7efdb0584d 100644 --- a/arch/riscv/include/asm/switch_to.h +++ b/arch/riscv/include/asm/switch_to.h @@ -53,8 +53,7 @@ static inline void __switch_to_fpu(struct task_struct *prev, struct pt_regs *regs; regs = task_pt_regs(prev); - if (unlikely(regs->status & SR_SD)) - fstate_save(prev, regs); + fstate_save(prev, regs); fstate_restore(next, task_pt_regs(next)); } diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h index 574779900b..5d47334363 100644 --- a/arch/riscv/include/asm/thread_info.h +++ b/arch/riscv/include/asm/thread_info.h @@ -28,7 +28,6 @@ #define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER) #define OVERFLOW_STACK_SIZE SZ_4K -#define SHADOW_OVERFLOW_STACK_SIZE (1024) #define IRQ_STACK_SIZE THREAD_SIZE @@ -103,12 +102,14 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src); #define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */ #define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */ #define TIF_32BIT 11 /* compat-mode 32bit process */ +#define TIF_RISCV_V_DEFER_RESTORE 12 /* restore Vector before returning to user */ #define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME) #define _TIF_SIGPENDING (1 << TIF_SIGPENDING) #define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED) #define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL) #define
_TIF_UPROBE (1 << TIF_UPROBE) +#define _TIF_RISCV_V_DEFER_RESTORE (1 << TIF_RISCV_V_DEFER_RESTORE) #define _TIF_WORK_MASK \ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \ diff --git a/arch/riscv/include/asm/tlbbatch.h b/arch/riscv/include/asm/tlbbatch.h new file mode 100644 index 0000000000..46014f70b9 --- /dev/null +++ b/arch/riscv/include/asm/tlbbatch.h @@ -0,0 +1,15 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2023 Rivos Inc. + */ + +#ifndef _ASM_RISCV_TLBBATCH_H +#define _ASM_RISCV_TLBBATCH_H + +#include <linux/cpumask.h> + +struct arch_tlbflush_unmap_batch { + struct cpumask cpumask; +}; + +#endif /* _ASM_RISCV_TLBBATCH_H */ diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h index 51664ae485..4112cc8d1d 100644 --- a/arch/riscv/include/asm/tlbflush.h +++ b/arch/riscv/include/asm/tlbflush.h @@ -47,6 +47,14 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end); void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end); #endif + +bool arch_tlbbatch_should_defer(struct mm_struct *mm); +void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch, + struct mm_struct *mm, + unsigned long uaddr); +void arch_flush_tlb_batched_pending(struct mm_struct *mm); +void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch); + #else /* CONFIG_SMP && CONFIG_MMU */ #define flush_tlb_all() local_flush_tlb_all() diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h index e316ab3b77..61183688bd 100644 --- a/arch/riscv/include/asm/topology.h +++ b/arch/riscv/include/asm/topology.h @@ -9,6 +9,7 @@ #define arch_set_freq_scale topology_set_freq_scale #define arch_scale_freq_capacity topology_get_freq_scale #define arch_scale_freq_invariant topology_scale_freq_invariant +#define arch_scale_freq_ref topology_get_freq_ref /* Replace task scheduler's default cpu-invariant accounting */ #define arch_scale_cpu_capacity topology_get_cpu_scale diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h index ec0cab9fbd..72ec1d9bd3 100644 --- a/arch/riscv/include/asm/uaccess.h +++ b/arch/riscv/include/asm/uaccess.h @@ -319,7 +319,7 @@ unsigned long __must_check clear_user(void __user *to, unsigned long n) #define __get_kernel_nofault(dst, src, type, err_label) \ do { \ - long __kr_err; \ + long __kr_err = 0; \ \ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \ if (unlikely(__kr_err)) \ @@ -328,7 +328,7 @@ do { \ #define __put_kernel_nofault(dst, src, type, err_label) \ do { \ - long __kr_err; \ + long __kr_err = 0; \ \ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \ if (unlikely(__kr_err)) \ diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h index 87aaef6562..0cd6f0a027 100644 --- a/arch/riscv/include/asm/vector.h +++ b/arch/riscv/include/asm/vector.h @@ -22,6 +22,18 @@ extern unsigned long riscv_v_vsize; int riscv_v_setup_vsize(void); bool riscv_v_first_use_handler(struct pt_regs *regs); +void kernel_vector_begin(void); +void kernel_vector_end(void); +void get_cpu_vector_context(void); +void put_cpu_vector_context(void); +void riscv_v_thread_free(struct task_struct *tsk); +void __init riscv_v_setup_ctx_cache(void); +void riscv_v_thread_alloc(struct task_struct *tsk); + +static inline u32 riscv_v_flags(void) +{ + return READ_ONCE(current->thread.riscv_v_flags); +} static __always_inline bool has_vector(void) { @@ -162,36 +174,89 @@ static inline void 
riscv_v_vstate_discard(struct pt_regs *regs) __riscv_v_vstate_dirty(regs); } -static inline void riscv_v_vstate_save(struct task_struct *task, +static inline void riscv_v_vstate_save(struct __riscv_v_ext_state *vstate, struct pt_regs *regs) { if ((regs->status & SR_VS) == SR_VS_DIRTY) { - struct __riscv_v_ext_state *vstate = &task->thread.vstate; - __riscv_v_vstate_save(vstate, vstate->datap); __riscv_v_vstate_clean(regs); } } -static inline void riscv_v_vstate_restore(struct task_struct *task, +static inline void riscv_v_vstate_restore(struct __riscv_v_ext_state *vstate, struct pt_regs *regs) { if ((regs->status & SR_VS) != SR_VS_OFF) { - struct __riscv_v_ext_state *vstate = &task->thread.vstate; - __riscv_v_vstate_restore(vstate, vstate->datap); __riscv_v_vstate_clean(regs); } } +static inline void riscv_v_vstate_set_restore(struct task_struct *task, + struct pt_regs *regs) +{ + if ((regs->status & SR_VS) != SR_VS_OFF) { + set_tsk_thread_flag(task, TIF_RISCV_V_DEFER_RESTORE); + riscv_v_vstate_on(regs); + } +} + +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +static inline bool riscv_preempt_v_dirty(struct task_struct *task) +{ + return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_DIRTY); +} + +static inline bool riscv_preempt_v_restore(struct task_struct *task) +{ + return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V_NEED_RESTORE); +} + +static inline void riscv_preempt_v_clear_dirty(struct task_struct *task) +{ + barrier(); + task->thread.riscv_v_flags &= ~RISCV_PREEMPT_V_DIRTY; +} + +static inline void riscv_preempt_v_set_restore(struct task_struct *task) +{ + barrier(); + task->thread.riscv_v_flags |= RISCV_PREEMPT_V_NEED_RESTORE; +} + +static inline bool riscv_preempt_v_started(struct task_struct *task) +{ + return !!(task->thread.riscv_v_flags & RISCV_PREEMPT_V); +} + +#else /* !CONFIG_RISCV_ISA_V_PREEMPTIVE */ +static inline bool riscv_preempt_v_dirty(struct task_struct *task) { return false; } +static inline bool riscv_preempt_v_restore(struct task_struct *task) { return false; } +static inline bool riscv_preempt_v_started(struct task_struct *task) { return false; } +#define riscv_preempt_v_clear_dirty(tsk) do {} while (0) +#define riscv_preempt_v_set_restore(tsk) do {} while (0) +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */ + static inline void __switch_to_vector(struct task_struct *prev, struct task_struct *next) { struct pt_regs *regs; - regs = task_pt_regs(prev); - riscv_v_vstate_save(prev, regs); - riscv_v_vstate_restore(next, task_pt_regs(next)); + if (riscv_preempt_v_started(prev)) { + if (riscv_preempt_v_dirty(prev)) { + __riscv_v_vstate_save(&prev->thread.kernel_vstate, + prev->thread.kernel_vstate.datap); + riscv_preempt_v_clear_dirty(prev); + } + } else { + regs = task_pt_regs(prev); + riscv_v_vstate_save(&prev->thread.vstate, regs); + } + + if (riscv_preempt_v_started(next)) + riscv_preempt_v_set_restore(next); + else + riscv_v_vstate_set_restore(next, task_pt_regs(next)); } void riscv_v_vstate_ctrl_init(struct task_struct *tsk); @@ -208,11 +273,14 @@ static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; } static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; } #define riscv_v_vsize (0) #define riscv_v_vstate_discard(regs) do {} while (0) -#define riscv_v_vstate_save(task, regs) do {} while (0) -#define riscv_v_vstate_restore(task, regs) do {} while (0) +#define riscv_v_vstate_save(vstate, regs) do {} while (0) +#define riscv_v_vstate_restore(vstate, regs) do {} while (0) #define __switch_to_vector(__prev, __next) do {} while (0) 
#define riscv_v_vstate_off(regs) do {} while (0) #define riscv_v_vstate_on(regs) do {} while (0) +#define riscv_v_thread_free(tsk) do {} while (0) +#define riscv_v_setup_ctx_cache() do {} while (0) +#define riscv_v_thread_alloc(tsk) do {} while (0) #endif /* CONFIG_RISCV_ISA_V */ diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h index 7c086ac6ec..f3f031e341 100644 --- a/arch/riscv/include/asm/word-at-a-time.h +++ b/arch/riscv/include/asm/word-at-a-time.h @@ -9,6 +9,7 @@ #define _ASM_RISCV_WORD_AT_A_TIME_H +#include <asm/asm-extable.h> #include <linux/kernel.h> struct word_at_a_time { @@ -45,4 +46,30 @@ static inline unsigned long find_zero(unsigned long mask) /* The mask we created is directly usable as a bytemask */ #define zero_bytemask(mask) (mask) +#ifdef CONFIG_DCACHE_WORD_ACCESS + +/* + * Load an unaligned word from kernel space. + * + * In the (very unlikely) case of the word being a page-crosser + * and the next page not being mapped, take the exception and + * return zeroes in the non-existing part. + */ +static inline unsigned long load_unaligned_zeropad(const void *addr) +{ + unsigned long ret; + + /* Load word from unaligned pointer addr */ + asm( + "1: " REG_L " %0, %2\n" + "2:\n" + _ASM_EXTABLE_LOAD_UNALIGNED_ZEROPAD(1b, 2b, %0, %1) + : "=&r" (ret) + : "r" (addr), "m" (*(unsigned long *)addr)); + + return ret; +} + +#endif /* CONFIG_DCACHE_WORD_ACCESS */ + #endif /* _ASM_RISCV_WORD_AT_A_TIME_H */ diff --git a/arch/riscv/include/asm/xor.h b/arch/riscv/include/asm/xor.h new file mode 100644 index 0000000000..96011861e4 --- /dev/null +++ b/arch/riscv/include/asm/xor.h @@ -0,0 +1,68 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 SiFive + */ + +#include <linux/hardirq.h> +#include <asm-generic/xor.h> +#ifdef CONFIG_RISCV_ISA_V +#include <asm/vector.h> +#include <asm/switch_to.h> +#include <asm/asm-prototypes.h> + +static void xor_vector_2(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2) +{ + kernel_vector_begin(); + xor_regs_2_(bytes, p1, p2); + kernel_vector_end(); +} + +static void xor_vector_3(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3) +{ + kernel_vector_begin(); + xor_regs_3_(bytes, p1, p2, p3); + kernel_vector_end(); +} + +static void xor_vector_4(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4) +{ + kernel_vector_begin(); + xor_regs_4_(bytes, p1, p2, p3, p4); + kernel_vector_end(); +} + +static void xor_vector_5(unsigned long bytes, unsigned long *__restrict p1, + const unsigned long *__restrict p2, + const unsigned long *__restrict p3, + const unsigned long *__restrict p4, + const unsigned long *__restrict p5) +{ + kernel_vector_begin(); + xor_regs_5_(bytes, p1, p2, p3, p4, p5); + kernel_vector_end(); +} + +static struct xor_block_template xor_block_rvv = { + .name = "rvv", + .do_2 = xor_vector_2, + .do_3 = xor_vector_3, + .do_4 = xor_vector_4, + .do_5 = xor_vector_5 +}; + +#undef XOR_TRY_TEMPLATES +#define XOR_TRY_TEMPLATES \ + do { \ + xor_speed(&xor_block_8regs); \ + xor_speed(&xor_block_32regs); \ + if (has_vector()) { \ + xor_speed(&xor_block_rvv);\ + } \ + } while (0) +#endif diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h index 10aaa83db8..95050ebe9a 100644 --- a/arch/riscv/include/uapi/asm/auxvec.h +++ 
b/arch/riscv/include/uapi/asm/auxvec.h @@ -34,7 +34,7 @@ #define AT_L3_CACHEGEOMETRY 47 /* entries in ARCH_DLINFO */ -#define AT_VECTOR_SIZE_ARCH 9 +#define AT_VECTOR_SIZE_ARCH 10 #define AT_MINSIGSTKSZ 51 #endif /* _UAPI_ASM_RISCV_AUXVEC_H */ diff --git a/arch/riscv/include/uapi/asm/hwprobe.h b/arch/riscv/include/uapi/asm/hwprobe.h index b659ffcfcd..2902f68dc9 100644 --- a/arch/riscv/include/uapi/asm/hwprobe.h +++ b/arch/riscv/include/uapi/asm/hwprobe.h @@ -30,6 +30,35 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_EXT_ZBB (1 << 4) #define RISCV_HWPROBE_EXT_ZBS (1 << 5) #define RISCV_HWPROBE_EXT_ZICBOZ (1 << 6) +#define RISCV_HWPROBE_EXT_ZBC (1 << 7) +#define RISCV_HWPROBE_EXT_ZBKB (1 << 8) +#define RISCV_HWPROBE_EXT_ZBKC (1 << 9) +#define RISCV_HWPROBE_EXT_ZBKX (1 << 10) +#define RISCV_HWPROBE_EXT_ZKND (1 << 11) +#define RISCV_HWPROBE_EXT_ZKNE (1 << 12) +#define RISCV_HWPROBE_EXT_ZKNH (1 << 13) +#define RISCV_HWPROBE_EXT_ZKSED (1 << 14) +#define RISCV_HWPROBE_EXT_ZKSH (1 << 15) +#define RISCV_HWPROBE_EXT_ZKT (1 << 16) +#define RISCV_HWPROBE_EXT_ZVBB (1 << 17) +#define RISCV_HWPROBE_EXT_ZVBC (1 << 18) +#define RISCV_HWPROBE_EXT_ZVKB (1 << 19) +#define RISCV_HWPROBE_EXT_ZVKG (1 << 20) +#define RISCV_HWPROBE_EXT_ZVKNED (1 << 21) +#define RISCV_HWPROBE_EXT_ZVKNHA (1 << 22) +#define RISCV_HWPROBE_EXT_ZVKNHB (1 << 23) +#define RISCV_HWPROBE_EXT_ZVKSED (1 << 24) +#define RISCV_HWPROBE_EXT_ZVKSH (1 << 25) +#define RISCV_HWPROBE_EXT_ZVKT (1 << 26) +#define RISCV_HWPROBE_EXT_ZFH (1 << 27) +#define RISCV_HWPROBE_EXT_ZFHMIN (1 << 28) +#define RISCV_HWPROBE_EXT_ZIHINTNTL (1 << 29) +#define RISCV_HWPROBE_EXT_ZVFH (1 << 30) +#define RISCV_HWPROBE_EXT_ZVFHMIN (1ULL << 31) +#define RISCV_HWPROBE_EXT_ZFA (1ULL << 32) +#define RISCV_HWPROBE_EXT_ZTSO (1ULL << 33) +#define RISCV_HWPROBE_EXT_ZACAS (1ULL << 34) +#define RISCV_HWPROBE_EXT_ZICOND (1ULL << 35) #define RISCV_HWPROBE_KEY_CPUPERF_0 5 #define RISCV_HWPROBE_MISALIGNED_UNKNOWN (0 << 0) #define RISCV_HWPROBE_MISALIGNED_EMULATED (1 << 0) @@ -40,4 +69,7 @@ struct riscv_hwprobe { #define RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE 6 /* Increase RISCV_HWPROBE_MAX_KEY when adding items. 
*/ +/* Flags */ +#define RISCV_HWPROBE_WHICH_CPUS (1 << 0) + #endif diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h index 60d3b21dea..7499e88a94 100644 --- a/arch/riscv/include/uapi/asm/kvm.h +++ b/arch/riscv/include/uapi/asm/kvm.h @@ -139,6 +139,33 @@ enum KVM_RISCV_ISA_EXT_ID { KVM_RISCV_ISA_EXT_ZIHPM, KVM_RISCV_ISA_EXT_SMSTATEEN, KVM_RISCV_ISA_EXT_ZICOND, + KVM_RISCV_ISA_EXT_ZBC, + KVM_RISCV_ISA_EXT_ZBKB, + KVM_RISCV_ISA_EXT_ZBKC, + KVM_RISCV_ISA_EXT_ZBKX, + KVM_RISCV_ISA_EXT_ZKND, + KVM_RISCV_ISA_EXT_ZKNE, + KVM_RISCV_ISA_EXT_ZKNH, + KVM_RISCV_ISA_EXT_ZKR, + KVM_RISCV_ISA_EXT_ZKSED, + KVM_RISCV_ISA_EXT_ZKSH, + KVM_RISCV_ISA_EXT_ZKT, + KVM_RISCV_ISA_EXT_ZVBB, + KVM_RISCV_ISA_EXT_ZVBC, + KVM_RISCV_ISA_EXT_ZVKB, + KVM_RISCV_ISA_EXT_ZVKG, + KVM_RISCV_ISA_EXT_ZVKNED, + KVM_RISCV_ISA_EXT_ZVKNHA, + KVM_RISCV_ISA_EXT_ZVKNHB, + KVM_RISCV_ISA_EXT_ZVKSED, + KVM_RISCV_ISA_EXT_ZVKSH, + KVM_RISCV_ISA_EXT_ZVKT, + KVM_RISCV_ISA_EXT_ZFH, + KVM_RISCV_ISA_EXT_ZFHMIN, + KVM_RISCV_ISA_EXT_ZIHINTNTL, + KVM_RISCV_ISA_EXT_ZVFH, + KVM_RISCV_ISA_EXT_ZVFHMIN, + KVM_RISCV_ISA_EXT_ZFA, KVM_RISCV_ISA_EXT_MAX, }; @@ -157,9 +184,16 @@ enum KVM_RISCV_SBI_EXT_ID { KVM_RISCV_SBI_EXT_EXPERIMENTAL, KVM_RISCV_SBI_EXT_VENDOR, KVM_RISCV_SBI_EXT_DBCN, + KVM_RISCV_SBI_EXT_STA, KVM_RISCV_SBI_EXT_MAX, }; +/* SBI STA extension registers for KVM_GET_ONE_REG and KVM_SET_ONE_REG */ +struct kvm_riscv_sbi_sta { + unsigned long shmem_lo; + unsigned long shmem_hi; +}; + /* Possible states for kvm_riscv_timer */ #define KVM_RISCV_TIMER_STATE_OFF 0 #define KVM_RISCV_TIMER_STATE_ON 1 @@ -241,6 +275,12 @@ enum KVM_RISCV_SBI_EXT_ID { #define KVM_REG_RISCV_VECTOR_REG(n) \ ((n) + sizeof(struct __riscv_v_ext_state) / sizeof(unsigned long)) +/* Registers for specific SBI extensions are mapped as type 10 */ +#define KVM_REG_RISCV_SBI_STATE (0x0a << KVM_REG_RISCV_TYPE_SHIFT) +#define KVM_REG_RISCV_SBI_STA (0x0 << KVM_REG_RISCV_SUBTYPE_SHIFT) +#define KVM_REG_RISCV_SBI_STA_REG(name) \ + (offsetof(struct kvm_riscv_sbi_sta, name) / sizeof(unsigned long)) + /* Device Control API: RISC-V AIA */ #define KVM_DEV_RISCV_APLIC_ALIGN 0x1000 #define KVM_DEV_RISCV_APLIC_SIZE 0x4000 diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile index 40d054939a..604d6bf7e4 100644 --- a/arch/riscv/kernel/Makefile +++ b/arch/riscv/kernel/Makefile @@ -12,7 +12,7 @@ endif CFLAGS_syscall_table.o += $(call cc-option,-Wno-override-init,) CFLAGS_compat_syscall_table.o += $(call cc-option,-Wno-override-init,) -ifdef CONFIG_KEXEC +ifdef CONFIG_KEXEC_CORE AFLAGS_kexec_relocate.o := -mcmodel=medany $(call cc-option,-mno-relax) endif @@ -52,6 +52,7 @@ obj-y += setup.o obj-y += signal.o obj-y += syscall_table.o obj-y += sys_riscv.o +obj-y += sys_hwprobe.o obj-y += time.o obj-y += traps.o obj-y += riscv_ksyms.o @@ -65,6 +66,7 @@ obj-$(CONFIG_MMU) += vdso.o vdso/ obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o obj-$(CONFIG_FPU) += fpu.o obj-$(CONFIG_RISCV_ISA_V) += vector.o +obj-$(CONFIG_RISCV_ISA_V) += kernel_mode_vector.o obj-$(CONFIG_SMP) += smpboot.o obj-$(CONFIG_SMP) += smp.o obj-$(CONFIG_SMP) += cpu_ops.o @@ -87,6 +89,7 @@ obj-$(CONFIG_SMP) += sbi-ipi.o obj-$(CONFIG_SMP) += cpu_ops_sbi.o endif obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o +obj-$(CONFIG_PARAVIRT) += paravirt.o obj-$(CONFIG_KGDB) += kgdb.o obj-$(CONFIG_KEXEC_CORE) += kexec_relocate.o crash_save_regs.o machine_kexec.o obj-$(CONFIG_KEXEC_FILE) += elf_kexec.o machine_kexec_file.o diff --git a/arch/riscv/kernel/cfi.c b/arch/riscv/kernel/cfi.c index 
820158d7a2..6ec9dbd729 100644 --- a/arch/riscv/kernel/cfi.c +++ b/arch/riscv/kernel/cfi.c @@ -4,7 +4,7 @@ * * Copyright (C) 2023 Google LLC */ -#include <asm/cfi.h> +#include <linux/cfi.h> #include <asm/insn.h> /* diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c index 457a18efcb..28b58fc5ad 100644 --- a/arch/riscv/kernel/cpu-hotplug.c +++ b/arch/riscv/kernel/cpu-hotplug.c @@ -18,7 +18,7 @@ bool cpu_has_hotplug(unsigned int cpu) { - if (cpu_ops[cpu]->cpu_stop) + if (cpu_ops->cpu_stop) return true; return false; @@ -29,25 +29,18 @@ bool cpu_has_hotplug(unsigned int cpu) */ int __cpu_disable(void) { - int ret = 0; unsigned int cpu = smp_processor_id(); - if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop) + if (!cpu_ops->cpu_stop) return -EOPNOTSUPP; - if (cpu_ops[cpu]->cpu_disable) - ret = cpu_ops[cpu]->cpu_disable(cpu); - - if (ret) - return ret; - remove_cpu_topology(cpu); numa_remove_cpu(cpu); set_cpu_online(cpu, false); riscv_ipi_disable(); irq_migrate_all_off_this_cpu(); - return ret; + return 0; } #ifdef CONFIG_HOTPLUG_CPU @@ -62,8 +55,8 @@ void arch_cpuhp_cleanup_dead_cpu(unsigned int cpu) pr_notice("CPU%u: off\n", cpu); /* Verify from the firmware if the cpu is really stopped*/ - if (cpu_ops[cpu]->cpu_is_stopped) - ret = cpu_ops[cpu]->cpu_is_stopped(cpu); + if (cpu_ops->cpu_is_stopped) + ret = cpu_ops->cpu_is_stopped(cpu); if (ret) pr_warn("CPU%d may not have stopped: %d\n", cpu, ret); } @@ -77,7 +70,7 @@ void __noreturn arch_cpu_idle_dead(void) cpuhp_ap_report_dead(); - cpu_ops[smp_processor_id()]->cpu_stop(); + cpu_ops->cpu_stop(); /* It should never reach here */ BUG(); } diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c index eb479a88a9..6a8bd8f4db 100644 --- a/arch/riscv/kernel/cpu_ops.c +++ b/arch/riscv/kernel/cpu_ops.c @@ -13,25 +13,21 @@ #include <asm/sbi.h> #include <asm/smp.h> -const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init; +const struct cpu_operations *cpu_ops __ro_after_init = &cpu_ops_spinwait; extern const struct cpu_operations cpu_ops_sbi; #ifndef CONFIG_RISCV_BOOT_SPINWAIT const struct cpu_operations cpu_ops_spinwait = { - .name = "", - .cpu_prepare = NULL, .cpu_start = NULL, }; #endif -void __init cpu_set_ops(int cpuid) +void __init cpu_set_ops(void) { #if IS_ENABLED(CONFIG_RISCV_SBI) if (sbi_probe_extension(SBI_EXT_HSM)) { - if (!cpuid) - pr_info("SBI HSM extension detected\n"); - cpu_ops[cpuid] = &cpu_ops_sbi; - } else + pr_info("SBI HSM extension detected\n"); + cpu_ops = &cpu_ops_sbi; + } #endif - cpu_ops[cpuid] = &cpu_ops_spinwait; } diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c index efa0f08166..1cc7df740e 100644 --- a/arch/riscv/kernel/cpu_ops_sbi.c +++ b/arch/riscv/kernel/cpu_ops_sbi.c @@ -79,23 +79,7 @@ static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle) return sbi_hsm_hart_start(hartid, boot_addr, hsm_data); } -static int sbi_cpu_prepare(unsigned int cpuid) -{ - if (!cpu_ops_sbi.cpu_start) { - pr_err("cpu start method not defined for CPU [%d]\n", cpuid); - return -ENODEV; - } - return 0; -} - #ifdef CONFIG_HOTPLUG_CPU -static int sbi_cpu_disable(unsigned int cpuid) -{ - if (!cpu_ops_sbi.cpu_stop) - return -EOPNOTSUPP; - return 0; -} - static void sbi_cpu_stop(void) { int ret; @@ -118,11 +102,8 @@ static int sbi_cpu_is_stopped(unsigned int cpuid) #endif const struct cpu_operations cpu_ops_sbi = { - .name = "sbi", - .cpu_prepare = sbi_cpu_prepare, .cpu_start = sbi_cpu_start, #ifdef CONFIG_HOTPLUG_CPU - .cpu_disable = sbi_cpu_disable, .cpu_stop = 
sbi_cpu_stop, .cpu_is_stopped = sbi_cpu_is_stopped, #endif diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c index d98d19226b..613872b0a2 100644 --- a/arch/riscv/kernel/cpu_ops_spinwait.c +++ b/arch/riscv/kernel/cpu_ops_spinwait.c @@ -39,15 +39,6 @@ static void cpu_update_secondary_bootdata(unsigned int cpuid, WRITE_ONCE(__cpu_spinwait_task_pointer[hartid], tidle); } -static int spinwait_cpu_prepare(unsigned int cpuid) -{ - if (!cpu_ops_spinwait.cpu_start) { - pr_err("cpu start method not defined for CPU [%d]\n", cpuid); - return -ENODEV; - } - return 0; -} - static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) { /* @@ -64,7 +55,5 @@ static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle) } const struct cpu_operations cpu_ops_spinwait = { - .name = "spinwait", - .cpu_prepare = spinwait_cpu_prepare, .cpu_start = spinwait_cpu_start, }; diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c index 8677788133..79a5a35fab 100644 --- a/arch/riscv/kernel/cpufeature.c +++ b/arch/riscv/kernel/cpufeature.c @@ -8,8 +8,10 @@ #include <linux/acpi.h> #include <linux/bitmap.h> +#include <linux/cpu.h> #include <linux/cpuhotplug.h> #include <linux/ctype.h> +#include <linux/jump_label.h> #include <linux/log2.h> #include <linux/memory.h> #include <linux/module.h> @@ -45,6 +47,8 @@ struct riscv_isainfo hart_isa[NR_CPUS]; /* Performance information */ DEFINE_PER_CPU(long, misaligned_access_speed); +static cpumask_t fast_misaligned_access; + /** * riscv_isa_extension_base() - Get base extension word * @@ -71,7 +75,7 @@ EXPORT_SYMBOL_GPL(riscv_isa_extension_base); * * NOTE: If isa_bitmap is NULL then Host ISA bitmap will be used. */ -bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit) +bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, unsigned int bit) { const unsigned long *bmap = (isa_bitmap) ? 
isa_bitmap : riscv_isa; @@ -103,17 +107,111 @@ static bool riscv_isa_extension_check(int id) return false; } return true; + case RISCV_ISA_EXT_INVALID: + return false; } return true; } -#define __RISCV_ISA_EXT_DATA(_name, _id) { \ - .name = #_name, \ - .property = #_name, \ - .id = _id, \ +#define _RISCV_ISA_EXT_DATA(_name, _id, _subset_exts, _subset_exts_size) { \ + .name = #_name, \ + .property = #_name, \ + .id = _id, \ + .subset_ext_ids = _subset_exts, \ + .subset_ext_size = _subset_exts_size \ } +#define __RISCV_ISA_EXT_DATA(_name, _id) _RISCV_ISA_EXT_DATA(_name, _id, NULL, 0) + +/* Used to declare pure "lasso" extension (Zk for instance) */ +#define __RISCV_ISA_EXT_BUNDLE(_name, _bundled_exts) \ + _RISCV_ISA_EXT_DATA(_name, RISCV_ISA_EXT_INVALID, _bundled_exts, ARRAY_SIZE(_bundled_exts)) + +/* Used to declare extensions that are a superset of other extensions (Zvbb for instance) */ +#define __RISCV_ISA_EXT_SUPERSET(_name, _id, _sub_exts) \ + _RISCV_ISA_EXT_DATA(_name, _id, _sub_exts, ARRAY_SIZE(_sub_exts)) + +static const unsigned int riscv_zk_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, + RISCV_ISA_EXT_ZBKX, + RISCV_ISA_EXT_ZKND, + RISCV_ISA_EXT_ZKNE, + RISCV_ISA_EXT_ZKR, + RISCV_ISA_EXT_ZKT, +}; + +static const unsigned int riscv_zkn_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, + RISCV_ISA_EXT_ZBKX, + RISCV_ISA_EXT_ZKND, + RISCV_ISA_EXT_ZKNE, + RISCV_ISA_EXT_ZKNH, +}; + +static const unsigned int riscv_zks_bundled_exts[] = { + RISCV_ISA_EXT_ZBKB, + RISCV_ISA_EXT_ZBKC, + RISCV_ISA_EXT_ZKSED, + RISCV_ISA_EXT_ZKSH +}; + +#define RISCV_ISA_EXT_ZVKN \ + RISCV_ISA_EXT_ZVKNED, \ + RISCV_ISA_EXT_ZVKNHB, \ + RISCV_ISA_EXT_ZVKB, \ + RISCV_ISA_EXT_ZVKT + +static const unsigned int riscv_zvkn_bundled_exts[] = { + RISCV_ISA_EXT_ZVKN +}; + +static const unsigned int riscv_zvknc_bundled_exts[] = { + RISCV_ISA_EXT_ZVKN, + RISCV_ISA_EXT_ZVBC +}; + +static const unsigned int riscv_zvkng_bundled_exts[] = { + RISCV_ISA_EXT_ZVKN, + RISCV_ISA_EXT_ZVKG +}; + +#define RISCV_ISA_EXT_ZVKS \ + RISCV_ISA_EXT_ZVKSED, \ + RISCV_ISA_EXT_ZVKSH, \ + RISCV_ISA_EXT_ZVKB, \ + RISCV_ISA_EXT_ZVKT + +static const unsigned int riscv_zvks_bundled_exts[] = { + RISCV_ISA_EXT_ZVKS +}; + +static const unsigned int riscv_zvksc_bundled_exts[] = { + RISCV_ISA_EXT_ZVKS, + RISCV_ISA_EXT_ZVBC +}; + +static const unsigned int riscv_zvksg_bundled_exts[] = { + RISCV_ISA_EXT_ZVKS, + RISCV_ISA_EXT_ZVKG +}; + +static const unsigned int riscv_zvbb_exts[] = { + RISCV_ISA_EXT_ZVKB +}; + +/* + * While the [ms]envcfg CSRs were not defined until version 1.12 of the RISC-V + * privileged ISA, the existence of the CSRs is implied by any extension which + * specifies [ms]envcfg bit(s). Hence, we define a custom ISA extension for the + * existence of the CSR, and treat it as a subset of those other extensions. + */ +static const unsigned int riscv_xlinuxenvcfg_exts[] = { + RISCV_ISA_EXT_XLINUXENVCFG +}; + /* * The canonical order of ISA extension names in the ISA string is defined in * chapter 27 of the unprivileged specification. 
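A note on the bundle and superset tables above: a token such as "zkn" in the ISA string never owns a bit of its own (its id is RISCV_ISA_EXT_INVALID), so parsing it only sets the bits of its member extensions. A hedged consumer-side sketch, reusing __riscv_isa_extension_available() from this file with the same member list as riscv_zkn_bundled_exts (the wrapper function itself is illustrative):

/*
 * Illustrative only: gate on the members that the "zkn" bundle
 * expands to, since no "zkn" bit exists in the host ISA bitmap.
 */
static bool have_zkn(void)
{
	return __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZBKB) &&
	       __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZBKC) &&
	       __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZBKX) &&
	       __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZKND) &&
	       __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZKNE) &&
	       __riscv_isa_extension_available(NULL, RISCV_ISA_EXT_ZKNH);
}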
@@ -161,23 +259,57 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { __RISCV_ISA_EXT_DATA(d, RISCV_ISA_EXT_d), __RISCV_ISA_EXT_DATA(q, RISCV_ISA_EXT_q), __RISCV_ISA_EXT_DATA(c, RISCV_ISA_EXT_c), - __RISCV_ISA_EXT_DATA(b, RISCV_ISA_EXT_b), - __RISCV_ISA_EXT_DATA(k, RISCV_ISA_EXT_k), - __RISCV_ISA_EXT_DATA(j, RISCV_ISA_EXT_j), - __RISCV_ISA_EXT_DATA(p, RISCV_ISA_EXT_p), __RISCV_ISA_EXT_DATA(v, RISCV_ISA_EXT_v), __RISCV_ISA_EXT_DATA(h, RISCV_ISA_EXT_h), - __RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM), - __RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ), + __RISCV_ISA_EXT_SUPERSET(zicbom, RISCV_ISA_EXT_ZICBOM, riscv_xlinuxenvcfg_exts), + __RISCV_ISA_EXT_SUPERSET(zicboz, RISCV_ISA_EXT_ZICBOZ, riscv_xlinuxenvcfg_exts), __RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR), __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND), __RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR), __RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI), + __RISCV_ISA_EXT_DATA(zihintntl, RISCV_ISA_EXT_ZIHINTNTL), __RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE), __RISCV_ISA_EXT_DATA(zihpm, RISCV_ISA_EXT_ZIHPM), + __RISCV_ISA_EXT_DATA(zacas, RISCV_ISA_EXT_ZACAS), + __RISCV_ISA_EXT_DATA(zfa, RISCV_ISA_EXT_ZFA), + __RISCV_ISA_EXT_DATA(zfh, RISCV_ISA_EXT_ZFH), + __RISCV_ISA_EXT_DATA(zfhmin, RISCV_ISA_EXT_ZFHMIN), __RISCV_ISA_EXT_DATA(zba, RISCV_ISA_EXT_ZBA), __RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB), + __RISCV_ISA_EXT_DATA(zbc, RISCV_ISA_EXT_ZBC), + __RISCV_ISA_EXT_DATA(zbkb, RISCV_ISA_EXT_ZBKB), + __RISCV_ISA_EXT_DATA(zbkc, RISCV_ISA_EXT_ZBKC), + __RISCV_ISA_EXT_DATA(zbkx, RISCV_ISA_EXT_ZBKX), __RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS), + __RISCV_ISA_EXT_BUNDLE(zk, riscv_zk_bundled_exts), + __RISCV_ISA_EXT_BUNDLE(zkn, riscv_zkn_bundled_exts), + __RISCV_ISA_EXT_DATA(zknd, RISCV_ISA_EXT_ZKND), + __RISCV_ISA_EXT_DATA(zkne, RISCV_ISA_EXT_ZKNE), + __RISCV_ISA_EXT_DATA(zknh, RISCV_ISA_EXT_ZKNH), + __RISCV_ISA_EXT_DATA(zkr, RISCV_ISA_EXT_ZKR), + __RISCV_ISA_EXT_BUNDLE(zks, riscv_zks_bundled_exts), + __RISCV_ISA_EXT_DATA(zkt, RISCV_ISA_EXT_ZKT), + __RISCV_ISA_EXT_DATA(zksed, RISCV_ISA_EXT_ZKSED), + __RISCV_ISA_EXT_DATA(zksh, RISCV_ISA_EXT_ZKSH), + __RISCV_ISA_EXT_DATA(ztso, RISCV_ISA_EXT_ZTSO), + __RISCV_ISA_EXT_SUPERSET(zvbb, RISCV_ISA_EXT_ZVBB, riscv_zvbb_exts), + __RISCV_ISA_EXT_DATA(zvbc, RISCV_ISA_EXT_ZVBC), + __RISCV_ISA_EXT_DATA(zvfh, RISCV_ISA_EXT_ZVFH), + __RISCV_ISA_EXT_DATA(zvfhmin, RISCV_ISA_EXT_ZVFHMIN), + __RISCV_ISA_EXT_DATA(zvkb, RISCV_ISA_EXT_ZVKB), + __RISCV_ISA_EXT_DATA(zvkg, RISCV_ISA_EXT_ZVKG), + __RISCV_ISA_EXT_BUNDLE(zvkn, riscv_zvkn_bundled_exts), + __RISCV_ISA_EXT_BUNDLE(zvknc, riscv_zvknc_bundled_exts), + __RISCV_ISA_EXT_DATA(zvkned, RISCV_ISA_EXT_ZVKNED), + __RISCV_ISA_EXT_BUNDLE(zvkng, riscv_zvkng_bundled_exts), + __RISCV_ISA_EXT_DATA(zvknha, RISCV_ISA_EXT_ZVKNHA), + __RISCV_ISA_EXT_DATA(zvknhb, RISCV_ISA_EXT_ZVKNHB), + __RISCV_ISA_EXT_BUNDLE(zvks, riscv_zvks_bundled_exts), + __RISCV_ISA_EXT_BUNDLE(zvksc, riscv_zvksc_bundled_exts), + __RISCV_ISA_EXT_DATA(zvksed, RISCV_ISA_EXT_ZVKSED), + __RISCV_ISA_EXT_DATA(zvksh, RISCV_ISA_EXT_ZVKSH), + __RISCV_ISA_EXT_BUNDLE(zvksg, riscv_zvksg_bundled_exts), + __RISCV_ISA_EXT_DATA(zvkt, RISCV_ISA_EXT_ZVKT), __RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA), __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN), __RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA), @@ -190,6 +322,31 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = { const size_t riscv_isa_ext_count = ARRAY_SIZE(riscv_isa_ext); +static void __init match_isa_ext(const struct 
riscv_isa_ext_data *ext, const char *name, + const char *name_end, struct riscv_isainfo *isainfo) +{ + if ((name_end - name == strlen(ext->name)) && + !strncasecmp(name, ext->name, name_end - name)) { + /* + * If this is a bundle, enable all the ISA extensions that + * comprise the bundle. + */ + if (ext->subset_ext_size) { + for (int i = 0; i < ext->subset_ext_size; i++) { + if (riscv_isa_extension_check(ext->subset_ext_ids[i])) + set_bit(ext->subset_ext_ids[i], isainfo->isa); + } + } + + /* + * This is valid even for bundle extensions which use the RISCV_ISA_EXT_INVALID id + * (rejected by riscv_isa_extension_check()). + */ + if (riscv_isa_extension_check(ext->id)) + set_bit(ext->id, isainfo->isa); + } +} + static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct riscv_isainfo *isainfo, unsigned long *isa2hwcap, const char *isa) { @@ -322,14 +479,6 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc if (*isa == '_') ++isa; -#define SET_ISA_EXT_MAP(name, bit) \ - do { \ - if ((ext_end - ext == strlen(name)) && \ - !strncasecmp(ext, name, strlen(name)) && \ - riscv_isa_extension_check(bit)) \ - set_bit(bit, isainfo->isa); \ - } while (false) \ - if (unlikely(ext_err)) continue; if (!ext_long) { @@ -341,10 +490,8 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc } } else { for (int i = 0; i < riscv_isa_ext_count; i++) - SET_ISA_EXT_MAP(riscv_isa_ext[i].name, - riscv_isa_ext[i].id); + match_isa_ext(&riscv_isa_ext[i], ext, ext_end, isainfo); } -#undef SET_ISA_EXT_MAP } } @@ -457,18 +604,26 @@ static int __init riscv_fill_hwcap_from_ext_list(unsigned long *isa2hwcap) } for (int i = 0; i < riscv_isa_ext_count; i++) { + const struct riscv_isa_ext_data *ext = &riscv_isa_ext[i]; + if (of_property_match_string(cpu_node, "riscv,isa-extensions", - riscv_isa_ext[i].property) < 0) + ext->property) < 0) continue; - if (!riscv_isa_extension_check(riscv_isa_ext[i].id)) - continue; + if (ext->subset_ext_size) { + for (int j = 0; j < ext->subset_ext_size; j++) { + if (riscv_isa_extension_check(ext->subset_ext_ids[j])) + set_bit(ext->subset_ext_ids[j], isainfo->isa); + } + } - /* Only single letter extensions get set in hwcap */ - if (strnlen(riscv_isa_ext[i].name, 2) == 1) - this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; + if (riscv_isa_extension_check(ext->id)) { + set_bit(ext->id, isainfo->isa); - set_bit(riscv_isa_ext[i].id, isainfo->isa); + /* Only single letter extensions get set in hwcap */ + if (strnlen(riscv_isa_ext[i].name, 2) == 1) + this_hwcap |= isa2hwcap[riscv_isa_ext[i].id]; + } } of_node_put(cpu_node); @@ -658,6 +813,16 @@ static int check_unaligned_access(void *param) (speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow"); per_cpu(misaligned_access_speed, cpu) = speed; + + /* + * Set the value of fast_misaligned_access of a CPU. These operations + * are atomic to avoid race conditions.
+ */ + if (speed == RISCV_HWPROBE_MISALIGNED_FAST) + cpumask_set_cpu(cpu, &fast_misaligned_access); + else + cpumask_clear_cpu(cpu, &fast_misaligned_access); + return 0; } @@ -670,13 +835,69 @@ static void check_unaligned_access_nonboot_cpu(void *param) check_unaligned_access(pages[cpu]); } +DEFINE_STATIC_KEY_FALSE(fast_misaligned_access_speed_key); + +static void modify_unaligned_access_branches(cpumask_t *mask, int weight) +{ + if (cpumask_weight(mask) == weight) + static_branch_enable_cpuslocked(&fast_misaligned_access_speed_key); + else + static_branch_disable_cpuslocked(&fast_misaligned_access_speed_key); +} + +static void set_unaligned_access_static_branches_except_cpu(int cpu) +{ + /* + * Same as set_unaligned_access_static_branches, except excludes the + * given CPU from the result. When a CPU is hotplugged into an offline + * state, this function is called before the CPU is set to offline in + * the cpumask, and thus the CPU needs to be explicitly excluded. + */ + + cpumask_t fast_except_me; + + cpumask_and(&fast_except_me, &fast_misaligned_access, cpu_online_mask); + cpumask_clear_cpu(cpu, &fast_except_me); + + modify_unaligned_access_branches(&fast_except_me, num_online_cpus() - 1); +} + +static void set_unaligned_access_static_branches(void) +{ + /* + * This will be called after check_unaligned_access_all_cpus so the + * result of unaligned access speed for all CPUs will be available. + * + * To avoid the number of online cpus changing between reading + * cpu_online_mask and calling num_online_cpus, cpus_read_lock must be + * held before calling this function. + */ + + cpumask_t fast_and_online; + + cpumask_and(&fast_and_online, &fast_misaligned_access, cpu_online_mask); + + modify_unaligned_access_branches(&fast_and_online, num_online_cpus()); +} + +static int lock_and_set_unaligned_access_static_branch(void) +{ + cpus_read_lock(); + set_unaligned_access_static_branches(); + cpus_read_unlock(); + + return 0; +} + +arch_initcall_sync(lock_and_set_unaligned_access_static_branch); + static int riscv_online_cpu(unsigned int cpu) { static struct page *buf; /* We are already set since the last check */ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN) - return 0; + goto exit; buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER); if (!buf) { @@ -686,6 +907,17 @@ static int riscv_online_cpu(unsigned int cpu) check_unaligned_access(buf); __free_pages(buf, MISALIGNED_BUFFER_ORDER); + +exit: + set_unaligned_access_static_branches(); + + return 0; +} + +static int riscv_offline_cpu(unsigned int cpu) +{ + set_unaligned_access_static_branches_except_cpu(cpu); + return 0; } @@ -720,9 +952,12 @@ static int check_unaligned_access_all_cpus(void) /* Check core 0. */ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true); - /* Setup hotplug callback for any new CPUs that come online. */ + /* + * Setup hotplug callbacks for any new CPUs that come online or go + * offline. 
+ */ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online", - riscv_online_cpu, NULL); + riscv_online_cpu, riscv_offline_cpu); out: unaligned_emulation_finish(); diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c index aa6209a74c..b64bf1624a 100644 --- a/arch/riscv/kernel/efi.c +++ b/arch/riscv/kernel/efi.c @@ -60,7 +60,7 @@ int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md) static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data) { efi_memory_desc_t *md = data; - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); unsigned long val; if (md->attribute & EFI_MEMORY_RO) { diff --git a/arch/riscv/kernel/elf_kexec.c b/arch/riscv/kernel/elf_kexec.c index e60fbd8660..5bd1ec3341 100644 --- a/arch/riscv/kernel/elf_kexec.c +++ b/arch/riscv/kernel/elf_kexec.c @@ -216,7 +216,6 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, if (ret) goto out; kernel_start = image->start; - pr_notice("The entry point of kernel at 0x%lx\n", image->start); /* Add the kernel binary to the image */ ret = riscv_kexec_elf_load(image, &ehdr, &elf_info, @@ -252,8 +251,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, image->elf_load_addr = kbuf.mem; image->elf_headers_sz = headers_sz; - pr_debug("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", - image->elf_load_addr, kbuf.bufsz, kbuf.memsz); + kexec_dprintk("Loaded elf core header at 0x%lx bufsz=0x%lx memsz=0x%lx\n", + image->elf_load_addr, kbuf.bufsz, kbuf.memsz); /* Setup cmdline for kdump kernel case */ modified_cmdline = setup_kdump_cmdline(image, cmdline, @@ -275,6 +274,8 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, pr_err("Error loading purgatory ret=%d\n", ret); goto out; } + kexec_dprintk("Loaded purgatory at 0x%lx\n", kbuf.mem); + ret = kexec_purgatory_get_set_symbol(image, "riscv_kernel_entry", &kernel_start, sizeof(kernel_start), 0); @@ -293,7 +294,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, if (ret) goto out; initrd_pbase = kbuf.mem; - pr_notice("Loaded initrd at 0x%lx\n", initrd_pbase); + kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_pbase); } /* Add the DTB to the image */ @@ -318,7 +319,7 @@ static void *elf_kexec_load(struct kimage *image, char *kernel_buf, } /* Cache the fdt buffer address for memory cleanup */ image->arch.fdt = fdt; - pr_notice("Loaded device tree at 0x%lx\n", kbuf.mem); + kexec_dprintk("Loaded device tree at 0x%lx\n", kbuf.mem); goto out; out_free_fdt: diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S index 54ca4564a9..9d1a305d55 100644 --- a/arch/riscv/kernel/entry.S +++ b/arch/riscv/kernel/entry.S @@ -83,6 +83,10 @@ SYM_CODE_START(handle_exception) /* Load the kernel shadow call stack pointer if coming from userspace */ scs_load_current_if_task_changed s5 +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + move a0, sp + call riscv_v_context_nesting_start +#endif move a0, sp /* pt_regs */ la ra, ret_from_exception @@ -138,6 +142,10 @@ SYM_CODE_START_NOALIGN(ret_from_exception) */ csrw CSR_SCRATCH, tp 1: +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + move a0, sp + call riscv_v_context_nesting_end +#endif REG_L a0, PT_STATUS(sp) /* * The current load reservation is effectively part of the processor's diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c index 03a6434a8c..f5aa24d9e1 100644 --- a/arch/riscv/kernel/ftrace.c +++ b/arch/riscv/kernel/ftrace.c @@ -178,32 +178,28 @@ void prepare_ftrace_return(unsigned long *parent, unsigned long 
self_addr, } #ifdef CONFIG_DYNAMIC_FTRACE +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +void ftrace_graph_func(unsigned long ip, unsigned long parent_ip, + struct ftrace_ops *op, struct ftrace_regs *fregs) +{ + struct pt_regs *regs = arch_ftrace_get_regs(fregs); + unsigned long *parent = (unsigned long *)&regs->ra; + + prepare_ftrace_return(parent, ip, frame_pointer(regs)); +} +#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ extern void ftrace_graph_call(void); -extern void ftrace_graph_regs_call(void); int ftrace_enable_ftrace_graph_caller(void) { - int ret; - - ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, - (unsigned long)&prepare_ftrace_return, true, true); - if (ret) - return ret; - - return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, + return __ftrace_modify_call((unsigned long)&ftrace_graph_call, (unsigned long)&prepare_ftrace_return, true, true); } int ftrace_disable_ftrace_graph_caller(void) { - int ret; - - ret = __ftrace_modify_call((unsigned long)&ftrace_graph_call, - (unsigned long)&prepare_ftrace_return, false, true); - if (ret) - return ret; - - return __ftrace_modify_call((unsigned long)&ftrace_graph_regs_call, + return __ftrace_modify_call((unsigned long)&ftrace_graph_call, (unsigned long)&prepare_ftrace_return, false, true); } +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ #endif /* CONFIG_DYNAMIC_FTRACE */ #endif /* CONFIG_FUNCTION_GRAPH_TRACER */ diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S index 663881785b..4236a69c35 100644 --- a/arch/riscv/kernel/head.S +++ b/arch/riscv/kernel/head.S @@ -11,7 +11,6 @@ #include <asm/page.h> #include <asm/pgtable.h> #include <asm/csr.h> -#include <asm/cpu_ops_sbi.h> #include <asm/hwcap.h> #include <asm/image.h> #include <asm/scs.h> @@ -266,10 +265,12 @@ SYM_CODE_START(_start_kernel) la sp, _end + THREAD_SIZE XIP_FIXUP_OFFSET sp mv s0, a0 + mv s1, a1 call __copy_data - /* Restore a0 copy */ + /* Restore a0 & a1 copy */ mv a0, s0 + mv a1, s1 #endif #ifndef CONFIG_XIP_KERNEL diff --git a/arch/riscv/kernel/kernel_mode_vector.c b/arch/riscv/kernel/kernel_mode_vector.c new file mode 100644 index 0000000000..6afe80c7f0 --- /dev/null +++ b/arch/riscv/kernel/kernel_mode_vector.c @@ -0,0 +1,247 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2012 ARM Ltd. + * Author: Catalin Marinas <catalin.marinas@arm.com> + * Copyright (C) 2017 Linaro Ltd. <ard.biesheuvel@linaro.org> + * Copyright (C) 2021 SiFive + */ +#include <linux/compiler.h> +#include <linux/irqflags.h> +#include <linux/percpu.h> +#include <linux/preempt.h> +#include <linux/types.h> + +#include <asm/vector.h> +#include <asm/switch_to.h> +#include <asm/simd.h> +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +#include <asm/asm-prototypes.h> +#endif + +static inline void riscv_v_flags_set(u32 flags) +{ + WRITE_ONCE(current->thread.riscv_v_flags, flags); +} + +static inline void riscv_v_start(u32 flags) +{ + int orig; + + orig = riscv_v_flags(); + BUG_ON((orig & flags) != 0); + riscv_v_flags_set(orig | flags); + barrier(); +} + +static inline void riscv_v_stop(u32 flags) +{ + int orig; + + barrier(); + orig = riscv_v_flags(); + BUG_ON((orig & flags) == 0); + riscv_v_flags_set(orig & ~flags); +} + +/* + * Claim ownership of the CPU vector context for use by the calling context. + * + * The caller may freely manipulate the vector context metadata until + * put_cpu_vector_context() is called.
+ */ +void get_cpu_vector_context(void) +{ + /* + * disable softirqs so it is impossible for softirqs to nest + * get_cpu_vector_context() when kernel is actively using Vector. + */ + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_disable(); + else + preempt_disable(); + + riscv_v_start(RISCV_KERNEL_MODE_V); +} + +/* + * Release the CPU vector context. + * + * Must be called from a context in which get_cpu_vector_context() was + * previously called, with no call to put_cpu_vector_context() in the + * meantime. + */ +void put_cpu_vector_context(void) +{ + riscv_v_stop(RISCV_KERNEL_MODE_V); + + if (!IS_ENABLED(CONFIG_PREEMPT_RT)) + local_bh_enable(); + else + preempt_enable(); +} + +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +static __always_inline u32 *riscv_v_flags_ptr(void) +{ + return ¤t->thread.riscv_v_flags; +} + +static inline void riscv_preempt_v_set_dirty(void) +{ + *riscv_v_flags_ptr() |= RISCV_PREEMPT_V_DIRTY; +} + +static inline void riscv_preempt_v_reset_flags(void) +{ + *riscv_v_flags_ptr() &= ~(RISCV_PREEMPT_V_DIRTY | RISCV_PREEMPT_V_NEED_RESTORE); +} + +static inline void riscv_v_ctx_depth_inc(void) +{ + *riscv_v_flags_ptr() += RISCV_V_CTX_UNIT_DEPTH; +} + +static inline void riscv_v_ctx_depth_dec(void) +{ + *riscv_v_flags_ptr() -= RISCV_V_CTX_UNIT_DEPTH; +} + +static inline u32 riscv_v_ctx_get_depth(void) +{ + return *riscv_v_flags_ptr() & RISCV_V_CTX_DEPTH_MASK; +} + +static int riscv_v_stop_kernel_context(void) +{ + if (riscv_v_ctx_get_depth() != 0 || !riscv_preempt_v_started(current)) + return 1; + + riscv_preempt_v_clear_dirty(current); + riscv_v_stop(RISCV_PREEMPT_V); + return 0; +} + +static int riscv_v_start_kernel_context(bool *is_nested) +{ + struct __riscv_v_ext_state *kvstate, *uvstate; + + kvstate = ¤t->thread.kernel_vstate; + if (!kvstate->datap) + return -ENOENT; + + if (riscv_preempt_v_started(current)) { + WARN_ON(riscv_v_ctx_get_depth() == 0); + *is_nested = true; + get_cpu_vector_context(); + if (riscv_preempt_v_dirty(current)) { + __riscv_v_vstate_save(kvstate, kvstate->datap); + riscv_preempt_v_clear_dirty(current); + } + riscv_preempt_v_set_restore(current); + return 0; + } + + /* Transfer the ownership of V from user to kernel, then save */ + riscv_v_start(RISCV_PREEMPT_V | RISCV_PREEMPT_V_DIRTY); + if ((task_pt_regs(current)->status & SR_VS) == SR_VS_DIRTY) { + uvstate = ¤t->thread.vstate; + __riscv_v_vstate_save(uvstate, uvstate->datap); + } + riscv_preempt_v_clear_dirty(current); + return 0; +} + +/* low-level V context handling code, called with irq disabled */ +asmlinkage void riscv_v_context_nesting_start(struct pt_regs *regs) +{ + int depth; + + if (!riscv_preempt_v_started(current)) + return; + + depth = riscv_v_ctx_get_depth(); + if (depth == 0 && (regs->status & SR_VS) == SR_VS_DIRTY) + riscv_preempt_v_set_dirty(); + + riscv_v_ctx_depth_inc(); +} + +asmlinkage void riscv_v_context_nesting_end(struct pt_regs *regs) +{ + struct __riscv_v_ext_state *vstate = ¤t->thread.kernel_vstate; + u32 depth; + + WARN_ON(!irqs_disabled()); + + if (!riscv_preempt_v_started(current)) + return; + + riscv_v_ctx_depth_dec(); + depth = riscv_v_ctx_get_depth(); + if (depth == 0) { + if (riscv_preempt_v_restore(current)) { + __riscv_v_vstate_restore(vstate, vstate->datap); + __riscv_v_vstate_clean(regs); + riscv_preempt_v_reset_flags(); + } + } +} +#else +#define riscv_v_start_kernel_context(nested) (-ENOENT) +#define riscv_v_stop_kernel_context() (-ENOENT) +#endif /* CONFIG_RISCV_ISA_V_PREEMPTIVE */ + +/* + * kernel_vector_begin(): obtain the CPU vector registers for use 
by the calling + * context + * + * Must not be called unless may_use_simd() returns true. + * Task context in the vector registers is saved back to memory as necessary. + * + * A matching call to kernel_vector_end() must be made before returning from the + * calling context. + * + * The caller may freely use the vector registers until kernel_vector_end() is + * called. + */ +void kernel_vector_begin(void) +{ + bool nested = false; + + if (WARN_ON(!has_vector())) + return; + + BUG_ON(!may_use_simd()); + + if (riscv_v_start_kernel_context(&nested)) { + get_cpu_vector_context(); + riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current)); + } + + if (!nested) + riscv_v_vstate_set_restore(current, task_pt_regs(current)); + + riscv_v_enable(); +} +EXPORT_SYMBOL_GPL(kernel_vector_begin); + +/* + * kernel_vector_end(): give the CPU vector registers back to the current task + * + * Must be called from a context in which kernel_vector_begin() was previously + * called, with no call to kernel_vector_end() in the meantime. + * + * The caller must not use the vector registers after this function is called, + * unless kernel_vector_begin() is called again in the meantime. + */ +void kernel_vector_end(void) +{ + if (WARN_ON(!has_vector())) + return; + + riscv_v_disable(); + + if (riscv_v_stop_kernel_context()) + put_cpu_vector_context(); +} +EXPORT_SYMBOL_GPL(kernel_vector_end); diff --git a/arch/riscv/kernel/machine_kexec.c b/arch/riscv/kernel/machine_kexec.c index 2d139b724b..ed9cad20c0 100644 --- a/arch/riscv/kernel/machine_kexec.c +++ b/arch/riscv/kernel/machine_kexec.c @@ -19,30 +19,6 @@ #include <linux/irq.h> /* - * kexec_image_info - Print received image details - */ -static void -kexec_image_info(const struct kimage *image) -{ - unsigned long i; - - pr_debug("Kexec image info:\n"); - pr_debug("\ttype: %d\n", image->type); - pr_debug("\tstart: %lx\n", image->start); - pr_debug("\thead: %lx\n", image->head); - pr_debug("\tnr_segments: %lu\n", image->nr_segments); - - for (i = 0; i < image->nr_segments; i++) { - pr_debug("\t segment[%lu]: %016lx - %016lx", i, - image->segment[i].mem, - image->segment[i].mem + image->segment[i].memsz); - pr_debug("\t\t0x%lx bytes, %lu pages\n", - (unsigned long) image->segment[i].memsz, - (unsigned long) image->segment[i].memsz / PAGE_SIZE); - } -} - -/* * machine_kexec_prepare - Initialize kexec * * This function is called from do_kexec_load, when the user has @@ -60,8 +36,6 @@ machine_kexec_prepare(struct kimage *image) unsigned int control_code_buffer_sz = 0; int i = 0; - kexec_image_info(image); - /* Find the Flattened Device Tree and save its physical address */ for (i = 0; i < image->nr_segments; i++) { if (image->segment[i].memsz <= sizeof(fdt)) diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S index 58dd96a2a1..b7561288e8 100644 --- a/arch/riscv/kernel/mcount-dyn.S +++ b/arch/riscv/kernel/mcount-dyn.S @@ -3,12 +3,12 @@ #include <linux/init.h> #include <linux/linkage.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/csr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> -#include <asm-generic/export.h> #include <asm/ftrace.h> .text @@ -57,31 +57,150 @@ .endm #ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS - .macro SAVE_ALL + +/** +* SAVE_ABI_REGS - save regs against the pt_regs struct +* +* @all: tell if saving all the regs +* +* If all is set, all the regs will be saved, otherwise only ABI +* related regs (a0-a7,epc,ra and optional s0) will be saved. 
+* +* After the stack is established, +* +* 0(sp) stores the PC of the traced function which can be accessed +* by &(fregs)->regs->epc in tracing function. Note that the real +* function entry address should be computed with -FENTRY_RA_OFFSET. +* +* 8(sp) stores the function return address (i.e. parent IP) that +* can be accessed by &(fregs)->regs->ra in tracing function. +* +* The other regs are saved at the respective location and accessed +* by the respective pt_regs member. +* +* Here is the layout of stack for your reference. +* +* PT_SIZE_ON_STACK -> +++++++++ +* + ..... + +* + t3-t6 + +* + s2-s11+ +* + a0-a7 + --++++-> ftrace_caller saved +* + s1 + + +* + s0 + --+ +* + t0-t2 + + +* + tp + + +* + gp + + +* + sp + + +* + ra + --+ // parent IP +* sp -> + epc + --+ // PC +* +++++++++ +**/ .macro SAVE_ABI_REGS, all=0 addi sp, sp, -PT_SIZE_ON_STACK - REG_S t0, PT_EPC(sp) - REG_S x1, PT_RA(sp) - REG_S x2, PT_SP(sp) - REG_S x3, PT_GP(sp) - REG_S x4, PT_TP(sp) - REG_S x5, PT_T0(sp) - save_from_x6_to_x31 + REG_S t0, PT_EPC(sp) + REG_S x1, PT_RA(sp) + + // save the ABI regs + + REG_S x10, PT_A0(sp) + REG_S x11, PT_A1(sp) + REG_S x12, PT_A2(sp) + REG_S x13, PT_A3(sp) + REG_S x14, PT_A4(sp) + REG_S x15, PT_A5(sp) + REG_S x16, PT_A6(sp) + REG_S x17, PT_A7(sp) + + // save the leftover regs + + .if \all == 1 + REG_S x2, PT_SP(sp) + REG_S x3, PT_GP(sp) + REG_S x4, PT_TP(sp) + REG_S x5, PT_T0(sp) + REG_S x6, PT_T1(sp) + REG_S x7, PT_T2(sp) + REG_S x8, PT_S0(sp) + REG_S x9, PT_S1(sp) + REG_S x18, PT_S2(sp) + REG_S x19, PT_S3(sp) + REG_S x20, PT_S4(sp) + REG_S x21, PT_S5(sp) + REG_S x22, PT_S6(sp) + REG_S x23, PT_S7(sp) + REG_S x24, PT_S8(sp) + REG_S x25, PT_S9(sp) + REG_S x26, PT_S10(sp) + REG_S x27, PT_S11(sp) + REG_S x28, PT_T3(sp) + REG_S x29, PT_T4(sp) + REG_S x30, PT_T5(sp) + REG_S x31, PT_T6(sp) + + // save s0 if FP_TEST defined + + .else +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + REG_S x8, PT_S0(sp) +#endif + .endif .endm - .macro RESTORE_ALL - REG_L x1, PT_RA(sp) - REG_L x2, PT_SP(sp) - REG_L x3, PT_GP(sp) - REG_L x4, PT_TP(sp) - /* Restore t0 with PT_EPC */ - REG_L x5, PT_EPC(sp) - restore_from_x6_to_x31 + .macro RESTORE_ABI_REGS, all=0 + REG_L t0, PT_EPC(sp) + REG_L x1, PT_RA(sp) + REG_L x10, PT_A0(sp) + REG_L x11, PT_A1(sp) + REG_L x12, PT_A2(sp) + REG_L x13, PT_A3(sp) + REG_L x14, PT_A4(sp) + REG_L x15, PT_A5(sp) + REG_L x16, PT_A6(sp) + REG_L x17, PT_A7(sp) + .if \all == 1 + REG_L x2, PT_SP(sp) + REG_L x3, PT_GP(sp) + REG_L x4, PT_TP(sp) + REG_L x6, PT_T1(sp) + REG_L x7, PT_T2(sp) + REG_L x8, PT_S0(sp) + REG_L x9, PT_S1(sp) + REG_L x18, PT_S2(sp) + REG_L x19, PT_S3(sp) + REG_L x20, PT_S4(sp) + REG_L x21, PT_S5(sp) + REG_L x22, PT_S6(sp) + REG_L x23, PT_S7(sp) + REG_L x24, PT_S8(sp) + REG_L x25, PT_S9(sp) + REG_L x26, PT_S10(sp) + REG_L x27, PT_S11(sp) + REG_L x28, PT_T3(sp) + REG_L x29, PT_T4(sp) + REG_L x30, PT_T5(sp) + REG_L x31, PT_T6(sp) + + .else +#ifdef HAVE_FUNCTION_GRAPH_FP_TEST + REG_L x8, PT_S0(sp) +#endif + .endif addi sp, sp, PT_SIZE_ON_STACK .endm + + .macro PREPARE_ARGS + addi a0, t0, -FENTRY_RA_OFFSET + la a1, function_trace_op + REG_L a2, 0(a1) + mv a1, ra + mv a3, sp + .endm + #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ +#ifndef CONFIG_DYNAMIC_FTRACE_WITH_REGS SYM_FUNC_START(ftrace_caller) SAVE_ABI @@ -105,34 +224,39 @@ SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL) call ftrace_stub #endif RESTORE_ABI - jr t0 + jr t0 SYM_FUNC_END(ftrace_caller) -#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS +#else /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ SYM_FUNC_START(ftrace_regs_caller) -
SAVE_ALL - - addi a0, t0, -FENTRY_RA_OFFSET - la a1, function_trace_op - REG_L a2, 0(a1) - mv a1, ra - mv a3, sp + mv t1, zero + SAVE_ABI_REGS 1 + PREPARE_ARGS SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL) call ftrace_stub -#ifdef CONFIG_FUNCTION_GRAPH_TRACER - addi a0, sp, PT_RA - REG_L a1, PT_EPC(sp) - addi a1, a1, -FENTRY_RA_OFFSET -#ifdef HAVE_FUNCTION_GRAPH_FP_TEST - mv a2, s0 -#endif -SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL) + RESTORE_ABI_REGS 1 + bnez t1, .Ldirect + jr t0 +.Ldirect: + jr t1 +SYM_FUNC_END(ftrace_regs_caller) + +SYM_FUNC_START(ftrace_caller) + SAVE_ABI_REGS 0 + PREPARE_ARGS + +SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL) call ftrace_stub -#endif - RESTORE_ALL - jr t0 -SYM_FUNC_END(ftrace_regs_caller) + RESTORE_ABI_REGS 0 + jr t0 +SYM_FUNC_END(ftrace_caller) #endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */ + +#ifdef CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS +SYM_CODE_START(ftrace_stub_direct_tramp) + jr t0 +SYM_CODE_END(ftrace_stub_direct_tramp) +#endif /* CONFIG_DYNAMIC_FTRACE_WITH_DIRECT_CALLS */ diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S index b4dd9ed684..d7ec69ac69 100644 --- a/arch/riscv/kernel/mcount.S +++ b/arch/riscv/kernel/mcount.S @@ -4,12 +4,12 @@ #include <linux/init.h> #include <linux/linkage.h> #include <linux/cfi_types.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/csr.h> #include <asm/unistd.h> #include <asm/thread_info.h> #include <asm/asm-offsets.h> -#include <asm-generic/export.h> #include <asm/ftrace.h> .text diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c index c9d59a5448..5e5a826444 100644 --- a/arch/riscv/kernel/module.c +++ b/arch/riscv/kernel/module.c @@ -783,6 +783,7 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, Elf_Sym *sym; void *location; unsigned int i, type; + unsigned int j_idx = 0; Elf_Addr v; int res; unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel); @@ -833,9 +834,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, v = sym->st_value + rel[i].r_addend; if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) { - unsigned int j; + unsigned int j = j_idx; + bool found = false; - for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) { + do { unsigned long hi20_loc = sechdrs[sechdrs[relsec].sh_info].sh_addr + rel[j].r_offset; @@ -864,16 +866,26 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab, hi20 = (offset + 0x800) & 0xfffff000; lo12 = offset - hi20; v = lo12; + found = true; break; } - } - if (j == sechdrs[relsec].sh_size / sizeof(*rel)) { + + j++; + if (j > sechdrs[relsec].sh_size / sizeof(*rel)) + j = 0; + + } while (j_idx != j); + + if (!found) { pr_err( "%s: Can not find HI20 relocation information\n", me->name); return -EINVAL; } + + /* Record the previous j-loop end index */ + j_idx = j; } if (reloc_handlers[type].accumulate_handler) diff --git a/arch/riscv/kernel/paravirt.c b/arch/riscv/kernel/paravirt.c new file mode 100644 index 0000000000..0d6225fd31 --- /dev/null +++ b/arch/riscv/kernel/paravirt.c @@ -0,0 +1,135 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (c) 2023 Ventana Micro Systems Inc. 
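One note on the apply_relocate_add() hunk above: the old code rescanned the relocation table from index 0 for every PCREL_LO12 entry, which is quadratic; the new code resumes the HI20 search from the index of the previous match (j_idx) and wraps around at most once, which is near-linear when LO12 entries appear in the same order as their HI20 partners. A generic sketch of that search pattern, with hypothetical names:

/* Wrap-around search that resumes from the last hit. */
static int find_from_hint(const unsigned long *offsets, int n,
			  int hint, unsigned long want)
{
	int j = hint;

	do {
		if (offsets[j] == want)
			return j;	/* caller stores j as the next hint */
		if (++j >= n)
			j = 0;		/* wrap around once */
	} while (j != hint);

	return -1;			/* scanned every entry, no match */
}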
+ */ + +#define pr_fmt(fmt) "riscv-pv: " fmt + +#include <linux/cpuhotplug.h> +#include <linux/compiler.h> +#include <linux/errno.h> +#include <linux/init.h> +#include <linux/jump_label.h> +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/percpu-defs.h> +#include <linux/printk.h> +#include <linux/static_call.h> +#include <linux/types.h> + +#include <asm/barrier.h> +#include <asm/page.h> +#include <asm/paravirt.h> +#include <asm/sbi.h> + +struct static_key paravirt_steal_enabled; +struct static_key paravirt_steal_rq_enabled; + +static u64 native_steal_clock(int cpu) +{ + return 0; +} + +DEFINE_STATIC_CALL(pv_steal_clock, native_steal_clock); + +static bool steal_acc = true; +static int __init parse_no_stealacc(char *arg) +{ + steal_acc = false; + return 0; +} + +early_param("no-steal-acc", parse_no_stealacc); + +static DEFINE_PER_CPU(struct sbi_sta_struct, steal_time) __aligned(64); + +static bool __init has_pv_steal_clock(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_STA) > 0) { + pr_info("SBI STA extension detected\n"); + return true; + } + + return false; +} + +static int sbi_sta_steal_time_set_shmem(unsigned long lo, unsigned long hi, + unsigned long flags) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_STA, SBI_EXT_STA_STEAL_TIME_SET_SHMEM, + lo, hi, flags, 0, 0, 0); + if (ret.error) { + if (lo == SBI_STA_SHMEM_DISABLE && hi == SBI_STA_SHMEM_DISABLE) + pr_warn("Failed to disable steal-time shmem"); + else + pr_warn("Failed to set steal-time shmem"); + return sbi_err_map_linux_errno(ret.error); + } + + return 0; +} + +static int pv_time_cpu_online(unsigned int cpu) +{ + struct sbi_sta_struct *st = this_cpu_ptr(&steal_time); + phys_addr_t pa = __pa(st); + unsigned long lo = (unsigned long)pa; + unsigned long hi = IS_ENABLED(CONFIG_32BIT) ? upper_32_bits((u64)pa) : 0; + + return sbi_sta_steal_time_set_shmem(lo, hi, 0); +} + +static int pv_time_cpu_down_prepare(unsigned int cpu) +{ + return sbi_sta_steal_time_set_shmem(SBI_STA_SHMEM_DISABLE, + SBI_STA_SHMEM_DISABLE, 0); +} + +static u64 pv_time_steal_clock(int cpu) +{ + struct sbi_sta_struct *st = per_cpu_ptr(&steal_time, cpu); + __le32 sequence; + __le64 steal; + + /* + * Check the sequence field before and after reading the steal + * field. Repeat the read if it is different or odd. 
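The comment above describes the reader side of a seqcount-style protocol. For contrast, the writer (the hypervisor updating the SBI STA shared page) is expected to make the sequence odd while the record is inconsistent; this sketch of the writer side is illustrative only and is not part of this patch:

/* Hypothetical hypervisor-side update, assuming 'sequence' starts even. */
static void sta_publish_steal(struct sbi_sta_struct *st, u64 steal_ns)
{
	u32 seq = le32_to_cpu(st->sequence);

	st->sequence = cpu_to_le32(seq + 1);	/* odd: update in flight */
	smp_wmb();
	st->steal = cpu_to_le64(steal_ns);
	smp_wmb();
	st->sequence = cpu_to_le32(seq + 2);	/* even again: record stable */
}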
+ */ + do { + sequence = READ_ONCE(st->sequence); + virt_rmb(); + steal = READ_ONCE(st->steal); + virt_rmb(); + } while ((le32_to_cpu(sequence) & 1) || + sequence != READ_ONCE(st->sequence)); + + return le64_to_cpu(steal); +} + +int __init pv_time_init(void) +{ + int ret; + + if (!has_pv_steal_clock()) + return 0; + + ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, + "riscv/pv_time:online", + pv_time_cpu_online, + pv_time_cpu_down_prepare); + if (ret < 0) + return ret; + + static_call_update(pv_steal_clock, pv_time_steal_clock); + + static_key_slow_inc(¶virt_steal_enabled); + if (steal_acc) + static_key_slow_inc(¶virt_steal_rq_enabled); + + pr_info("Computing paravirt steal-time\n"); + + return 0; +} diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c index 37e87fdcf6..30e12b310c 100644 --- a/arch/riscv/kernel/patch.c +++ b/arch/riscv/kernel/patch.c @@ -80,6 +80,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len) */ lockdep_assert_held(&text_mutex); + preempt_disable(); + if (across_pages) patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1); @@ -92,6 +94,8 @@ static int __patch_insn_set(void *addr, u8 c, size_t len) if (across_pages) patch_unmap(FIX_TEXT_POKE1); + preempt_enable(); + return 0; } NOKPROBE_SYMBOL(__patch_insn_set); @@ -122,6 +126,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len) if (!riscv_patch_in_stop_machine) lockdep_assert_held(&text_mutex); + preempt_disable(); + if (across_pages) patch_map(addr + PAGE_SIZE, FIX_TEXT_POKE1); @@ -134,6 +140,8 @@ static int __patch_insn_write(void *addr, const void *insn, size_t len) if (across_pages) patch_unmap(FIX_TEXT_POKE1); + preempt_enable(); + return ret; } NOKPROBE_SYMBOL(__patch_insn_write); diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c index 4f21d970a1..e4bc61c4e5 100644 --- a/arch/riscv/kernel/process.c +++ b/arch/riscv/kernel/process.c @@ -27,8 +27,6 @@ #include <asm/vector.h> #include <asm/cpufeature.h> -register unsigned long gp_in_global __asm__("gp"); - #if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_STACKPROTECTOR_PER_TASK) #include <linux/stackprotector.h> unsigned long __stack_chk_guard __read_mostly; @@ -37,7 +35,7 @@ EXPORT_SYMBOL(__stack_chk_guard); extern asmlinkage void ret_from_fork(void); -void arch_cpu_idle(void) +void noinstr arch_cpu_idle(void) { cpu_do_idle(); } @@ -171,6 +169,7 @@ void flush_thread(void) riscv_v_vstate_off(task_pt_regs(current)); kfree(current->thread.vstate.datap); memset(¤t->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); + clear_tsk_thread_flag(current, TIF_RISCV_V_DEFER_RESTORE); #endif } @@ -178,7 +177,7 @@ void arch_release_task_struct(struct task_struct *tsk) { /* Free the vector context of datap. 
*/ if (has_vector()) - kfree(tsk->thread.vstate.datap); + riscv_v_thread_free(tsk); } int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) @@ -187,6 +186,8 @@ int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src) *dst = *src; /* clear entire V context, including datap for a new task */ memset(&dst->thread.vstate, 0, sizeof(struct __riscv_v_ext_state)); + memset(&dst->thread.kernel_vstate, 0, sizeof(struct __riscv_v_ext_state)); + clear_tsk_thread_flag(dst, TIF_RISCV_V_DEFER_RESTORE); return 0; } @@ -204,7 +205,6 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) if (unlikely(args->fn)) { /* Kernel thread */ memset(childregs, 0, sizeof(struct pt_regs)); - childregs->gp = gp_in_global; /* Supervisor/Machine, irqs on: */ childregs->status = SR_PP | SR_PIE; @@ -221,7 +221,15 @@ int copy_thread(struct task_struct *p, const struct kernel_clone_args *args) childregs->a0 = 0; /* Return value of fork() */ p->thread.s[0] = 0; } + p->thread.riscv_v_flags = 0; + if (has_vector()) + riscv_v_thread_alloc(p); p->thread.ra = (unsigned long)ret_from_fork; p->thread.sp = (unsigned long)childregs; /* kernel sp */ return 0; } + +void __init arch_task_cache_init(void) +{ + riscv_v_setup_ctx_cache(); +} diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c index 2afe460de1..e8515aa9d8 100644 --- a/arch/riscv/kernel/ptrace.c +++ b/arch/riscv/kernel/ptrace.c @@ -99,8 +99,11 @@ static int riscv_vr_get(struct task_struct *target, * Ensure the vector registers have been saved to the memory before * copying them to membuf. */ - if (target == current) - riscv_v_vstate_save(current, task_pt_regs(current)); + if (target == current) { + get_cpu_vector_context(); + riscv_v_vstate_save(¤t->thread.vstate, task_pt_regs(current)); + put_cpu_vector_context(); + } ptrace_vstate.vstart = vstate->vstart; ptrace_vstate.vl = vstate->vl; diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c index 5a62ed1da4..e66e0999a8 100644 --- a/arch/riscv/kernel/sbi.c +++ b/arch/riscv/kernel/sbi.c @@ -7,6 +7,7 @@ #include <linux/bits.h> #include <linux/init.h> +#include <linux/mm.h> #include <linux/pm.h> #include <linux/reboot.h> #include <asm/sbi.h> @@ -571,6 +572,66 @@ long sbi_get_mimpid(void) } EXPORT_SYMBOL_GPL(sbi_get_mimpid); +bool sbi_debug_console_available; + +int sbi_debug_console_write(const char *bytes, unsigned int num_bytes) +{ + phys_addr_t base_addr; + struct sbiret ret; + + if (!sbi_debug_console_available) + return -EOPNOTSUPP; + + if (is_vmalloc_addr(bytes)) + base_addr = page_to_phys(vmalloc_to_page(bytes)) + + offset_in_page(bytes); + else + base_addr = __pa(bytes); + if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes)) + num_bytes = PAGE_SIZE - offset_in_page(bytes); + + if (IS_ENABLED(CONFIG_32BIT)) + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE, + num_bytes, lower_32_bits(base_addr), + upper_32_bits(base_addr), 0, 0, 0); + else + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_WRITE, + num_bytes, base_addr, 0, 0, 0, 0); + + if (ret.error == SBI_ERR_FAILURE) + return -EIO; + return ret.error ? 
sbi_err_map_linux_errno(ret.error) : ret.value; +} + +int sbi_debug_console_read(char *bytes, unsigned int num_bytes) +{ + phys_addr_t base_addr; + struct sbiret ret; + + if (!sbi_debug_console_available) + return -EOPNOTSUPP; + + if (is_vmalloc_addr(bytes)) + base_addr = page_to_phys(vmalloc_to_page(bytes)) + + offset_in_page(bytes); + else + base_addr = __pa(bytes); + if (PAGE_SIZE < (offset_in_page(bytes) + num_bytes)) + num_bytes = PAGE_SIZE - offset_in_page(bytes); + + if (IS_ENABLED(CONFIG_32BIT)) + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ, + num_bytes, lower_32_bits(base_addr), + upper_32_bits(base_addr), 0, 0, 0); + else + ret = sbi_ecall(SBI_EXT_DBCN, SBI_EXT_DBCN_CONSOLE_READ, + num_bytes, base_addr, 0, 0, 0, 0); + + if (ret.error == SBI_ERR_FAILURE) + return -EIO; + return ret.error ? sbi_err_map_linux_errno(ret.error) : ret.value; +} + void __init sbi_init(void) { int ret; @@ -612,6 +673,11 @@ void __init sbi_init(void) sbi_srst_reboot_nb.priority = 192; register_restart_handler(&sbi_srst_reboot_nb); } + if ((sbi_spec_version >= sbi_mk_version(2, 0)) && + (sbi_probe_extension(SBI_EXT_DBCN) > 0)) { + pr_info("SBI DBCN extension detected\n"); + sbi_debug_console_available = true; + } } else { __sbi_set_timer = __sbi_set_timer_v01; __sbi_send_ipi = __sbi_send_ipi_v01; diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c index 535a837de5..4f73c0ae44 100644 --- a/arch/riscv/kernel/setup.c +++ b/arch/riscv/kernel/setup.c @@ -26,7 +26,6 @@ #include <asm/alternative.h> #include <asm/cacheflush.h> #include <asm/cpufeature.h> -#include <asm/cpu_ops.h> #include <asm/early_ioremap.h> #include <asm/pgtable.h> #include <asm/setup.h> @@ -51,7 +50,6 @@ atomic_t hart_lottery __section(".sdata") #endif ; unsigned long boot_cpu_hartid; -static DEFINE_PER_CPU(struct cpu, cpu_devices); /* * Place kernel memory regions on the resource tree so that @@ -299,23 +297,10 @@ void __init setup_arch(char **cmdline_p) riscv_user_isa_enable(); } -static int __init topology_init(void) +bool arch_cpu_is_hotpluggable(int cpu) { - int i, ret; - - for_each_possible_cpu(i) { - struct cpu *cpu = &per_cpu(cpu_devices, i); - - cpu->hotpluggable = cpu_has_hotplug(i); - ret = register_cpu(cpu, i); - if (unlikely(ret)) - pr_warn("Warning: %s: register_cpu %d failed (%d)\n", - __func__, i, ret); - } - - return 0; + return cpu_has_hotplug(cpu); } -subsys_initcall(topology_init); void free_initmem(void) { diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c index 88b6220b26..5a2edd7f02 100644 --- a/arch/riscv/kernel/signal.c +++ b/arch/riscv/kernel/signal.c @@ -86,12 +86,15 @@ static long save_v_state(struct pt_regs *regs, void __user **sc_vec) /* datap is designed to be 16 byte aligned for better performance */ WARN_ON(unlikely(!IS_ALIGNED((unsigned long)datap, 16))); - riscv_v_vstate_save(current, regs); + get_cpu_vector_context(); + riscv_v_vstate_save(¤t->thread.vstate, regs); + put_cpu_vector_context(); + /* Copy everything of vstate but datap. */ err = __copy_to_user(&state->v_state, ¤t->thread.vstate, offsetof(struct __riscv_v_ext_state, datap)); /* Copy the pointer datap itself. */ - err |= __put_user(datap, &state->v_state.datap); + err |= __put_user((__force void *)datap, &state->v_state.datap); /* Copy the whole vector content to user space datap. 
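Looping back to the SBI debug-console helpers introduced earlier in this diff: sbi_debug_console_write() clamps a transfer at the containing page boundary and returns the byte count actually consumed, so any caller printing an arbitrary buffer needs a short retry loop. A hypothetical consumer, not code from this patch:

/* Hypothetical consumer that drains a buffer through the DBCN write call. */
static void dbcn_puts(const char *s, unsigned int len)
{
	while (len) {
		int ret = sbi_debug_console_write(s, len);

		if (ret <= 0)	/* error or no forward progress: give up */
			break;
		s += ret;
		len -= ret;
	}
}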
*/ err |= __copy_to_user(datap, current->thread.vstate.datap, riscv_v_vsize); /* Copy magic to the user space after saving all vector context */ @@ -116,6 +119,13 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) struct __sc_riscv_v_state __user *state = sc_vec; void __user *datap; + /* + * Mark the vstate as clean prior to performing the actual copy, + * to avoid getting the vstate incorrectly clobbered by the + * discarded vector state. + */ + riscv_v_vstate_set_restore(current, regs); + /* Copy everything of __sc_riscv_v_state except datap. */ err = __copy_from_user(&current->thread.vstate, &state->v_state, offsetof(struct __riscv_v_ext_state, datap)); @@ -130,13 +140,7 @@ static long __restore_v_state(struct pt_regs *regs, void __user *sc_vec) * Copy the whole vector content from user space datap. Use * copy_from_user to prevent information leak. */ - err = copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); - if (unlikely(err)) - return err; - - riscv_v_vstate_restore(current, regs); - - return err; + return copy_from_user(current->thread.vstate.datap, datap, riscv_v_vsize); } #else #define save_v_state(task, regs) (0) diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c index 40420afbb1..45dd403541 100644 --- a/arch/riscv/kernel/smp.c +++ b/arch/riscv/kernel/smp.c @@ -81,7 +81,7 @@ static inline void ipi_cpu_crash_stop(unsigned int cpu, struct pt_regs *regs) #ifdef CONFIG_HOTPLUG_CPU if (cpu_has_hotplug(cpu)) - cpu_ops[cpu]->cpu_stop(); + cpu_ops->cpu_stop(); #endif for(;;) diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c index d162bf339b..519b6bd946 100644 --- a/arch/riscv/kernel/smpboot.c +++ b/arch/riscv/kernel/smpboot.c @@ -49,7 +49,6 @@ void __init smp_prepare_boot_cpu(void) void __init smp_prepare_cpus(unsigned int max_cpus) { int cpuid; - int ret; unsigned int curr_cpuid; init_cpu_topology(); @@ -66,11 +65,6 @@ void __init smp_prepare_cpus(unsigned int max_cpus) for_each_possible_cpu(cpuid) { if (cpuid == curr_cpuid) continue; - if (cpu_ops[cpuid]->cpu_prepare) { - ret = cpu_ops[cpuid]->cpu_prepare(cpuid); - if (ret) - continue; - } set_cpu_present(cpuid, true); numa_store_cpu_info(cpuid); } @@ -125,18 +119,7 @@ static int __init acpi_parse_rintc(union acpi_subtable_headers *header, const un static void __init acpi_parse_and_init_cpus(void) { - int cpuid; - - cpu_set_ops(0); - acpi_table_parse_madt(ACPI_MADT_TYPE_RINTC, acpi_parse_rintc, 0); - - for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) { - if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) { - cpu_set_ops(cpuid); - set_cpu_possible(cpuid, true); - } - } } #else #define acpi_parse_and_init_cpus(...) 
do { } while (0) @@ -150,8 +133,6 @@ static void __init of_parse_and_init_cpus(void) int cpuid = 1; int rc; - cpu_set_ops(0); - for_each_of_cpu_node(dn) { rc = riscv_early_of_processor_hartid(dn, &hart); if (rc < 0) @@ -179,27 +160,28 @@ static void __init of_parse_and_init_cpus(void) if (cpuid > nr_cpu_ids) pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n", cpuid, nr_cpu_ids); - - for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) { - if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) { - cpu_set_ops(cpuid); - set_cpu_possible(cpuid, true); - } - } } void __init setup_smp(void) { + int cpuid; + + cpu_set_ops(); + if (acpi_disabled) of_parse_and_init_cpus(); else acpi_parse_and_init_cpus(); + + for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) + if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) + set_cpu_possible(cpuid, true); } static int start_secondary_cpu(int cpu, struct task_struct *tidle) { - if (cpu_ops[cpu]->cpu_start) - return cpu_ops[cpu]->cpu_start(cpu, tidle); + if (cpu_ops->cpu_start) + return cpu_ops->cpu_start(cpu, tidle); return -EOPNOTSUPP; } diff --git a/arch/riscv/kernel/suspend.c b/arch/riscv/kernel/suspend.c index 3c89b8ec69..299795341e 100644 --- a/arch/riscv/kernel/suspend.c +++ b/arch/riscv/kernel/suspend.c @@ -4,13 +4,19 @@ * Copyright (c) 2022 Ventana Micro Systems Inc. */ +#define pr_fmt(fmt) "suspend: " fmt + #include <linux/ftrace.h> +#include <linux/suspend.h> #include <asm/csr.h> +#include <asm/sbi.h> #include <asm/suspend.h> void suspend_save_csrs(struct suspend_context *context) { context->scratch = csr_read(CSR_SCRATCH); + if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + context->envcfg = csr_read(CSR_ENVCFG); context->tvec = csr_read(CSR_TVEC); context->ie = csr_read(CSR_IE); @@ -32,6 +38,8 @@ void suspend_save_csrs(struct suspend_context *context) void suspend_restore_csrs(struct suspend_context *context) { csr_write(CSR_SCRATCH, context->scratch); + if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_XLINUXENVCFG)) + csr_write(CSR_ENVCFG, context->envcfg); csr_write(CSR_TVEC, context->tvec); csr_write(CSR_IE, context->ie); @@ -85,3 +93,43 @@ int cpu_suspend(unsigned long arg, return rc; } + +#ifdef CONFIG_RISCV_SBI +static int sbi_system_suspend(unsigned long sleep_type, + unsigned long resume_addr, + unsigned long opaque) +{ + struct sbiret ret; + + ret = sbi_ecall(SBI_EXT_SUSP, SBI_EXT_SUSP_SYSTEM_SUSPEND, + sleep_type, resume_addr, opaque, 0, 0, 0); + if (ret.error) + return sbi_err_map_linux_errno(ret.error); + + return ret.value; +} + +static int sbi_system_suspend_enter(suspend_state_t state) +{ + return cpu_suspend(SBI_SUSP_SLEEP_TYPE_SUSPEND_TO_RAM, sbi_system_suspend); +} + +static const struct platform_suspend_ops sbi_system_suspend_ops = { + .valid = suspend_valid_only_mem, + .enter = sbi_system_suspend_enter, +}; + +static int __init sbi_system_suspend_init(void) +{ + if (sbi_spec_version >= sbi_mk_version(2, 0) && + sbi_probe_extension(SBI_EXT_SUSP) > 0) { + pr_info("SBI SUSP extension detected\n"); + if (IS_ENABLED(CONFIG_SUSPEND)) + suspend_set_ops(&sbi_system_suspend_ops); + } + + return 0; +} + +arch_initcall(sbi_system_suspend_init); +#endif /* CONFIG_RISCV_SBI */ diff --git a/arch/riscv/kernel/sys_hwprobe.c b/arch/riscv/kernel/sys_hwprobe.c new file mode 100644 index 0000000000..a7c56b41ef --- /dev/null +++ b/arch/riscv/kernel/sys_hwprobe.c @@ -0,0 +1,411 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * The hwprobe interface, for allowing userspace to 
probe to see which features + * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for + * more details. + */ +#include <linux/syscalls.h> +#include <asm/cacheflush.h> +#include <asm/cpufeature.h> +#include <asm/hwprobe.h> +#include <asm/sbi.h> +#include <asm/switch_to.h> +#include <asm/uaccess.h> +#include <asm/unistd.h> +#include <asm/vector.h> +#include <vdso/vsyscall.h> + + +static void hwprobe_arch_id(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + u64 id = -1ULL; + bool first = true; + int cpu; + + for_each_cpu(cpu, cpus) { + u64 cpu_id; + + switch (pair->key) { + case RISCV_HWPROBE_KEY_MVENDORID: + cpu_id = riscv_cached_mvendorid(cpu); + break; + case RISCV_HWPROBE_KEY_MIMPID: + cpu_id = riscv_cached_mimpid(cpu); + break; + case RISCV_HWPROBE_KEY_MARCHID: + cpu_id = riscv_cached_marchid(cpu); + break; + } + + if (first) { + id = cpu_id; + first = false; + } + + /* + * If there's a mismatch for the given set, return -1 in the + * value. + */ + if (id != cpu_id) { + id = -1ULL; + break; + } + } + + pair->value = id; +} + +static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + int cpu; + u64 missing = 0; + + pair->value = 0; + if (has_fpu()) + pair->value |= RISCV_HWPROBE_IMA_FD; + + if (riscv_isa_extension_available(NULL, c)) + pair->value |= RISCV_HWPROBE_IMA_C; + + if (has_vector()) + pair->value |= RISCV_HWPROBE_IMA_V; + + /* + * Loop through and record extensions that 1) anyone has, and 2) anyone + * doesn't have. + */ + for_each_cpu(cpu, cpus) { + struct riscv_isainfo *isainfo = &hart_isa[cpu]; + +#define EXT_KEY(ext) \ + do { \ + if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ + pair->value |= RISCV_HWPROBE_EXT_##ext; \ + else \ + missing |= RISCV_HWPROBE_EXT_##ext; \ + } while (false) + + /* + * Only use EXT_KEY() for extensions which can be exposed to userspace, + * regardless of the kernel's configuration, as no other checks, besides + * presence in the hart_isa bitmap, are made. + */ + EXT_KEY(ZBA); + EXT_KEY(ZBB); + EXT_KEY(ZBS); + EXT_KEY(ZICBOZ); + EXT_KEY(ZBC); + + EXT_KEY(ZBKB); + EXT_KEY(ZBKC); + EXT_KEY(ZBKX); + EXT_KEY(ZKND); + EXT_KEY(ZKNE); + EXT_KEY(ZKNH); + EXT_KEY(ZKSED); + EXT_KEY(ZKSH); + EXT_KEY(ZKT); + EXT_KEY(ZIHINTNTL); + EXT_KEY(ZTSO); + EXT_KEY(ZACAS); + EXT_KEY(ZICOND); + + if (has_vector()) { + EXT_KEY(ZVBB); + EXT_KEY(ZVBC); + EXT_KEY(ZVKB); + EXT_KEY(ZVKG); + EXT_KEY(ZVKNED); + EXT_KEY(ZVKNHA); + EXT_KEY(ZVKNHB); + EXT_KEY(ZVKSED); + EXT_KEY(ZVKSH); + EXT_KEY(ZVKT); + EXT_KEY(ZVFH); + EXT_KEY(ZVFHMIN); + } + + if (has_fpu()) { + EXT_KEY(ZFH); + EXT_KEY(ZFHMIN); + EXT_KEY(ZFA); + } +#undef EXT_KEY + } + + /* Now turn off reporting features if any CPU is missing it. 
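Because hwprobe_isa_ext0() clears any bit that at least one CPU in the queried set lacks, userspace always receives the intersection across the selected harts. A hypothetical userspace query; the syscall number and uapi header are assumptions consistent with Documentation/arch/riscv/hwprobe.rst rather than something shown in this diff:

/* Hypothetical userspace check for Zba across all online CPUs. */
#include <asm/hwprobe.h>
#include <sys/syscall.h>
#include <unistd.h>

static int all_cpus_have_zba(void)
{
	struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_IMA_EXT_0 };

	/* cpusetsize == 0 and cpus == NULL mean "all online CPUs" */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 0;

	return !!(pair.value & RISCV_HWPROBE_EXT_ZBA);
}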
*/ + pair->value &= ~missing; +} + +static bool hwprobe_ext0_has(const struct cpumask *cpus, unsigned long ext) +{ + struct riscv_hwprobe pair; + + hwprobe_isa_ext0(&pair, cpus); + return (pair.value & ext); +} + +static u64 hwprobe_misaligned(const struct cpumask *cpus) +{ + int cpu; + u64 perf = -1ULL; + + for_each_cpu(cpu, cpus) { + int this_perf = per_cpu(misaligned_access_speed, cpu); + + if (perf == -1ULL) + perf = this_perf; + + if (perf != this_perf) { + perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; + break; + } + } + + if (perf == -1ULL) + return RISCV_HWPROBE_MISALIGNED_UNKNOWN; + + return perf; +} + +static void hwprobe_one_pair(struct riscv_hwprobe *pair, + const struct cpumask *cpus) +{ + switch (pair->key) { + case RISCV_HWPROBE_KEY_MVENDORID: + case RISCV_HWPROBE_KEY_MARCHID: + case RISCV_HWPROBE_KEY_MIMPID: + hwprobe_arch_id(pair, cpus); + break; + /* + * The kernel already assumes that the base single-letter ISA + * extensions are supported on all harts, and only supports the + * IMA base, so just cheat a bit here and tell that to + * userspace. + */ + case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: + pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; + break; + + case RISCV_HWPROBE_KEY_IMA_EXT_0: + hwprobe_isa_ext0(pair, cpus); + break; + + case RISCV_HWPROBE_KEY_CPUPERF_0: + pair->value = hwprobe_misaligned(cpus); + break; + + case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: + pair->value = 0; + if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) + pair->value = riscv_cboz_block_size; + break; + + /* + * For forward compatibility, unknown keys don't fail the whole + * call, but get their element key set to -1 and value set to 0 + * indicating they're unrecognized. + */ + default: + pair->key = -1; + pair->value = 0; + break; + } +} + +static int hwprobe_get_values(struct riscv_hwprobe __user *pairs, + size_t pair_count, size_t cpusetsize, + unsigned long __user *cpus_user, + unsigned int flags) +{ + size_t out; + int ret; + cpumask_t cpus; + + /* Check the reserved flags. */ + if (flags != 0) + return -EINVAL; + + /* + * The interface supports taking in a CPU mask, and returns values that + * are consistent across that mask. Allow userspace to specify NULL and + * 0 as a shortcut to all online CPUs. + */ + cpumask_clear(&cpus); + if (!cpusetsize && !cpus_user) { + cpumask_copy(&cpus, cpu_online_mask); + } else { + if (cpusetsize > cpumask_size()) + cpusetsize = cpumask_size(); + + ret = copy_from_user(&cpus, cpus_user, cpusetsize); + if (ret) + return -EFAULT; + + /* + * Userspace must provide at least one online CPU, without that + * there's no way to define what is supported. 
+ */ + cpumask_and(&cpus, &cpus, cpu_online_mask); + if (cpumask_empty(&cpus)) + return -EINVAL; + } + + for (out = 0; out < pair_count; out++, pairs++) { + struct riscv_hwprobe pair; + + if (get_user(pair.key, &pairs->key)) + return -EFAULT; + + pair.value = 0; + hwprobe_one_pair(&pair, &cpus); + ret = put_user(pair.key, &pairs->key); + if (ret == 0) + ret = put_user(pair.value, &pairs->value); + + if (ret) + return -EFAULT; + } + + return 0; +} + +static int hwprobe_get_cpus(struct riscv_hwprobe __user *pairs, + size_t pair_count, size_t cpusetsize, + unsigned long __user *cpus_user, + unsigned int flags) +{ + cpumask_t cpus, one_cpu; + bool clear_all = false; + size_t i; + int ret; + + if (flags != RISCV_HWPROBE_WHICH_CPUS) + return -EINVAL; + + if (!cpusetsize || !cpus_user) + return -EINVAL; + + if (cpusetsize > cpumask_size()) + cpusetsize = cpumask_size(); + + ret = copy_from_user(&cpus, cpus_user, cpusetsize); + if (ret) + return -EFAULT; + + if (cpumask_empty(&cpus)) + cpumask_copy(&cpus, cpu_online_mask); + + cpumask_and(&cpus, &cpus, cpu_online_mask); + + cpumask_clear(&one_cpu); + + for (i = 0; i < pair_count; i++) { + struct riscv_hwprobe pair, tmp; + int cpu; + + ret = copy_from_user(&pair, &pairs[i], sizeof(pair)); + if (ret) + return -EFAULT; + + if (!riscv_hwprobe_key_is_valid(pair.key)) { + clear_all = true; + pair = (struct riscv_hwprobe){ .key = -1, }; + ret = copy_to_user(&pairs[i], &pair, sizeof(pair)); + if (ret) + return -EFAULT; + } + + if (clear_all) + continue; + + tmp = (struct riscv_hwprobe){ .key = pair.key, }; + + for_each_cpu(cpu, &cpus) { + cpumask_set_cpu(cpu, &one_cpu); + + hwprobe_one_pair(&tmp, &one_cpu); + + if (!riscv_hwprobe_pair_cmp(&tmp, &pair)) + cpumask_clear_cpu(cpu, &cpus); + + cpumask_clear_cpu(cpu, &one_cpu); + } + } + + if (clear_all) + cpumask_clear(&cpus); + + ret = copy_to_user(cpus_user, &cpus, cpusetsize); + if (ret) + return -EFAULT; + + return 0; +} + +static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, + size_t pair_count, size_t cpusetsize, + unsigned long __user *cpus_user, + unsigned int flags) +{ + if (flags & RISCV_HWPROBE_WHICH_CPUS) + return hwprobe_get_cpus(pairs, pair_count, cpusetsize, + cpus_user, flags); + + return hwprobe_get_values(pairs, pair_count, cpusetsize, + cpus_user, flags); +} + +#ifdef CONFIG_MMU + +static int __init init_hwprobe_vdso_data(void) +{ + struct vdso_data *vd = __arch_get_k_vdso_data(); + struct arch_vdso_data *avd = &vd->arch_data; + u64 id_bitsmash = 0; + struct riscv_hwprobe pair; + int key; + + /* + * Initialize vDSO data with the answers for the "all CPUs" case, to + * save a syscall in the common case. + */ + for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { + pair.key = key; + hwprobe_one_pair(&pair, cpu_online_mask); + + WARN_ON_ONCE(pair.key < 0); + + avd->all_cpu_hwprobe_values[key] = pair.value; + /* + * Smash together the vendor, arch, and impl IDs to see if + * they're all 0 or any negative. + */ + if (key <= RISCV_HWPROBE_KEY_MIMPID) + id_bitsmash |= pair.value; + } + + /* + * If the arch, vendor, and implementation ID are all the same across + * all harts, then assume all CPUs are the same, and allow the vDSO to + * answer queries for arbitrary masks. However if all values are 0 (not + * populated) or any value returns -1 (varies across CPUs), then the + * vDSO should defer to the kernel for exotic cpu masks. 
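hwprobe_get_cpus() above inverts the query direction: userspace supplies key/value pairs plus a CPU set, and the kernel narrows the set to the CPUs that satisfy every pair (an empty input set is first widened to all online CPUs). A hypothetical sketch of that calling convention, assuming the same headers as the previous sketch:

/* Hypothetical userspace sketch: which CPUs implement Zba? */
static long cpus_with_zba(unsigned long *cpus, size_t cpusetsize)
{
	struct riscv_hwprobe pair = {
		.key   = RISCV_HWPROBE_KEY_IMA_EXT_0,
		.value = RISCV_HWPROBE_EXT_ZBA,
	};

	memset(cpus, 0, cpusetsize);	/* empty set: start from all online CPUs */
	return syscall(__NR_riscv_hwprobe, &pair, 1, cpusetsize, cpus,
		       RISCV_HWPROBE_WHICH_CPUS);
	/* on success, 'cpus' holds only the CPUs whose IMA_EXT_0 includes Zba */
}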
+ */ + avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; + return 0; +} + +arch_initcall_sync(init_hwprobe_vdso_data); + +#endif /* CONFIG_MMU */ + +SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, + size_t, pair_count, size_t, cpusetsize, unsigned long __user *, + cpus, unsigned int, flags) +{ + return do_riscv_hwprobe(pairs, pair_count, cpusetsize, + cpus, flags); +} diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c index a2ca5b7756..f1c1416a9f 100644 --- a/arch/riscv/kernel/sys_riscv.c +++ b/arch/riscv/kernel/sys_riscv.c @@ -7,15 +7,7 @@ #include <linux/syscalls.h> #include <asm/cacheflush.h> -#include <asm/cpufeature.h> -#include <asm/hwprobe.h> -#include <asm/sbi.h> -#include <asm/vector.h> -#include <asm/switch_to.h> -#include <asm/uaccess.h> -#include <asm/unistd.h> #include <asm-generic/mman-common.h> -#include <vdso/vsyscall.h> static long riscv_sys_mmap(unsigned long addr, unsigned long len, unsigned long prot, unsigned long flags, @@ -77,283 +69,6 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end, return 0; } -/* - * The hwprobe interface, for allowing userspace to probe to see which features - * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for more - * details. - */ -static void hwprobe_arch_id(struct riscv_hwprobe *pair, - const struct cpumask *cpus) -{ - u64 id = -1ULL; - bool first = true; - int cpu; - - for_each_cpu(cpu, cpus) { - u64 cpu_id; - - switch (pair->key) { - case RISCV_HWPROBE_KEY_MVENDORID: - cpu_id = riscv_cached_mvendorid(cpu); - break; - case RISCV_HWPROBE_KEY_MIMPID: - cpu_id = riscv_cached_mimpid(cpu); - break; - case RISCV_HWPROBE_KEY_MARCHID: - cpu_id = riscv_cached_marchid(cpu); - break; - } - - if (first) { - id = cpu_id; - first = false; - } - - /* - * If there's a mismatch for the given set, return -1 in the - * value. - */ - if (id != cpu_id) { - id = -1ULL; - break; - } - } - - pair->value = id; -} - -static void hwprobe_isa_ext0(struct riscv_hwprobe *pair, - const struct cpumask *cpus) -{ - int cpu; - u64 missing = 0; - - pair->value = 0; - if (has_fpu()) - pair->value |= RISCV_HWPROBE_IMA_FD; - - if (riscv_isa_extension_available(NULL, c)) - pair->value |= RISCV_HWPROBE_IMA_C; - - if (has_vector()) - pair->value |= RISCV_HWPROBE_IMA_V; - - /* - * Loop through and record extensions that 1) anyone has, and 2) anyone - * doesn't have. - */ - for_each_cpu(cpu, cpus) { - struct riscv_isainfo *isainfo = &hart_isa[cpu]; - -#define EXT_KEY(ext) \ - do { \ - if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \ - pair->value |= RISCV_HWPROBE_EXT_##ext; \ - else \ - missing |= RISCV_HWPROBE_EXT_##ext; \ - } while (false) - - /* - * Only use EXT_KEY() for extensions which can be exposed to userspace, - * regardless of the kernel's configuration, as no other checks, besides - * presence in the hart_isa bitmap, are made. - */ - EXT_KEY(ZBA); - EXT_KEY(ZBB); - EXT_KEY(ZBS); - EXT_KEY(ZICBOZ); -#undef EXT_KEY - } - - /* Now turn off reporting features if any CPU is missing it. 
*/ - pair->value &= ~missing; -} - -static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext) -{ - struct riscv_hwprobe pair; - - hwprobe_isa_ext0(&pair, cpus); - return (pair.value & ext); -} - -static u64 hwprobe_misaligned(const struct cpumask *cpus) -{ - int cpu; - u64 perf = -1ULL; - - for_each_cpu(cpu, cpus) { - int this_perf = per_cpu(misaligned_access_speed, cpu); - - if (perf == -1ULL) - perf = this_perf; - - if (perf != this_perf) { - perf = RISCV_HWPROBE_MISALIGNED_UNKNOWN; - break; - } - } - - if (perf == -1ULL) - return RISCV_HWPROBE_MISALIGNED_UNKNOWN; - - return perf; -} - -static void hwprobe_one_pair(struct riscv_hwprobe *pair, - const struct cpumask *cpus) -{ - switch (pair->key) { - case RISCV_HWPROBE_KEY_MVENDORID: - case RISCV_HWPROBE_KEY_MARCHID: - case RISCV_HWPROBE_KEY_MIMPID: - hwprobe_arch_id(pair, cpus); - break; - /* - * The kernel already assumes that the base single-letter ISA - * extensions are supported on all harts, and only supports the - * IMA base, so just cheat a bit here and tell that to - * userspace. - */ - case RISCV_HWPROBE_KEY_BASE_BEHAVIOR: - pair->value = RISCV_HWPROBE_BASE_BEHAVIOR_IMA; - break; - - case RISCV_HWPROBE_KEY_IMA_EXT_0: - hwprobe_isa_ext0(pair, cpus); - break; - - case RISCV_HWPROBE_KEY_CPUPERF_0: - pair->value = hwprobe_misaligned(cpus); - break; - - case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE: - pair->value = 0; - if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ)) - pair->value = riscv_cboz_block_size; - break; - - /* - * For forward compatibility, unknown keys don't fail the whole - * call, but get their element key set to -1 and value set to 0 - * indicating they're unrecognized. - */ - default: - pair->key = -1; - pair->value = 0; - break; - } -} - -static int do_riscv_hwprobe(struct riscv_hwprobe __user *pairs, - size_t pair_count, size_t cpu_count, - unsigned long __user *cpus_user, - unsigned int flags) -{ - size_t out; - int ret; - cpumask_t cpus; - - /* Check the reserved flags. */ - if (flags != 0) - return -EINVAL; - - /* - * The interface supports taking in a CPU mask, and returns values that - * are consistent across that mask. Allow userspace to specify NULL and - * 0 as a shortcut to all online CPUs. - */ - cpumask_clear(&cpus); - if (!cpu_count && !cpus_user) { - cpumask_copy(&cpus, cpu_online_mask); - } else { - if (cpu_count > cpumask_size()) - cpu_count = cpumask_size(); - - ret = copy_from_user(&cpus, cpus_user, cpu_count); - if (ret) - return -EFAULT; - - /* - * Userspace must provide at least one online CPU, without that - * there's no way to define what is supported. - */ - cpumask_and(&cpus, &cpus, cpu_online_mask); - if (cpumask_empty(&cpus)) - return -EINVAL; - } - - for (out = 0; out < pair_count; out++, pairs++) { - struct riscv_hwprobe pair; - - if (get_user(pair.key, &pairs->key)) - return -EFAULT; - - pair.value = 0; - hwprobe_one_pair(&pair, &cpus); - ret = put_user(pair.key, &pairs->key); - if (ret == 0) - ret = put_user(pair.value, &pairs->value); - - if (ret) - return -EFAULT; - } - - return 0; -} - -#ifdef CONFIG_MMU - -static int __init init_hwprobe_vdso_data(void) -{ - struct vdso_data *vd = __arch_get_k_vdso_data(); - struct arch_vdso_data *avd = &vd->arch_data; - u64 id_bitsmash = 0; - struct riscv_hwprobe pair; - int key; - - /* - * Initialize vDSO data with the answers for the "all CPUs" case, to - * save a syscall in the common case. 
- */ - for (key = 0; key <= RISCV_HWPROBE_MAX_KEY; key++) { - pair.key = key; - hwprobe_one_pair(&pair, cpu_online_mask); - - WARN_ON_ONCE(pair.key < 0); - - avd->all_cpu_hwprobe_values[key] = pair.value; - /* - * Smash together the vendor, arch, and impl IDs to see if - * they're all 0 or any negative. - */ - if (key <= RISCV_HWPROBE_KEY_MIMPID) - id_bitsmash |= pair.value; - } - - /* - * If the arch, vendor, and implementation ID are all the same across - * all harts, then assume all CPUs are the same, and allow the vDSO to - * answer queries for arbitrary masks. However if all values are 0 (not - * populated) or any value returns -1 (varies across CPUs), then the - * vDSO should defer to the kernel for exotic cpu masks. - */ - avd->homogeneous_cpus = id_bitsmash != 0 && id_bitsmash != -1; - return 0; -} - -arch_initcall_sync(init_hwprobe_vdso_data); - -#endif /* CONFIG_MMU */ - -SYSCALL_DEFINE5(riscv_hwprobe, struct riscv_hwprobe __user *, pairs, - size_t, pair_count, size_t, cpu_count, unsigned long __user *, - cpus, unsigned int, flags) -{ - return do_riscv_hwprobe(pairs, pair_count, cpu_count, - cpus, flags); -} - /* Not defined using SYSCALL_DEFINE0 to avoid error injection */ asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *__unused) { diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c index 23641e82a9..ba34771977 100644 --- a/arch/riscv/kernel/time.c +++ b/arch/riscv/kernel/time.c @@ -12,6 +12,7 @@ #include <asm/sbi.h> #include <asm/processor.h> #include <asm/timex.h> +#include <asm/paravirt.h> unsigned long riscv_timebase __ro_after_init; EXPORT_SYMBOL_GPL(riscv_timebase); @@ -45,4 +46,6 @@ void __init time_init(void) timer_probe(); tick_setup_hrtimer_broadcast(); + + pv_time_init(); } diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c index a1b9be3c43..142f5f5168 100644 --- a/arch/riscv/kernel/traps.c +++ b/arch/riscv/kernel/traps.c @@ -121,7 +121,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr) print_vma_addr(KERN_CONT " in ", instruction_pointer(regs)); pr_cont("\n"); __show_regs(regs); - dump_instr(KERN_EMERG, regs); + dump_instr(KERN_INFO, regs); } force_sig_fault(signo, code, (void __user *)addr); diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c index 1ed769c87a..c2ed4e689b 100644 --- a/arch/riscv/kernel/traps_misaligned.c +++ b/arch/riscv/kernel/traps_misaligned.c @@ -319,7 +319,7 @@ static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn) static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) { if (user_mode(regs)) { - return __get_user(*r_val, addr); + return __get_user(*r_val, (u8 __user *)addr); } else { *r_val = *addr; return 0; @@ -329,7 +329,7 @@ static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val) static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) { if (user_mode(regs)) { - return __put_user(val, addr); + return __put_user(val, (u8 __user *)addr); } else { *addr = val; return 0; @@ -343,7 +343,7 @@ static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val) if (user_mode(regs)) { \ __ret = __get_user(insn, insn_addr); \ } else { \ - insn = *insn_addr; \ + insn = *(__force u16 *)insn_addr; \ __ret = 0; \ } \ \ diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile index 9b517fe1b8..272c431ac5 100644 --- a/arch/riscv/kernel/vdso/Makefile +++ b/arch/riscv/kernel/vdso/Makefile @@ -37,6 +37,7 @@ endif # Disable -pg to prevent insert call site 
CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) +CFLAGS_REMOVE_hwprobe.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS) # Disable profiling and instrumentation for VDSO code GCOV_PROFILE := n diff --git a/arch/riscv/kernel/vdso/hwprobe.c b/arch/riscv/kernel/vdso/hwprobe.c index cadf725ef7..1e926e4b58 100644 --- a/arch/riscv/kernel/vdso/hwprobe.c +++ b/arch/riscv/kernel/vdso/hwprobe.c @@ -3,26 +3,22 @@ * Copyright 2023 Rivos, Inc */ +#include <linux/string.h> #include <linux/types.h> #include <vdso/datapage.h> #include <vdso/helpers.h> extern int riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, - size_t cpu_count, unsigned long *cpus, + size_t cpusetsize, unsigned long *cpus, unsigned int flags); -/* Add a prototype to avoid -Wmissing-prototypes warning. */ -int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, - size_t cpu_count, unsigned long *cpus, - unsigned int flags); - -int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, - size_t cpu_count, unsigned long *cpus, - unsigned int flags) +static int riscv_vdso_get_values(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags) { const struct vdso_data *vd = __arch_get_vdso_data(); const struct arch_vdso_data *avd = &vd->arch_data; - bool all_cpus = !cpu_count && !cpus; + bool all_cpus = !cpusetsize && !cpus; struct riscv_hwprobe *p = pairs; struct riscv_hwprobe *end = pairs + pair_count; @@ -33,7 +29,7 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, * masks. */ if ((flags != 0) || (!all_cpus && !avd->homogeneous_cpus)) - return riscv_hwprobe(pairs, pair_count, cpu_count, cpus, flags); + return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); /* This is something we can handle, fill out the pairs. */ while (p < end) { @@ -50,3 +46,71 @@ int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, return 0; } + +static int riscv_vdso_get_cpus(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags) +{ + const struct vdso_data *vd = __arch_get_vdso_data(); + const struct arch_vdso_data *avd = &vd->arch_data; + struct riscv_hwprobe *p = pairs; + struct riscv_hwprobe *end = pairs + pair_count; + unsigned char *c = (unsigned char *)cpus; + bool empty_cpus = true; + bool clear_all = false; + int i; + + if (!cpusetsize || !cpus) + return -EINVAL; + + for (i = 0; i < cpusetsize; i++) { + if (c[i]) { + empty_cpus = false; + break; + } + } + + if (empty_cpus || flags != RISCV_HWPROBE_WHICH_CPUS || !avd->homogeneous_cpus) + return riscv_hwprobe(pairs, pair_count, cpusetsize, cpus, flags); + + while (p < end) { + if (riscv_hwprobe_key_is_valid(p->key)) { + struct riscv_hwprobe t = { + .key = p->key, + .value = avd->all_cpu_hwprobe_values[p->key], + }; + + if (!riscv_hwprobe_pair_cmp(&t, p)) + clear_all = true; + } else { + clear_all = true; + p->key = -1; + p->value = 0; + } + p++; + } + + if (clear_all) { + for (i = 0; i < cpusetsize; i++) + c[i] = 0; + } + + return 0; +} + +/* Add a prototype to avoid -Wmissing-prototypes warning. 
*/ +int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags); + +int __vdso_riscv_hwprobe(struct riscv_hwprobe *pairs, size_t pair_count, + size_t cpusetsize, unsigned long *cpus, + unsigned int flags) +{ + if (flags & RISCV_HWPROBE_WHICH_CPUS) + return riscv_vdso_get_cpus(pairs, pair_count, cpusetsize, + cpus, flags); + + return riscv_vdso_get_values(pairs, pair_count, cpusetsize, + cpus, flags); +} diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c index cc0d80699c..b350578025 100644 --- a/arch/riscv/kernel/vdso/vgettimeofday.c +++ b/arch/riscv/kernel/vdso/vgettimeofday.c @@ -8,23 +8,18 @@ #include <linux/time.h> #include <linux/types.h> +#include <vdso/gettime.h> -extern -int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts); int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts) { return __cvdso_clock_gettime(clock, ts); } -extern -int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz); int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz) { return __cvdso_gettimeofday(tv, tz); } -extern -int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res); int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res) { return __cvdso_clock_getres(clock_id, res); diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c index 578b629248..6727d1d3b8 100644 --- a/arch/riscv/kernel/vector.c +++ b/arch/riscv/kernel/vector.c @@ -21,6 +21,10 @@ #include <asm/bug.h> static bool riscv_v_implicit_uacc = IS_ENABLED(CONFIG_RISCV_ISA_V_DEFAULT_ENABLE); +static struct kmem_cache *riscv_v_user_cachep; +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE +static struct kmem_cache *riscv_v_kernel_cachep; +#endif unsigned long riscv_v_vsize __read_mostly; EXPORT_SYMBOL_GPL(riscv_v_vsize); @@ -47,6 +51,21 @@ int riscv_v_setup_vsize(void) return 0; } +void __init riscv_v_setup_ctx_cache(void) +{ + if (!has_vector()) + return; + + riscv_v_user_cachep = kmem_cache_create_usercopy("riscv_vector_ctx", + riscv_v_vsize, 16, SLAB_PANIC, + 0, riscv_v_vsize, NULL); +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + riscv_v_kernel_cachep = kmem_cache_create("riscv_vector_kctx", + riscv_v_vsize, 16, + SLAB_PANIC, NULL); +#endif +} + static bool insn_is_vector(u32 insn_buf) { u32 opcode = insn_buf & __INSN_OPCODE_MASK; @@ -80,20 +99,37 @@ static bool insn_is_vector(u32 insn_buf) return false; } -static int riscv_v_thread_zalloc(void) +static int riscv_v_thread_zalloc(struct kmem_cache *cache, + struct __riscv_v_ext_state *ctx) { void *datap; - datap = kzalloc(riscv_v_vsize, GFP_KERNEL); + datap = kmem_cache_zalloc(cache, GFP_KERNEL); if (!datap) return -ENOMEM; - current->thread.vstate.datap = datap; - memset(¤t->thread.vstate, 0, offsetof(struct __riscv_v_ext_state, - datap)); + ctx->datap = datap; + memset(ctx, 0, offsetof(struct __riscv_v_ext_state, datap)); return 0; } +void riscv_v_thread_alloc(struct task_struct *tsk) +{ +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + riscv_v_thread_zalloc(riscv_v_kernel_cachep, &tsk->thread.kernel_vstate); +#endif +} + +void riscv_v_thread_free(struct task_struct *tsk) +{ + if (tsk->thread.vstate.datap) + kmem_cache_free(riscv_v_user_cachep, tsk->thread.vstate.datap); +#ifdef CONFIG_RISCV_ISA_V_PREEMPTIVE + if (tsk->thread.kernel_vstate.datap) + kmem_cache_free(riscv_v_kernel_cachep, tsk->thread.kernel_vstate.datap); +#endif +} + #define VSTATE_CTRL_GET_CUR(x) ((x) & 
PR_RISCV_V_VSTATE_CTRL_CUR_MASK) #define VSTATE_CTRL_GET_NEXT(x) (((x) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) >> 2) #define VSTATE_CTRL_MAKE_NEXT(x) (((x) << 2) & PR_RISCV_V_VSTATE_CTRL_NEXT_MASK) @@ -122,7 +158,8 @@ static inline void riscv_v_ctrl_set(struct task_struct *tsk, int cur, int nxt, ctrl |= VSTATE_CTRL_MAKE_NEXT(nxt); if (inherit) ctrl |= PR_RISCV_V_VSTATE_CTRL_INHERIT; - tsk->thread.vstate_ctrl = ctrl; + tsk->thread.vstate_ctrl &= ~PR_RISCV_V_VSTATE_CTRL_MASK; + tsk->thread.vstate_ctrl |= ctrl; } bool riscv_v_vstate_ctrl_user_allowed(void) @@ -162,12 +199,12 @@ bool riscv_v_first_use_handler(struct pt_regs *regs) * context where VS has been off. So, try to allocate the user's V * context and resume execution. */ - if (riscv_v_thread_zalloc()) { + if (riscv_v_thread_zalloc(riscv_v_user_cachep, ¤t->thread.vstate)) { force_sig(SIGBUS); return true; } riscv_v_vstate_on(regs); - riscv_v_vstate_restore(current, regs); + riscv_v_vstate_set_restore(current, regs); return true; } diff --git a/arch/riscv/kvm/Kconfig b/arch/riscv/kvm/Kconfig index dfc237d787..d490db9438 100644 --- a/arch/riscv/kvm/Kconfig +++ b/arch/riscv/kvm/Kconfig @@ -20,18 +20,17 @@ if VIRTUALIZATION config KVM tristate "Kernel-based Virtual Machine (KVM) support (EXPERIMENTAL)" depends on RISCV_SBI && MMU - select HAVE_KVM_EVENTFD select HAVE_KVM_IRQCHIP - select HAVE_KVM_IRQFD select HAVE_KVM_IRQ_ROUTING select HAVE_KVM_MSI select HAVE_KVM_VCPU_ASYNC_IOCTL + select KVM_COMMON select KVM_GENERIC_DIRTYLOG_READ_PROTECT select KVM_GENERIC_HARDWARE_ENABLING select KVM_MMIO select KVM_XFER_TO_GUEST_WORK - select MMU_NOTIFIER - select PREEMPT_NOTIFIERS + select KVM_GENERIC_MMU_NOTIFIER + select SCHED_INFO help Support hosting virtualized guest machines. diff --git a/arch/riscv/kvm/Makefile b/arch/riscv/kvm/Makefile index 4c2067fc59..c9646521f1 100644 --- a/arch/riscv/kvm/Makefile +++ b/arch/riscv/kvm/Makefile @@ -26,6 +26,7 @@ kvm-$(CONFIG_RISCV_SBI_V01) += vcpu_sbi_v01.o kvm-y += vcpu_sbi_base.o kvm-y += vcpu_sbi_replace.o kvm-y += vcpu_sbi_hsm.o +kvm-y += vcpu_sbi_sta.o kvm-y += vcpu_timer.o kvm-$(CONFIG_RISCV_PMU_SBI) += vcpu_pmu.o vcpu_sbi_pmu.o kvm-y += aia.o diff --git a/arch/riscv/kvm/aia_aplic.c b/arch/riscv/kvm/aia_aplic.c index 39e72aa016..b467ba5ed9 100644 --- a/arch/riscv/kvm/aia_aplic.c +++ b/arch/riscv/kvm/aia_aplic.c @@ -137,11 +137,21 @@ static void aplic_write_pending(struct aplic *aplic, u32 irq, bool pending) raw_spin_lock_irqsave(&irqd->lock, flags); sm = irqd->sourcecfg & APLIC_SOURCECFG_SM_MASK; - if (!pending && - ((sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) || - (sm == APLIC_SOURCECFG_SM_LEVEL_LOW))) + if (sm == APLIC_SOURCECFG_SM_INACTIVE) goto skip_write_pending; + if (sm == APLIC_SOURCECFG_SM_LEVEL_HIGH || + sm == APLIC_SOURCECFG_SM_LEVEL_LOW) { + if (!pending) + goto skip_write_pending; + if ((irqd->state & APLIC_IRQ_STATE_INPUT) && + sm == APLIC_SOURCECFG_SM_LEVEL_LOW) + goto skip_write_pending; + if (!(irqd->state & APLIC_IRQ_STATE_INPUT) && + sm == APLIC_SOURCECFG_SM_LEVEL_HIGH) + goto skip_write_pending; + } + if (pending) irqd->state |= APLIC_IRQ_STATE_PENDING; else @@ -187,16 +197,31 @@ static void aplic_write_enabled(struct aplic *aplic, u32 irq, bool enabled) static bool aplic_read_input(struct aplic *aplic, u32 irq) { - bool ret; - unsigned long flags; + u32 sourcecfg, sm, raw_input, irq_inverted; struct aplic_irq *irqd; + unsigned long flags; + bool ret = false; if (!irq || aplic->nr_irqs <= irq) return false; irqd = &aplic->irqs[irq]; raw_spin_lock_irqsave(&irqd->lock, flags); - ret = 
(irqd->state & APLIC_IRQ_STATE_INPUT) ? true : false; + + sourcecfg = irqd->sourcecfg; + if (sourcecfg & APLIC_SOURCECFG_D) + goto skip; + + sm = sourcecfg & APLIC_SOURCECFG_SM_MASK; + if (sm == APLIC_SOURCECFG_SM_INACTIVE) + goto skip; + + raw_input = (irqd->state & APLIC_IRQ_STATE_INPUT) ? 1 : 0; + irq_inverted = (sm == APLIC_SOURCECFG_SM_LEVEL_LOW || + sm == APLIC_SOURCECFG_SM_EDGE_FALL) ? 1 : 0; + ret = !!(raw_input ^ irq_inverted); + +skip: raw_spin_unlock_irqrestore(&irqd->lock, flags); return ret; diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c index 068c745938..a9e2fd7245 100644 --- a/arch/riscv/kvm/mmu.c +++ b/arch/riscv/kvm/mmu.c @@ -103,7 +103,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, *ptep_level = current_level; ptep = (pte_t *)kvm->arch.pgd; ptep = &ptep[gstage_pte_index(addr, current_level)]; - while (ptep && pte_val(*ptep)) { + while (ptep && pte_val(ptep_get(ptep))) { if (gstage_pte_leaf(ptep)) { *ptep_level = current_level; *ptepp = ptep; @@ -113,7 +113,7 @@ static bool gstage_get_leaf_entry(struct kvm *kvm, gpa_t addr, if (current_level) { current_level--; *ptep_level = current_level; - ptep = (pte_t *)gstage_pte_page_vaddr(*ptep); + ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep)); ptep = &ptep[gstage_pte_index(addr, current_level)]; } else { ptep = NULL; @@ -149,25 +149,25 @@ static int gstage_set_pte(struct kvm *kvm, u32 level, if (gstage_pte_leaf(ptep)) return -EEXIST; - if (!pte_val(*ptep)) { + if (!pte_val(ptep_get(ptep))) { if (!pcache) return -ENOMEM; next_ptep = kvm_mmu_memory_cache_alloc(pcache); if (!next_ptep) return -ENOMEM; - *ptep = pfn_pte(PFN_DOWN(__pa(next_ptep)), - __pgprot(_PAGE_TABLE)); + set_pte(ptep, pfn_pte(PFN_DOWN(__pa(next_ptep)), + __pgprot(_PAGE_TABLE))); } else { if (gstage_pte_leaf(ptep)) return -EEXIST; - next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep); + next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep)); } current_level--; ptep = &next_ptep[gstage_pte_index(addr, current_level)]; } - *ptep = *new_pte; + set_pte(ptep, *new_pte); if (gstage_pte_leaf(ptep)) gstage_remote_tlb_flush(kvm, current_level, addr); @@ -239,11 +239,11 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr, BUG_ON(addr & (page_size - 1)); - if (!pte_val(*ptep)) + if (!pte_val(ptep_get(ptep))) return; if (ptep_level && !gstage_pte_leaf(ptep)) { - next_ptep = (pte_t *)gstage_pte_page_vaddr(*ptep); + next_ptep = (pte_t *)gstage_pte_page_vaddr(ptep_get(ptep)); next_ptep_level = ptep_level - 1; ret = gstage_level_to_page_size(next_ptep_level, &next_page_size); @@ -261,7 +261,7 @@ static void gstage_op_pte(struct kvm *kvm, gpa_t addr, if (op == GSTAGE_OP_CLEAR) set_pte(ptep, __pte(0)); else if (op == GSTAGE_OP_WP) - set_pte(ptep, __pte(pte_val(*ptep) & ~_PAGE_WRITE)); + set_pte(ptep, __pte(pte_val(ptep_get(ptep)) & ~_PAGE_WRITE)); gstage_remote_tlb_flush(kvm, ptep_level, addr); } } @@ -603,7 +603,7 @@ bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range) &ptep, &ptep_level)) return false; - return pte_young(*ptep); + return pte_young(ptep_get(ptep)); } int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu, diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c index e087c80907..b5ca9f2e98 100644 --- a/arch/riscv/kvm/vcpu.c +++ b/arch/riscv/kvm/vcpu.c @@ -83,6 +83,8 @@ static void kvm_riscv_reset_vcpu(struct kvm_vcpu *vcpu) vcpu->arch.hfence_tail = 0; memset(vcpu->arch.hfence_queue, 0, sizeof(vcpu->arch.hfence_queue)); + kvm_riscv_vcpu_sbi_sta_reset(vcpu); + /* Reset the guest CSRs for hotplug usecase */ if 
(loaded) kvm_arch_vcpu_load(vcpu, smp_processor_id()); @@ -541,6 +543,8 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu) kvm_riscv_vcpu_aia_load(vcpu, cpu); + kvm_make_request(KVM_REQ_STEAL_UPDATE, vcpu); + vcpu->cpu = cpu; } @@ -614,6 +618,9 @@ static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu) if (kvm_check_request(KVM_REQ_HFENCE, vcpu)) kvm_riscv_hfence_process(vcpu); + + if (kvm_check_request(KVM_REQ_STEAL_UPDATE, vcpu)) + kvm_riscv_vcpu_record_steal_time(vcpu); } } @@ -757,8 +764,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu) /* Update HVIP CSR for current CPU */ kvm_riscv_update_hvip(vcpu); - if (ret <= 0 || - kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || + if (kvm_riscv_gstage_vmid_ver_changed(&vcpu->kvm->arch.vmid) || kvm_request_pending(vcpu) || xfer_to_guest_mode_work_pending()) { vcpu->mode = OUTSIDE_GUEST_MODE; diff --git a/arch/riscv/kvm/vcpu_insn.c b/arch/riscv/kvm/vcpu_insn.c index 7a6abed41b..ee7215f407 100644 --- a/arch/riscv/kvm/vcpu_insn.c +++ b/arch/riscv/kvm/vcpu_insn.c @@ -7,6 +7,8 @@ #include <linux/bitops.h> #include <linux/kvm_host.h> +#include <asm/cpufeature.h> + #define INSN_OPCODE_MASK 0x007c #define INSN_OPCODE_SHIFT 2 #define INSN_OPCODE_SYSTEM 28 @@ -213,9 +215,20 @@ struct csr_func { unsigned long wr_mask); }; +static int seed_csr_rmw(struct kvm_vcpu *vcpu, unsigned int csr_num, + unsigned long *val, unsigned long new_val, + unsigned long wr_mask) +{ + if (!riscv_isa_extension_available(vcpu->arch.isa, ZKR)) + return KVM_INSN_ILLEGAL_TRAP; + + return KVM_INSN_EXIT_TO_USER_SPACE; +} + static const struct csr_func csr_funcs[] = { KVM_RISCV_VCPU_AIA_CSR_FUNCS KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS + { .base = CSR_SEED, .count = 1, .func = seed_csr_rmw }, }; /** diff --git a/arch/riscv/kvm/vcpu_onereg.c b/arch/riscv/kvm/vcpu_onereg.c index f8c9fa0c03..5f7355e960 100644 --- a/arch/riscv/kvm/vcpu_onereg.c +++ b/arch/riscv/kvm/vcpu_onereg.c @@ -42,15 +42,42 @@ static const unsigned long kvm_isa_ext_arr[] = { KVM_ISA_EXT_ARR(SVPBMT), KVM_ISA_EXT_ARR(ZBA), KVM_ISA_EXT_ARR(ZBB), + KVM_ISA_EXT_ARR(ZBC), + KVM_ISA_EXT_ARR(ZBKB), + KVM_ISA_EXT_ARR(ZBKC), + KVM_ISA_EXT_ARR(ZBKX), KVM_ISA_EXT_ARR(ZBS), + KVM_ISA_EXT_ARR(ZFA), + KVM_ISA_EXT_ARR(ZFH), + KVM_ISA_EXT_ARR(ZFHMIN), KVM_ISA_EXT_ARR(ZICBOM), KVM_ISA_EXT_ARR(ZICBOZ), KVM_ISA_EXT_ARR(ZICNTR), KVM_ISA_EXT_ARR(ZICOND), KVM_ISA_EXT_ARR(ZICSR), KVM_ISA_EXT_ARR(ZIFENCEI), + KVM_ISA_EXT_ARR(ZIHINTNTL), KVM_ISA_EXT_ARR(ZIHINTPAUSE), KVM_ISA_EXT_ARR(ZIHPM), + KVM_ISA_EXT_ARR(ZKND), + KVM_ISA_EXT_ARR(ZKNE), + KVM_ISA_EXT_ARR(ZKNH), + KVM_ISA_EXT_ARR(ZKR), + KVM_ISA_EXT_ARR(ZKSED), + KVM_ISA_EXT_ARR(ZKSH), + KVM_ISA_EXT_ARR(ZKT), + KVM_ISA_EXT_ARR(ZVBB), + KVM_ISA_EXT_ARR(ZVBC), + KVM_ISA_EXT_ARR(ZVFH), + KVM_ISA_EXT_ARR(ZVFHMIN), + KVM_ISA_EXT_ARR(ZVKB), + KVM_ISA_EXT_ARR(ZVKG), + KVM_ISA_EXT_ARR(ZVKNED), + KVM_ISA_EXT_ARR(ZVKNHA), + KVM_ISA_EXT_ARR(ZVKNHB), + KVM_ISA_EXT_ARR(ZVKSED), + KVM_ISA_EXT_ARR(ZVKSH), + KVM_ISA_EXT_ARR(ZVKT), }; static unsigned long kvm_riscv_vcpu_base2isa_ext(unsigned long base_ext) @@ -92,13 +119,40 @@ static bool kvm_riscv_vcpu_isa_disable_allowed(unsigned long ext) case KVM_RISCV_ISA_EXT_SVNAPOT: case KVM_RISCV_ISA_EXT_ZBA: case KVM_RISCV_ISA_EXT_ZBB: + case KVM_RISCV_ISA_EXT_ZBC: + case KVM_RISCV_ISA_EXT_ZBKB: + case KVM_RISCV_ISA_EXT_ZBKC: + case KVM_RISCV_ISA_EXT_ZBKX: case KVM_RISCV_ISA_EXT_ZBS: + case KVM_RISCV_ISA_EXT_ZFA: + case KVM_RISCV_ISA_EXT_ZFH: + case KVM_RISCV_ISA_EXT_ZFHMIN: case KVM_RISCV_ISA_EXT_ZICNTR: case KVM_RISCV_ISA_EXT_ZICOND: 
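	/*
	 * Like the entries above and below it, none of the extensions in
	 * this list has a mechanism (e.g. an Smstateen bit) by which KVM
	 * could keep a guest from using it, so requests to disable one of
	 * them are refused. A hedged user-space sketch of such a request,
	 * which KVM would reject for any key in this list (reg encoding
	 * per the KVM UAPI headers, rv64 register size assumed):
	 *
	 *	u64 val = 0;
	 *	struct kvm_one_reg reg = {
	 *		.id = KVM_REG_RISCV | KVM_REG_SIZE_U64 |
	 *		      KVM_REG_RISCV_ISA_EXT |
	 *		      KVM_REG_RISCV_ISA_SINGLE | KVM_RISCV_ISA_EXT_ZBB,
	 *		.addr = (unsigned long)&val,
	 *	};
	 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// rejected by KVM
	 */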
case KVM_RISCV_ISA_EXT_ZICSR: case KVM_RISCV_ISA_EXT_ZIFENCEI: + case KVM_RISCV_ISA_EXT_ZIHINTNTL: case KVM_RISCV_ISA_EXT_ZIHINTPAUSE: case KVM_RISCV_ISA_EXT_ZIHPM: + case KVM_RISCV_ISA_EXT_ZKND: + case KVM_RISCV_ISA_EXT_ZKNE: + case KVM_RISCV_ISA_EXT_ZKNH: + case KVM_RISCV_ISA_EXT_ZKR: + case KVM_RISCV_ISA_EXT_ZKSED: + case KVM_RISCV_ISA_EXT_ZKSH: + case KVM_RISCV_ISA_EXT_ZKT: + case KVM_RISCV_ISA_EXT_ZVBB: + case KVM_RISCV_ISA_EXT_ZVBC: + case KVM_RISCV_ISA_EXT_ZVFH: + case KVM_RISCV_ISA_EXT_ZVFHMIN: + case KVM_RISCV_ISA_EXT_ZVKB: + case KVM_RISCV_ISA_EXT_ZVKG: + case KVM_RISCV_ISA_EXT_ZVKNED: + case KVM_RISCV_ISA_EXT_ZVKNHA: + case KVM_RISCV_ISA_EXT_ZVKNHB: + case KVM_RISCV_ISA_EXT_ZVKSED: + case KVM_RISCV_ISA_EXT_ZVKSH: + case KVM_RISCV_ISA_EXT_ZVKT: return false; /* Extensions which can be disabled using Smstateen */ case KVM_RISCV_ISA_EXT_SSAIA: @@ -485,7 +539,7 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu, if (riscv_has_extension_unlikely(RISCV_ISA_EXT_SMSTATEEN)) rc = kvm_riscv_vcpu_smstateen_set_csr(vcpu, reg_num, reg_val); -break; + break; default: rc = -ENOENT; break; @@ -931,50 +985,106 @@ static inline unsigned long num_isa_ext_regs(const struct kvm_vcpu *vcpu) return copy_isa_ext_reg_indices(vcpu, NULL); } -static inline unsigned long num_sbi_ext_regs(void) +static int copy_sbi_ext_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) { - /* - * number of KVM_REG_RISCV_SBI_SINGLE + - * 2 x (number of KVM_REG_RISCV_SBI_MULTI) - */ - return KVM_RISCV_SBI_EXT_MAX + 2*(KVM_REG_RISCV_SBI_MULTI_REG_LAST+1); -} - -static int copy_sbi_ext_reg_indices(u64 __user *uindices) -{ - int n; + unsigned int n = 0; - /* copy KVM_REG_RISCV_SBI_SINGLE */ - n = KVM_RISCV_SBI_EXT_MAX; - for (int i = 0; i < n; i++) { + for (int i = 0; i < KVM_RISCV_SBI_EXT_MAX; i++) { u64 size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | KVM_REG_RISCV_SBI_SINGLE | i; + if (!riscv_vcpu_supports_sbi_ext(vcpu, i)) + continue; + if (uindices) { if (put_user(reg, uindices)) return -EFAULT; uindices++; } + + n++; } - /* copy KVM_REG_RISCV_SBI_MULTI */ - n = KVM_REG_RISCV_SBI_MULTI_REG_LAST + 1; - for (int i = 0; i < n; i++) { - u64 size = IS_ENABLED(CONFIG_32BIT) ? - KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; - u64 reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | - KVM_REG_RISCV_SBI_MULTI_EN | i; + return n; +} + +static unsigned long num_sbi_ext_regs(struct kvm_vcpu *vcpu) +{ + return copy_sbi_ext_reg_indices(vcpu, NULL); +} + +static int copy_sbi_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices) +{ + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + int total = 0; + + if (scontext->ext_status[KVM_RISCV_SBI_EXT_STA] == KVM_RISCV_SBI_EXT_STATUS_ENABLED) { + u64 size = IS_ENABLED(CONFIG_32BIT) ?
KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; + int n = sizeof(struct kvm_riscv_sbi_sta) / sizeof(unsigned long); + + for (int i = 0; i < n; i++) { + u64 reg = KVM_REG_RISCV | size | + KVM_REG_RISCV_SBI_STATE | + KVM_REG_RISCV_SBI_STA | i; + + if (uindices) { + if (put_user(reg, uindices)) + return -EFAULT; + uindices++; + } + } + + total += n; + } + + return total; +} + +static inline unsigned long num_sbi_regs(struct kvm_vcpu *vcpu) +{ + return copy_sbi_reg_indices(vcpu, NULL); +} + +static inline unsigned long num_vector_regs(const struct kvm_vcpu *vcpu) +{ + if (!riscv_isa_extension_available(vcpu->arch.isa, v)) + return 0; + + /* vstart, vl, vtype, vcsr, vlenb and 32 vector regs */ + return 37; +} + +static int copy_vector_reg_indices(const struct kvm_vcpu *vcpu, + u64 __user *uindices) +{ + const struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + int n = num_vector_regs(vcpu); + u64 reg, size; + int i; + + if (n == 0) + return 0; + + /* copy vstart, vl, vtype, vcsr and vlenb */ + size = IS_ENABLED(CONFIG_32BIT) ? KVM_REG_SIZE_U32 : KVM_REG_SIZE_U64; + for (i = 0; i < 5; i++) { + reg = KVM_REG_RISCV | size | KVM_REG_RISCV_VECTOR | i; if (uindices) { if (put_user(reg, uindices)) return -EFAULT; uindices++; } + } - reg = KVM_REG_RISCV | size | KVM_REG_RISCV_SBI_EXT | - KVM_REG_RISCV_SBI_MULTI_DIS | i; + /* vector_regs have a variable 'vlenb' size */ + size = __builtin_ctzl(cntx->vector.vlenb); + size <<= KVM_REG_SIZE_SHIFT; + for (i = 0; i < 32; i++) { + reg = KVM_REG_RISCV | KVM_REG_RISCV_VECTOR | size | + KVM_REG_RISCV_VECTOR_REG(i); if (uindices) { if (put_user(reg, uindices)) @@ -983,7 +1093,7 @@ static int copy_sbi_ext_reg_indices(u64 __user *uindices) } } - return num_sbi_ext_regs(); + return n; } /* @@ -1001,8 +1111,10 @@ unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu) res += num_timer_regs(); res += num_fp_f_regs(vcpu); res += num_fp_d_regs(vcpu); + res += num_vector_regs(vcpu); res += num_isa_ext_regs(vcpu); - res += num_sbi_ext_regs(); + res += num_sbi_ext_regs(vcpu); + res += num_sbi_regs(vcpu); return res; } @@ -1045,14 +1157,25 @@ int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu, return ret; uindices += ret; + ret = copy_vector_reg_indices(vcpu, uindices); + if (ret < 0) + return ret; + uindices += ret; + ret = copy_isa_ext_reg_indices(vcpu, uindices); if (ret < 0) return ret; uindices += ret; - ret = copy_sbi_ext_reg_indices(uindices); + ret = copy_sbi_ext_reg_indices(vcpu, uindices); if (ret < 0) return ret; + uindices += ret; + + ret = copy_sbi_reg_indices(vcpu, uindices); + if (ret < 0) + return ret; + uindices += ret; return 0; } @@ -1075,12 +1198,14 @@ int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_FP_D: return kvm_riscv_vcpu_set_reg_fp(vcpu, reg, KVM_REG_RISCV_FP_D); + case KVM_REG_RISCV_VECTOR: + return kvm_riscv_vcpu_set_reg_vector(vcpu, reg); case KVM_REG_RISCV_ISA_EXT: return kvm_riscv_vcpu_set_reg_isa_ext(vcpu, reg); case KVM_REG_RISCV_SBI_EXT: return kvm_riscv_vcpu_set_reg_sbi_ext(vcpu, reg); - case KVM_REG_RISCV_VECTOR: - return kvm_riscv_vcpu_set_reg_vector(vcpu, reg); + case KVM_REG_RISCV_SBI_STATE: + return kvm_riscv_vcpu_set_reg_sbi(vcpu, reg); default: break; } @@ -1106,12 +1231,14 @@ int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_FP_D: return kvm_riscv_vcpu_get_reg_fp(vcpu, reg, KVM_REG_RISCV_FP_D); + case KVM_REG_RISCV_VECTOR: + return kvm_riscv_vcpu_get_reg_vector(vcpu, reg); case KVM_REG_RISCV_ISA_EXT: return kvm_riscv_vcpu_get_reg_isa_ext(vcpu, reg); case KVM_REG_RISCV_SBI_EXT: return 
kvm_riscv_vcpu_get_reg_sbi_ext(vcpu, reg); - case KVM_REG_RISCV_VECTOR: - return kvm_riscv_vcpu_get_reg_vector(vcpu, reg); + case KVM_REG_RISCV_SBI_STATE: + return kvm_riscv_vcpu_get_reg_sbi(vcpu, reg); default: break; } diff --git a/arch/riscv/kvm/vcpu_sbi.c b/arch/riscv/kvm/vcpu_sbi.c index a04ff98085..72a2ffb8dc 100644 --- a/arch/riscv/kvm/vcpu_sbi.c +++ b/arch/riscv/kvm/vcpu_sbi.c @@ -71,6 +71,10 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = { .ext_ptr = &vcpu_sbi_ext_dbcn, }, { + .ext_idx = KVM_RISCV_SBI_EXT_STA, + .ext_ptr = &vcpu_sbi_ext_sta, + }, + { .ext_idx = KVM_RISCV_SBI_EXT_EXPERIMENTAL, .ext_ptr = &vcpu_sbi_ext_experimental, }, @@ -80,6 +84,34 @@ static const struct kvm_riscv_sbi_extension_entry sbi_ext[] = { }, }; +static const struct kvm_riscv_sbi_extension_entry * +riscv_vcpu_get_sbi_ext(struct kvm_vcpu *vcpu, unsigned long idx) +{ + const struct kvm_riscv_sbi_extension_entry *sext = NULL; + + if (idx >= KVM_RISCV_SBI_EXT_MAX) + return NULL; + + for (int i = 0; i < ARRAY_SIZE(sbi_ext); i++) { + if (sbi_ext[i].ext_idx == idx) { + sext = &sbi_ext[i]; + break; + } + } + + return sext; +} + +bool riscv_vcpu_supports_sbi_ext(struct kvm_vcpu *vcpu, int idx) +{ + struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + const struct kvm_riscv_sbi_extension_entry *sext; + + sext = riscv_vcpu_get_sbi_ext(vcpu, idx); + + return sext && scontext->ext_status[sext->ext_idx] != KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE; +} + void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run) { struct kvm_cpu_context *cp = &vcpu->arch.guest_context; @@ -140,28 +172,19 @@ static int riscv_vcpu_set_sbi_ext_single(struct kvm_vcpu *vcpu, unsigned long reg_num, unsigned long reg_val) { - unsigned long i; - const struct kvm_riscv_sbi_extension_entry *sext = NULL; struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; - - if (reg_num >= KVM_RISCV_SBI_EXT_MAX) - return -ENOENT; + const struct kvm_riscv_sbi_extension_entry *sext; if (reg_val != 1 && reg_val != 0) return -EINVAL; - for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { - if (sbi_ext[i].ext_idx == reg_num) { - sext = &sbi_ext[i]; - break; - } - } - if (!sext) + sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num); + if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE) return -ENOENT; scontext->ext_status[sext->ext_idx] = (reg_val) ? 
- KVM_RISCV_SBI_EXT_AVAILABLE : - KVM_RISCV_SBI_EXT_UNAVAILABLE; + KVM_RISCV_SBI_EXT_STATUS_ENABLED : + KVM_RISCV_SBI_EXT_STATUS_DISABLED; return 0; } @@ -170,24 +193,16 @@ static int riscv_vcpu_get_sbi_ext_single(struct kvm_vcpu *vcpu, unsigned long reg_num, unsigned long *reg_val) { - unsigned long i; - const struct kvm_riscv_sbi_extension_entry *sext = NULL; struct kvm_vcpu_sbi_context *scontext = &vcpu->arch.sbi_context; + const struct kvm_riscv_sbi_extension_entry *sext; - if (reg_num >= KVM_RISCV_SBI_EXT_MAX) - return -ENOENT; - - for (i = 0; i < ARRAY_SIZE(sbi_ext); i++) { - if (sbi_ext[i].ext_idx == reg_num) { - sext = &sbi_ext[i]; - break; - } - } - if (!sext) + sext = riscv_vcpu_get_sbi_ext(vcpu, reg_num); + if (!sext || scontext->ext_status[sext->ext_idx] == KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE) return -ENOENT; *reg_val = scontext->ext_status[sext->ext_idx] == - KVM_RISCV_SBI_EXT_AVAILABLE; + KVM_RISCV_SBI_EXT_STATUS_ENABLED; + return 0; } @@ -310,6 +325,69 @@ int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu, return 0; } +int kvm_riscv_vcpu_set_reg_sbi(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_SBI_STATE); + unsigned long reg_subtype, reg_val; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + if (copy_from_user(®_val, uaddr, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; + + switch (reg_subtype) { + case KVM_REG_RISCV_SBI_STA: + return kvm_riscv_vcpu_set_reg_sbi_sta(vcpu, reg_num, reg_val); + default: + return -EINVAL; + } + + return 0; +} + +int kvm_riscv_vcpu_get_reg_sbi(struct kvm_vcpu *vcpu, + const struct kvm_one_reg *reg) +{ + unsigned long __user *uaddr = + (unsigned long __user *)(unsigned long)reg->addr; + unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK | + KVM_REG_SIZE_MASK | + KVM_REG_RISCV_SBI_STATE); + unsigned long reg_subtype, reg_val; + int ret; + + if (KVM_REG_SIZE(reg->id) != sizeof(unsigned long)) + return -EINVAL; + + reg_subtype = reg_num & KVM_REG_RISCV_SUBTYPE_MASK; + reg_num &= ~KVM_REG_RISCV_SUBTYPE_MASK; + + switch (reg_subtype) { + case KVM_REG_RISCV_SBI_STA: + ret = kvm_riscv_vcpu_get_reg_sbi_sta(vcpu, reg_num, ®_val); + break; + default: + return -EINVAL; + } + + if (ret) + return ret; + + if (copy_to_user(uaddr, ®_val, KVM_REG_SIZE(reg->id))) + return -EFAULT; + + return 0; +} + const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( struct kvm_vcpu *vcpu, unsigned long extid) { @@ -325,7 +403,7 @@ const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext( if (ext->extid_start <= extid && ext->extid_end >= extid) { if (entry->ext_idx >= KVM_RISCV_SBI_EXT_MAX || scontext->ext_status[entry->ext_idx] == - KVM_RISCV_SBI_EXT_AVAILABLE) + KVM_RISCV_SBI_EXT_STATUS_ENABLED) return ext; return NULL; @@ -413,12 +491,12 @@ void kvm_riscv_vcpu_sbi_init(struct kvm_vcpu *vcpu) if (ext->probe && !ext->probe(vcpu)) { scontext->ext_status[entry->ext_idx] = - KVM_RISCV_SBI_EXT_UNAVAILABLE; + KVM_RISCV_SBI_EXT_STATUS_UNAVAILABLE; continue; } - scontext->ext_status[entry->ext_idx] = ext->default_unavail ? - KVM_RISCV_SBI_EXT_UNAVAILABLE : - KVM_RISCV_SBI_EXT_AVAILABLE; + scontext->ext_status[entry->ext_idx] = ext->default_disabled ? 
+ KVM_RISCV_SBI_EXT_STATUS_DISABLED : + KVM_RISCV_SBI_EXT_STATUS_ENABLED; } } diff --git a/arch/riscv/kvm/vcpu_sbi_replace.c b/arch/riscv/kvm/vcpu_sbi_replace.c index 23b57c931b..9c2ab3dfa9 100644 --- a/arch/riscv/kvm/vcpu_sbi_replace.c +++ b/arch/riscv/kvm/vcpu_sbi_replace.c @@ -204,6 +204,6 @@ static int kvm_sbi_ext_dbcn_handler(struct kvm_vcpu *vcpu, const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_dbcn = { .extid_start = SBI_EXT_DBCN, .extid_end = SBI_EXT_DBCN, - .default_unavail = true, + .default_disabled = true, .handler = kvm_sbi_ext_dbcn_handler, }; diff --git a/arch/riscv/kvm/vcpu_sbi_sta.c b/arch/riscv/kvm/vcpu_sbi_sta.c new file mode 100644 index 0000000000..d8cf9ca28c --- /dev/null +++ b/arch/riscv/kvm/vcpu_sbi_sta.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2023 Ventana Micro Systems Inc. + */ + +#include <linux/kconfig.h> +#include <linux/kernel.h> +#include <linux/kvm_host.h> +#include <linux/mm.h> +#include <linux/sizes.h> + +#include <asm/bug.h> +#include <asm/current.h> +#include <asm/kvm_vcpu_sbi.h> +#include <asm/page.h> +#include <asm/sbi.h> +#include <asm/uaccess.h> + +void kvm_riscv_vcpu_sbi_sta_reset(struct kvm_vcpu *vcpu) +{ + vcpu->arch.sta.shmem = INVALID_GPA; + vcpu->arch.sta.last_steal = 0; +} + +void kvm_riscv_vcpu_record_steal_time(struct kvm_vcpu *vcpu) +{ + gpa_t shmem = vcpu->arch.sta.shmem; + u64 last_steal = vcpu->arch.sta.last_steal; + __le32 __user *sequence_ptr; + __le64 __user *steal_ptr; + __le32 sequence_le; + __le64 steal_le; + u32 sequence; + u64 steal; + unsigned long hva; + gfn_t gfn; + + if (shmem == INVALID_GPA) + return; + + /* + * shmem is 64-byte aligned (see the enforcement in + * kvm_sbi_sta_steal_time_set_shmem()) and the size of sbi_sta_struct + * is 64 bytes, so we know all its offsets are in the same page. 
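+ *
+ * The sequence/steal update below follows the SBI STA even/odd
+ * convention: sequence is odd while an update is in flight, so a
+ * guest-side reader retries around it. A sketch of such a reader
+ * (guest code, not part of this file):
+ *
+ *	do {
+ *		seq = le32_to_cpu(READ_ONCE(sta->sequence));
+ *		steal = le64_to_cpu(READ_ONCE(sta->steal));
+ *	} while ((seq & 1) ||
+ *		 seq != le32_to_cpu(READ_ONCE(sta->sequence)));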
+ */ + gfn = shmem >> PAGE_SHIFT; + hva = kvm_vcpu_gfn_to_hva(vcpu, gfn); + + if (WARN_ON(kvm_is_error_hva(hva))) { + vcpu->arch.sta.shmem = INVALID_GPA; + return; + } + + sequence_ptr = (__le32 __user *)(hva + offset_in_page(shmem) + + offsetof(struct sbi_sta_struct, sequence)); + steal_ptr = (__le64 __user *)(hva + offset_in_page(shmem) + + offsetof(struct sbi_sta_struct, steal)); + + if (WARN_ON(get_user(sequence_le, sequence_ptr))) + return; + + sequence = le32_to_cpu(sequence_le); + sequence += 1; + + if (WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr))) + return; + + if (!WARN_ON(get_user(steal_le, steal_ptr))) { + steal = le64_to_cpu(steal_le); + vcpu->arch.sta.last_steal = READ_ONCE(current->sched_info.run_delay); + steal += vcpu->arch.sta.last_steal - last_steal; + WARN_ON(put_user(cpu_to_le64(steal), steal_ptr)); + } + + sequence += 1; + WARN_ON(put_user(cpu_to_le32(sequence), sequence_ptr)); + + kvm_vcpu_mark_page_dirty(vcpu, gfn); +} + +static int kvm_sbi_sta_steal_time_set_shmem(struct kvm_vcpu *vcpu) +{ + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long shmem_phys_lo = cp->a0; + unsigned long shmem_phys_hi = cp->a1; + u32 flags = cp->a2; + struct sbi_sta_struct zero_sta = {0}; + unsigned long hva; + bool writable; + gpa_t shmem; + int ret; + + if (flags != 0) + return SBI_ERR_INVALID_PARAM; + + if (shmem_phys_lo == SBI_STA_SHMEM_DISABLE && + shmem_phys_hi == SBI_STA_SHMEM_DISABLE) { + vcpu->arch.sta.shmem = INVALID_GPA; + return 0; + } + + if (shmem_phys_lo & (SZ_64 - 1)) + return SBI_ERR_INVALID_PARAM; + + shmem = shmem_phys_lo; + + if (shmem_phys_hi != 0) { + if (IS_ENABLED(CONFIG_32BIT)) + shmem |= ((gpa_t)shmem_phys_hi << 32); + else + return SBI_ERR_INVALID_ADDRESS; + } + + hva = kvm_vcpu_gfn_to_hva_prot(vcpu, shmem >> PAGE_SHIFT, &writable); + if (kvm_is_error_hva(hva) || !writable) + return SBI_ERR_INVALID_ADDRESS; + + ret = kvm_vcpu_write_guest(vcpu, shmem, &zero_sta, sizeof(zero_sta)); + if (ret) + return SBI_ERR_FAILURE; + + vcpu->arch.sta.shmem = shmem; + vcpu->arch.sta.last_steal = current->sched_info.run_delay; + + return 0; +} + +static int kvm_sbi_ext_sta_handler(struct kvm_vcpu *vcpu, struct kvm_run *run, + struct kvm_vcpu_sbi_return *retdata) +{ + struct kvm_cpu_context *cp = &vcpu->arch.guest_context; + unsigned long funcid = cp->a6; + int ret; + + switch (funcid) { + case SBI_EXT_STA_STEAL_TIME_SET_SHMEM: + ret = kvm_sbi_sta_steal_time_set_shmem(vcpu); + break; + default: + ret = SBI_ERR_NOT_SUPPORTED; + break; + } + + retdata->err_val = ret; + + return 0; +} + +static unsigned long kvm_sbi_ext_sta_probe(struct kvm_vcpu *vcpu) +{ + return !!sched_info_on(); +} + +const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_sta = { + .extid_start = SBI_EXT_STA, + .extid_end = SBI_EXT_STA, + .handler = kvm_sbi_ext_sta_handler, + .probe = kvm_sbi_ext_sta_probe, +}; + +int kvm_riscv_vcpu_get_reg_sbi_sta(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long *reg_val) +{ + switch (reg_num) { + case KVM_REG_RISCV_SBI_STA_REG(shmem_lo): + *reg_val = (unsigned long)vcpu->arch.sta.shmem; + break; + case KVM_REG_RISCV_SBI_STA_REG(shmem_hi): + if (IS_ENABLED(CONFIG_32BIT)) + *reg_val = upper_32_bits(vcpu->arch.sta.shmem); + else + *reg_val = 0; + break; + default: + return -EINVAL; + } + + return 0; +} + +int kvm_riscv_vcpu_set_reg_sbi_sta(struct kvm_vcpu *vcpu, + unsigned long reg_num, + unsigned long reg_val) +{ + switch (reg_num) { + case KVM_REG_RISCV_SBI_STA_REG(shmem_lo): + if (IS_ENABLED(CONFIG_32BIT)) { + gpa_t hi = 
upper_32_bits(vcpu->arch.sta.shmem); + + vcpu->arch.sta.shmem = reg_val; + vcpu->arch.sta.shmem |= hi << 32; + } else { + vcpu->arch.sta.shmem = reg_val; + } + break; + case KVM_REG_RISCV_SBI_STA_REG(shmem_hi): + if (IS_ENABLED(CONFIG_32BIT)) { + gpa_t lo = lower_32_bits(vcpu->arch.sta.shmem); + + vcpu->arch.sta.shmem = ((gpa_t)reg_val << 32); + vcpu->arch.sta.shmem |= lo; + } else if (reg_val != 0) { + return -EINVAL; + } + break; + default: + return -EINVAL; + } + + return 0; +} diff --git a/arch/riscv/kvm/vcpu_switch.S b/arch/riscv/kvm/vcpu_switch.S index d74df8eb4d..0c26189aa0 100644 --- a/arch/riscv/kvm/vcpu_switch.S +++ b/arch/riscv/kvm/vcpu_switch.S @@ -15,7 +15,7 @@ .altmacro .option norelax -ENTRY(__kvm_riscv_switch_to) +SYM_FUNC_START(__kvm_riscv_switch_to) /* Save Host GPRs (except A0 and T0-T6) */ REG_S ra, (KVM_ARCH_HOST_RA)(a0) REG_S sp, (KVM_ARCH_HOST_SP)(a0) @@ -45,7 +45,7 @@ ENTRY(__kvm_riscv_switch_to) REG_L t0, (KVM_ARCH_GUEST_SSTATUS)(a0) REG_L t1, (KVM_ARCH_GUEST_HSTATUS)(a0) REG_L t2, (KVM_ARCH_GUEST_SCOUNTEREN)(a0) - la t4, __kvm_switch_return + la t4, .Lkvm_switch_return REG_L t5, (KVM_ARCH_GUEST_SEPC)(a0) /* Save Host and Restore Guest SSTATUS */ @@ -113,7 +113,7 @@ ENTRY(__kvm_riscv_switch_to) /* Back to Host */ .align 2 -__kvm_switch_return: +.Lkvm_switch_return: /* Swap Guest A0 with SSCRATCH */ csrrw a0, CSR_SSCRATCH, a0 @@ -208,9 +208,9 @@ __kvm_switch_return: /* Return to C code */ ret -ENDPROC(__kvm_riscv_switch_to) +SYM_FUNC_END(__kvm_riscv_switch_to) -ENTRY(__kvm_riscv_unpriv_trap) +SYM_CODE_START(__kvm_riscv_unpriv_trap) /* * We assume that faulting unpriv load/store instruction is * 4-byte long and blindly increment SEPC by 4. @@ -231,12 +231,10 @@ ENTRY(__kvm_riscv_unpriv_trap) csrr a1, CSR_HTINST REG_S a1, (KVM_ARCH_TRAP_HTINST)(a0) sret -ENDPROC(__kvm_riscv_unpriv_trap) +SYM_CODE_END(__kvm_riscv_unpriv_trap) #ifdef CONFIG_FPU - .align 3 - .global __kvm_riscv_fp_f_save -__kvm_riscv_fp_f_save: +SYM_FUNC_START(__kvm_riscv_fp_f_save) csrr t2, CSR_SSTATUS li t1, SR_FS csrs CSR_SSTATUS, t1 @@ -276,10 +274,9 @@ __kvm_riscv_fp_f_save: sw t0, KVM_ARCH_FP_F_FCSR(a0) csrw CSR_SSTATUS, t2 ret +SYM_FUNC_END(__kvm_riscv_fp_f_save) - .align 3 - .global __kvm_riscv_fp_d_save -__kvm_riscv_fp_d_save: +SYM_FUNC_START(__kvm_riscv_fp_d_save) csrr t2, CSR_SSTATUS li t1, SR_FS csrs CSR_SSTATUS, t1 @@ -319,10 +316,9 @@ __kvm_riscv_fp_d_save: sw t0, KVM_ARCH_FP_D_FCSR(a0) csrw CSR_SSTATUS, t2 ret +SYM_FUNC_END(__kvm_riscv_fp_d_save) - .align 3 - .global __kvm_riscv_fp_f_restore -__kvm_riscv_fp_f_restore: +SYM_FUNC_START(__kvm_riscv_fp_f_restore) csrr t2, CSR_SSTATUS li t1, SR_FS lw t0, KVM_ARCH_FP_F_FCSR(a0) @@ -362,10 +358,9 @@ __kvm_riscv_fp_f_restore: fscsr t0 csrw CSR_SSTATUS, t2 ret +SYM_FUNC_END(__kvm_riscv_fp_f_restore) - .align 3 - .global __kvm_riscv_fp_d_restore -__kvm_riscv_fp_d_restore: +SYM_FUNC_START(__kvm_riscv_fp_d_restore) csrr t2, CSR_SSTATUS li t1, SR_FS lw t0, KVM_ARCH_FP_D_FCSR(a0) @@ -405,4 +400,5 @@ __kvm_riscv_fp_d_restore: fscsr t0 csrw CSR_SSTATUS, t2 ret +SYM_FUNC_END(__kvm_riscv_fp_d_restore) #endif diff --git a/arch/riscv/kvm/vcpu_vector.c b/arch/riscv/kvm/vcpu_vector.c index b339a2682f..d92d134804 100644 --- a/arch/riscv/kvm/vcpu_vector.c +++ b/arch/riscv/kvm/vcpu_vector.c @@ -76,6 +76,7 @@ int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu, cntx->vector.datap = kmalloc(riscv_v_vsize, GFP_KERNEL); if (!cntx->vector.datap) return -ENOMEM; + cntx->vector.vlenb = riscv_v_vsize / 32; vcpu->arch.host_context.vector.datap = 
kzalloc(riscv_v_vsize, GFP_KERNEL); if (!vcpu->arch.host_context.vector.datap) @@ -115,6 +116,9 @@ static int kvm_riscv_vcpu_vreg_addr(struct kvm_vcpu *vcpu, case KVM_REG_RISCV_VECTOR_CSR_REG(vcsr): *reg_addr = &cntx->vector.vcsr; break; + case KVM_REG_RISCV_VECTOR_CSR_REG(vlenb): + *reg_addr = &cntx->vector.vlenb; + break; case KVM_REG_RISCV_VECTOR_CSR_REG(datap): default: return -ENOENT; @@ -173,6 +177,18 @@ int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu, if (!riscv_isa_extension_available(isa, v)) return -ENOENT; + if (reg_num == KVM_REG_RISCV_VECTOR_CSR_REG(vlenb)) { + struct kvm_cpu_context *cntx = &vcpu->arch.guest_context; + unsigned long reg_val; + + if (copy_from_user(®_val, uaddr, reg_size)) + return -EFAULT; + if (reg_val != cntx->vector.vlenb) + return -EINVAL; + + return 0; + } + rc = kvm_riscv_vcpu_vreg_addr(vcpu, reg_num, reg_size, ®_addr); if (rc) return rc; diff --git a/arch/riscv/kvm/vm.c b/arch/riscv/kvm/vm.c index 7e2b50c692..ce58bc48e5 100644 --- a/arch/riscv/kvm/vm.c +++ b/arch/riscv/kvm/vm.c @@ -179,7 +179,6 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext) r = kvm_riscv_aia_available(); break; case KVM_CAP_IOEVENTFD: - case KVM_CAP_DEVICE_CTRL: case KVM_CAP_USER_MEMORY: case KVM_CAP_SYNC_MMU: case KVM_CAP_DESTROY_MEMORY_REGION_WORKS: diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile index 26cb2502ec..bd6e6c1b04 100644 --- a/arch/riscv/lib/Makefile +++ b/arch/riscv/lib/Makefile @@ -6,8 +6,14 @@ lib-y += memmove.o lib-y += strcmp.o lib-y += strlen.o lib-y += strncmp.o +lib-y += csum.o +ifeq ($(CONFIG_MMU), y) +lib-$(CONFIG_RISCV_ISA_V) += uaccess_vector.o +endif lib-$(CONFIG_MMU) += uaccess.o lib-$(CONFIG_64BIT) += tishift.o lib-$(CONFIG_RISCV_ISA_ZICBOZ) += clear_page.o obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o +lib-$(CONFIG_RISCV_ISA_V) += xor.o +lib-$(CONFIG_RISCV_ISA_V) += riscv_v_helpers.o diff --git a/arch/riscv/lib/clear_page.S b/arch/riscv/lib/clear_page.S index b22de12311..20ff03f5b0 100644 --- a/arch/riscv/lib/clear_page.S +++ b/arch/riscv/lib/clear_page.S @@ -4,9 +4,9 @@ */ #include <linux/linkage.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/alternative-macros.h> -#include <asm-generic/export.h> #include <asm/hwcap.h> #include <asm/insn-def.h> #include <asm/page.h> diff --git a/arch/riscv/lib/csum.c b/arch/riscv/lib/csum.c new file mode 100644 index 0000000000..74af3ab520 --- /dev/null +++ b/arch/riscv/lib/csum.c @@ -0,0 +1,328 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Checksum library + * + * Influenced by arch/arm64/lib/csum.c + * Copyright (C) 2023 Rivos Inc. 
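+ *
+ * The Internet checksum is a 16-bit ones' complement sum, so a wider
+ * accumulator can always be folded back down with end-around carries.
+ * In short, the non-Zbb fold used below on rv64 is:
+ *
+ *	sum += ror64(sum, 32);	// fold 64 -> 32, carries wrap around
+ *	sum >>= 32;
+ *	return csum_fold((__force __wsum)sum);	// fold 32 -> 16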
+ */ +#include <linux/bitops.h> +#include <linux/compiler.h> +#include <linux/jump_label.h> +#include <linux/kasan-checks.h> +#include <linux/kernel.h> + +#include <asm/cpufeature.h> + +#include <net/checksum.h> + +/* Default version is sufficient for 32 bit */ +#ifndef CONFIG_32BIT +__sum16 csum_ipv6_magic(const struct in6_addr *saddr, + const struct in6_addr *daddr, + __u32 len, __u8 proto, __wsum csum) +{ + unsigned int ulen, uproto; + unsigned long sum = (__force unsigned long)csum; + + sum += (__force unsigned long)saddr->s6_addr32[0]; + sum += (__force unsigned long)saddr->s6_addr32[1]; + sum += (__force unsigned long)saddr->s6_addr32[2]; + sum += (__force unsigned long)saddr->s6_addr32[3]; + + sum += (__force unsigned long)daddr->s6_addr32[0]; + sum += (__force unsigned long)daddr->s6_addr32[1]; + sum += (__force unsigned long)daddr->s6_addr32[2]; + sum += (__force unsigned long)daddr->s6_addr32[3]; + + ulen = (__force unsigned int)htonl((unsigned int)len); + sum += ulen; + + uproto = (__force unsigned int)htonl(proto); + sum += uproto; + + /* + * Zbb support saves 4 instructions, so not worth checking without + * alternatives if supported + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && + IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + unsigned long fold_temp; + + /* + * Zbb is likely available when the kernel is compiled with Zbb + * support, so nop when Zbb is available and jump when Zbb is + * not available. + */ + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : + : + : + : no_zbb); + asm(".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[sum], 32 \n\ + add %[sum], %[fold_temp], %[sum] \n\ + srli %[sum], %[sum], 32 \n\ + not %[fold_temp], %[sum] \n\ + roriw %[sum], %[sum], 16 \n\ + subw %[sum], %[fold_temp], %[sum] \n\ + .option pop" + : [sum] "+r" (sum), [fold_temp] "=&r" (fold_temp)); + return (__force __sum16)(sum >> 16); + } +no_zbb: + sum += ror64(sum, 32); + sum >>= 32; + return csum_fold((__force __wsum)sum); +} +EXPORT_SYMBOL(csum_ipv6_magic); +#endif /* !CONFIG_32BIT */ + +#ifdef CONFIG_32BIT +#define OFFSET_MASK 3 +#elif CONFIG_64BIT +#define OFFSET_MASK 7 +#endif + +static inline __no_sanitize_address unsigned long +do_csum_common(const unsigned long *ptr, const unsigned long *end, + unsigned long data) +{ + unsigned int shift; + unsigned long csum = 0, carry = 0; + + /* + * Do 32-bit reads on RV32 and 64-bit reads otherwise. This should be + * faster than doing 32-bit reads on architectures that support larger + * reads. + */ + while (ptr < end) { + csum += data; + carry += csum < data; + data = *(ptr++); + } + + /* + * Perform alignment (and over-read) bytes on the tail if any bytes + * leftover. + */ + shift = ((long)ptr - (long)end) * 8; +#ifdef __LITTLE_ENDIAN + data = (data << shift) >> shift; +#else + data = (data >> shift) << shift; +#endif + csum += data; + carry += csum < data; + csum += carry; + csum += csum < carry; + + return csum; +} + +/* + * Algorithm accounts for buff being misaligned. + * If buff is not aligned, will over-read bytes but not use the bytes that it + * shouldn't. The same thing will occur on the tail-end of the read. + */ +static inline __no_sanitize_address unsigned int +do_csum_with_alignment(const unsigned char *buff, int len) +{ + unsigned int offset, shift; + unsigned long csum, data; + const unsigned long *ptr, *end; + + /* + * Align address to closest word (double word on rv64) that comes before + * buff. This should always be in the same page and cache line. 
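+ * Worked example on little-endian rv64: for buff = 0x1003, offset is
+ * buff & OFFSET_MASK = 3, so ptr starts at 0x1000, shift is 24, and
+ * "(data >> 24) << 24" clears the three over-read low-order bytes so
+ * they never enter the sum.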
+ * Directly call KASAN with the alignment we will be using. + */ + offset = (unsigned long)buff & OFFSET_MASK; + kasan_check_read(buff, len); + ptr = (const unsigned long *)(buff - offset); + + /* + * Clear the most significant bytes that were over-read if buff was not + * aligned. + */ + shift = offset * 8; + data = *(ptr++); +#ifdef __LITTLE_ENDIAN + data = (data >> shift) << shift; +#else + data = (data << shift) >> shift; +#endif + end = (const unsigned long *)(buff + len); + csum = do_csum_common(ptr, end, data); + +#ifdef CC_HAS_ASM_GOTO_TIED_OUTPUT + /* + * Zbb support saves 6 instructions, so not worth checking without + * alternatives if supported + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && + IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + unsigned long fold_temp; + + /* + * Zbb is likely available when the kernel is compiled with Zbb + * support, so nop when Zbb is available and jump when Zbb is + * not available. + */ + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : + : + : + : no_zbb); + +#ifdef CONFIG_32BIT + asm_goto_output(".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[csum], 16 \n\ + andi %[offset], %[offset], 1 \n\ + add %[csum], %[fold_temp], %[csum] \n\ + beq %[offset], zero, %l[end] \n\ + rev8 %[csum], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp) + : [offset] "r" (offset) + : + : end); + + return (unsigned short)csum; +#else /* !CONFIG_32BIT */ + asm_goto_output(".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[csum], 32 \n\ + add %[csum], %[fold_temp], %[csum] \n\ + srli %[csum], %[csum], 32 \n\ + roriw %[fold_temp], %[csum], 16 \n\ + addw %[csum], %[fold_temp], %[csum] \n\ + andi %[offset], %[offset], 1 \n\ + beq %[offset], zero, %l[end] \n\ + rev8 %[csum], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp) + : [offset] "r" (offset) + : + : end); + + return (csum << 16) >> 48; +#endif /* !CONFIG_32BIT */ +end: + return csum >> 16; + } +no_zbb: +#endif /* CC_HAS_ASM_GOTO_TIED_OUTPUT */ +#ifndef CONFIG_32BIT + csum += ror64(csum, 32); + csum >>= 32; +#endif + csum = (u32)csum + ror32((u32)csum, 16); + if (offset & 1) + return (u16)swab32(csum); + return csum >> 16; +} + +/* + * Does not perform alignment, should only be used if machine has fast + * misaligned accesses, or when buff is known to be aligned. + */ +static inline __no_sanitize_address unsigned int +do_csum_no_alignment(const unsigned char *buff, int len) +{ + unsigned long csum, data; + const unsigned long *ptr, *end; + + ptr = (const unsigned long *)(buff); + data = *(ptr++); + + kasan_check_read(buff, len); + + end = (const unsigned long *)(buff + len); + csum = do_csum_common(ptr, end, data); + + /* + * Zbb support saves 6 instructions, so not worth checking without + * alternatives if supported + */ + if (IS_ENABLED(CONFIG_RISCV_ISA_ZBB) && + IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) { + unsigned long fold_temp; + + /* + * Zbb is likely available when the kernel is compiled with Zbb + * support, so nop when Zbb is available and jump when Zbb is + * not available. 
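+ * Concretely, the ALTERNATIVE() site is patched at boot, roughly:
+ *
+ *	Zbb absent:	j no_zbb	(take the generic fold below)
+ *	Zbb present:	nop		(fall through to the rori fold)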
+ */ + asm goto(ALTERNATIVE("j %l[no_zbb]", "nop", 0, + RISCV_ISA_EXT_ZBB, 1) + : + : + : + : no_zbb); + +#ifdef CONFIG_32BIT + asm (".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[csum], 16 \n\ + add %[csum], %[fold_temp], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp) + : + : ); + +#else /* !CONFIG_32BIT */ + asm (".option push \n\ + .option arch,+zbb \n\ + rori %[fold_temp], %[csum], 32 \n\ + add %[csum], %[fold_temp], %[csum] \n\ + srli %[csum], %[csum], 32 \n\ + roriw %[fold_temp], %[csum], 16 \n\ + addw %[csum], %[fold_temp], %[csum] \n\ + .option pop" + : [csum] "+r" (csum), [fold_temp] "=&r" (fold_temp) + : + : ); +#endif /* !CONFIG_32BIT */ + return csum >> 16; + } +no_zbb: +#ifndef CONFIG_32BIT + csum += ror64(csum, 32); + csum >>= 32; +#endif + csum = (u32)csum + ror32((u32)csum, 16); + return csum >> 16; +} + +/* + * Perform a checksum on an arbitrary memory address. + * Will do a light-weight address alignment if buff is misaligned, unless + * cpu supports fast misaligned accesses. + */ +unsigned int do_csum(const unsigned char *buff, int len) +{ + if (unlikely(len <= 0)) + return 0; + + /* + * Significant performance gains can be seen by not doing alignment + * on machines with fast misaligned accesses. + * + * There is some duplicate code between the "with_alignment" and + * "no_alignment" implementations, but the overlap is too awkward to be + * able to fit in one function without introducing multiple static + * branches. The largest chunk of overlap was delegated into the + * do_csum_common function. + */ + if (static_branch_likely(&fast_misaligned_access_speed_key)) + return do_csum_no_alignment(buff, len); + + if (((unsigned long)buff & OFFSET_MASK) == 0) + return do_csum_no_alignment(buff, len); + + return do_csum_with_alignment(buff, len); +} diff --git a/arch/riscv/lib/riscv_v_helpers.c b/arch/riscv/lib/riscv_v_helpers.c new file mode 100644 index 0000000000..be38a93ced --- /dev/null +++ b/arch/riscv/lib/riscv_v_helpers.c @@ -0,0 +1,45 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * Copyright (C) 2023 SiFive + * Author: Andy Chiu <andy.chiu@sifive.com> + */ +#include <linux/linkage.h> +#include <asm/asm.h> + +#include <asm/vector.h> +#include <asm/simd.h> + +#ifdef CONFIG_MMU +#include <asm/asm-prototypes.h> +#endif + +#ifdef CONFIG_MMU +size_t riscv_v_usercopy_threshold = CONFIG_RISCV_ISA_V_UCOPY_THRESHOLD; +int __asm_vector_usercopy(void *dst, void *src, size_t n); +int fallback_scalar_usercopy(void *dst, void *src, size_t n); +asmlinkage int enter_vector_usercopy(void *dst, void *src, size_t n) +{ + size_t remain, copied; + + /* skip has_vector() check because it has been done by the asm */ + if (!may_use_simd()) + goto fallback; + + kernel_vector_begin(); + remain = __asm_vector_usercopy(dst, src, n); + kernel_vector_end(); + + if (remain) { + copied = n - remain; + dst += copied; + src += copied; + n = remain; + goto fallback; + } + + return remain; + +fallback: + return fallback_scalar_usercopy(dst, src, n); +} +#endif diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S index ef90075c4b..c8294bf72c 100644 --- a/arch/riscv/lib/tishift.S +++ b/arch/riscv/lib/tishift.S @@ -4,7 +4,7 @@ */ #include <linux/linkage.h> -#include <asm-generic/export.h> +#include <linux/export.h> SYM_FUNC_START(__lshrti3) beqz a2, .L1 diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S index 3ab438f30d..bc22c078ab 100644 --- a/arch/riscv/lib/uaccess.S +++ b/arch/riscv/lib/uaccess.S @@ -1,8 +1,10 @@
#include <linux/linkage.h> -#include <asm-generic/export.h> +#include <linux/export.h> #include <asm/asm.h> #include <asm/asm-extable.h> #include <asm/csr.h> +#include <asm/hwcap.h> +#include <asm/alternative-macros.h> .macro fixup op reg addr lbl 100: @@ -11,6 +13,13 @@ .endm SYM_FUNC_START(__asm_copy_to_user) +#ifdef CONFIG_RISCV_ISA_V + ALTERNATIVE("j fallback_scalar_usercopy", "nop", 0, RISCV_ISA_EXT_v, CONFIG_RISCV_ISA_V) + REG_L t0, riscv_v_usercopy_threshold + bltu a2, t0, fallback_scalar_usercopy + tail enter_vector_usercopy +#endif +SYM_FUNC_START(fallback_scalar_usercopy) /* Enable access to user memory */ li t6, SR_SUM @@ -181,6 +190,7 @@ SYM_FUNC_START(__asm_copy_to_user) sub a0, t5, a0 ret SYM_FUNC_END(__asm_copy_to_user) +SYM_FUNC_END(fallback_scalar_usercopy) EXPORT_SYMBOL(__asm_copy_to_user) SYM_FUNC_ALIAS(__asm_copy_from_user, __asm_copy_to_user) EXPORT_SYMBOL(__asm_copy_from_user) diff --git a/arch/riscv/lib/uaccess_vector.S b/arch/riscv/lib/uaccess_vector.S new file mode 100644 index 0000000000..51ab5588e9 --- /dev/null +++ b/arch/riscv/lib/uaccess_vector.S @@ -0,0 +1,53 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ + +#include <linux/linkage.h> +#include <asm-generic/export.h> +#include <asm/asm.h> +#include <asm/asm-extable.h> +#include <asm/csr.h> + +#define pDst a0 +#define pSrc a1 +#define iNum a2 + +#define iVL a3 + +#define ELEM_LMUL_SETTING m8 +#define vData v0 + + .macro fixup op reg addr lbl +100: + \op \reg, \addr + _asm_extable 100b, \lbl + .endm + +SYM_FUNC_START(__asm_vector_usercopy) + /* Enable access to user memory */ + li t6, SR_SUM + csrs CSR_STATUS, t6 + +loop: + vsetvli iVL, iNum, e8, ELEM_LMUL_SETTING, ta, ma + fixup vle8.v vData, (pSrc), 10f + sub iNum, iNum, iVL + add pSrc, pSrc, iVL + fixup vse8.v vData, (pDst), 11f + add pDst, pDst, iVL + bnez iNum, loop + + /* Exception fixup for vector load is shared with normal exit */ +10: + /* Disable access to user memory */ + csrc CSR_STATUS, t6 + mv a0, iNum + ret + + /* Exception fixup code for vector store. 
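+ * When vse8.v faults, iNum has already been decremented by iVL for
+ * this iteration while only CSR_VSTART elements of it were stored, so
+ * the byte count handed back to the scalar fallback is rebuilt as:
+ *
+ *	remaining = (iNum + iVL) - vstart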
*/ +11: + /* Undo the subtraction after vle8.v */ + add iNum, iNum, iVL + /* Make sure the scalar fallback skip already processed bytes */ + csrr t2, CSR_VSTART + sub iNum, iNum, t2 + j 10b +SYM_FUNC_END(__asm_vector_usercopy) diff --git a/arch/riscv/lib/xor.S b/arch/riscv/lib/xor.S new file mode 100644 index 0000000000..b28f2430e5 --- /dev/null +++ b/arch/riscv/lib/xor.S @@ -0,0 +1,81 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * Copyright (C) 2021 SiFive + */ +#include <linux/linkage.h> +#include <linux/export.h> +#include <asm/asm.h> + +SYM_FUNC_START(xor_regs_2_) + vsetvli a3, a0, e8, m8, ta, ma + vle8.v v0, (a1) + vle8.v v8, (a2) + sub a0, a0, a3 + vxor.vv v16, v0, v8 + add a2, a2, a3 + vse8.v v16, (a1) + add a1, a1, a3 + bnez a0, xor_regs_2_ + ret +SYM_FUNC_END(xor_regs_2_) +EXPORT_SYMBOL(xor_regs_2_) + +SYM_FUNC_START(xor_regs_3_) + vsetvli a4, a0, e8, m8, ta, ma + vle8.v v0, (a1) + vle8.v v8, (a2) + sub a0, a0, a4 + vxor.vv v0, v0, v8 + vle8.v v16, (a3) + add a2, a2, a4 + vxor.vv v16, v0, v16 + add a3, a3, a4 + vse8.v v16, (a1) + add a1, a1, a4 + bnez a0, xor_regs_3_ + ret +SYM_FUNC_END(xor_regs_3_) +EXPORT_SYMBOL(xor_regs_3_) + +SYM_FUNC_START(xor_regs_4_) + vsetvli a5, a0, e8, m8, ta, ma + vle8.v v0, (a1) + vle8.v v8, (a2) + sub a0, a0, a5 + vxor.vv v0, v0, v8 + vle8.v v16, (a3) + add a2, a2, a5 + vxor.vv v0, v0, v16 + vle8.v v24, (a4) + add a3, a3, a5 + vxor.vv v16, v0, v24 + add a4, a4, a5 + vse8.v v16, (a1) + add a1, a1, a5 + bnez a0, xor_regs_4_ + ret +SYM_FUNC_END(xor_regs_4_) +EXPORT_SYMBOL(xor_regs_4_) + +SYM_FUNC_START(xor_regs_5_) + vsetvli a6, a0, e8, m8, ta, ma + vle8.v v0, (a1) + vle8.v v8, (a2) + sub a0, a0, a6 + vxor.vv v0, v0, v8 + vle8.v v16, (a3) + add a2, a2, a6 + vxor.vv v0, v0, v16 + vle8.v v24, (a4) + add a3, a3, a6 + vxor.vv v0, v0, v24 + vle8.v v8, (a5) + add a4, a4, a6 + vxor.vv v16, v0, v8 + add a5, a5, a6 + vse8.v v16, (a1) + add a1, a1, a6 + bnez a0, xor_regs_5_ + ret +SYM_FUNC_END(xor_regs_5_) +EXPORT_SYMBOL(xor_regs_5_) diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile index 3a4dfc8bab..2c869f8026 100644 --- a/arch/riscv/mm/Makefile +++ b/arch/riscv/mm/Makefile @@ -13,10 +13,9 @@ endif KCOV_INSTRUMENT_init.o := n obj-y += init.o -obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o +obj-$(CONFIG_MMU) += extable.o fault.o pageattr.o pgtable.o obj-y += cacheflush.o obj-y += context.o -obj-y += pgtable.o obj-y += pmem.o ifeq ($(CONFIG_MMU),y) diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c index 4e4e469b8d..843107f834 100644 --- a/arch/riscv/mm/dma-noncoherent.c +++ b/arch/riscv/mm/dma-noncoherent.c @@ -129,7 +129,7 @@ void arch_dma_prep_coherent(struct page *page, size_t size) } void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, - const struct iommu_ops *iommu, bool coherent) + bool coherent) { WARN_TAINT(!coherent && riscv_cbom_block_size > ARCH_DMA_MINALIGN, TAINT_CPU_OUT_OF_SPEC, diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c index 35484d830f..dd1530af3e 100644 --- a/arch/riscv/mm/extable.c +++ b/arch/riscv/mm/extable.c @@ -27,6 +27,14 @@ static bool ex_handler_fixup(const struct exception_table_entry *ex, return true; } +static inline unsigned long regs_get_gpr(struct pt_regs *regs, unsigned int offset) +{ + if (unlikely(!offset || offset > MAX_REG_OFFSET)) + return 0; + + return *(unsigned long *)((unsigned long)regs + offset); +} + static inline void regs_set_gpr(struct pt_regs *regs, unsigned int offset, unsigned long val) { @@ -50,6 +58,27 @@ static bool 
ex_handler_uaccess_err_zero(const struct exception_table_entry *ex, return true; } +static bool +ex_handler_load_unaligned_zeropad(const struct exception_table_entry *ex, + struct pt_regs *regs) +{ + int reg_data = FIELD_GET(EX_DATA_REG_DATA, ex->data); + int reg_addr = FIELD_GET(EX_DATA_REG_ADDR, ex->data); + unsigned long data, addr, offset; + + addr = regs_get_gpr(regs, reg_addr * sizeof(unsigned long)); + + offset = addr & 0x7UL; + addr &= ~0x7UL; + + data = *(unsigned long *)addr >> (offset * 8); + + regs_set_gpr(regs, reg_data * sizeof(unsigned long), data); + + regs->epc = get_ex_fixup(ex); + return true; +} + bool fixup_exception(struct pt_regs *regs) { const struct exception_table_entry *ex; @@ -65,6 +94,8 @@ bool fixup_exception(struct pt_regs *regs) return ex_handler_bpf(ex, regs); case EX_TYPE_UACCESS_ERR_ZERO: return ex_handler_uaccess_err_zero(ex, regs); + case EX_TYPE_LOAD_UNALIGNED_ZEROPAD: + return ex_handler_load_unaligned_zeropad(ex, regs); } BUG(); diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c index 081339ddf4..3ba1d4dde5 100644 --- a/arch/riscv/mm/fault.c +++ b/arch/riscv/mm/fault.c @@ -136,24 +136,24 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a pgd = (pgd_t *)pfn_to_virt(pfn) + index; pgd_k = init_mm.pgd + index; - if (!pgd_present(*pgd_k)) { + if (!pgd_present(pgdp_get(pgd_k))) { no_context(regs, addr); return; } - set_pgd(pgd, *pgd_k); + set_pgd(pgd, pgdp_get(pgd_k)); p4d_k = p4d_offset(pgd_k, addr); - if (!p4d_present(*p4d_k)) { + if (!p4d_present(p4dp_get(p4d_k))) { no_context(regs, addr); return; } pud_k = pud_offset(p4d_k, addr); - if (!pud_present(*pud_k)) { + if (!pud_present(pudp_get(pud_k))) { no_context(regs, addr); return; } - if (pud_leaf(*pud_k)) + if (pud_leaf(pudp_get(pud_k))) goto flush_tlb; /* @@ -161,11 +161,11 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a * to copy individual PTEs */ pmd_k = pmd_offset(pud_k, addr); - if (!pmd_present(*pmd_k)) { + if (!pmd_present(pmdp_get(pmd_k))) { no_context(regs, addr); return; } - if (pmd_leaf(*pmd_k)) + if (pmd_leaf(pmdp_get(pmd_k))) goto flush_tlb; /* @@ -175,7 +175,7 @@ static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long a * silently loop forever. 
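 * The p?dp_get() accessors used on this path are READ_ONCE() wrappers
 * around the plain dereferences they replace, e.g. the generic
 * fallback:
 *
 *	static inline pte_t ptep_get(pte_t *ptep)
 *	{
 *		return READ_ONCE(*ptep);
 *	}
 *
 * which keeps the compiler from tearing or re-reading a live
 * page-table entry.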
*/ pte_k = pte_offset_kernel(pmd_k, addr); - if (!pte_present(*pte_k)) { + if (!pte_present(ptep_get(pte_k))) { no_context(regs, addr); return; } diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c index fbe9188016..5ef2a68911 100644 --- a/arch/riscv/mm/hugetlbpage.c +++ b/arch/riscv/mm/hugetlbpage.c @@ -54,7 +54,7 @@ pte_t *huge_pte_alloc(struct mm_struct *mm, } if (sz == PMD_SIZE) { - if (want_pmd_share(vma, addr) && pud_none(*pud)) + if (want_pmd_share(vma, addr) && pud_none(pudp_get(pud))) pte = huge_pmd_share(mm, vma, addr, pud); else pte = (pte_t *)pmd_alloc(mm, pud, addr); @@ -93,11 +93,11 @@ pte_t *huge_pte_offset(struct mm_struct *mm, pmd_t *pmd; pgd = pgd_offset(mm, addr); - if (!pgd_present(*pgd)) + if (!pgd_present(pgdp_get(pgd))) return NULL; p4d = p4d_offset(pgd, addr); - if (!p4d_present(*p4d)) + if (!p4d_present(p4dp_get(p4d))) return NULL; pud = pud_offset(p4d, addr); @@ -105,7 +105,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, /* must be pud huge, non-present or none */ return (pte_t *)pud; - if (!pud_present(*pud)) + if (!pud_present(pudp_get(pud))) return NULL; pmd = pmd_offset(pud, addr); @@ -113,7 +113,7 @@ pte_t *huge_pte_offset(struct mm_struct *mm, /* must be pmd huge, non-present or none */ return (pte_t *)pmd; - if (!pmd_present(*pmd)) + if (!pmd_present(pmdp_get(pmd))) return NULL; for_each_napot_order(order) { @@ -351,7 +351,7 @@ void huge_pte_clear(struct mm_struct *mm, pte_t *ptep, unsigned long sz) { - pte_t pte = READ_ONCE(*ptep); + pte_t pte = ptep_get(ptep); int i, pte_num; if (!pte_napot(pte)) { diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c index ee224fe18d..0c00efc756 100644 --- a/arch/riscv/mm/init.c +++ b/arch/riscv/mm/init.c @@ -232,7 +232,7 @@ static void __init setup_bootmem(void) * In 64-bit, any use of __va/__pa before this point is wrong as we * did not know the start of DRAM before. */ - if (IS_ENABLED(CONFIG_64BIT)) + if (IS_ENABLED(CONFIG_64BIT) && IS_ENABLED(CONFIG_MMU)) kernel_map.va_pa_offset = PAGE_OFFSET - phys_ram_base; /* @@ -1395,10 +1395,29 @@ void __init misc_mem_init(void) } #ifdef CONFIG_SPARSEMEM_VMEMMAP +void __meminit vmemmap_set_pmd(pmd_t *pmd, void *p, int node, + unsigned long addr, unsigned long next) +{ + pmd_set_huge(pmd, virt_to_phys(p), PAGE_KERNEL); +} + +int __meminit vmemmap_check_pmd(pmd_t *pmdp, int node, + unsigned long addr, unsigned long next) +{ + vmemmap_verify((pte_t *)pmdp, node, addr, next); + return 1; +} + int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node, struct vmem_altmap *altmap) { - return vmemmap_populate_basepages(start, end, node, NULL); + /* + * Note that SPARSEMEM_VMEMMAP is only selected for rv64 and that we + * can't use hugepage mappings for 2-level page table because in case of + * memory hotplug, we are not able to update all the page tables with + * the new PMDs. 
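+ * Where hugepage mappings are used, each PMD-mapped 2 MiB of vmemmap
+ * covers 2 MiB / sizeof(struct page) page structs; assuming the usual
+ * 64-byte struct page and 4 KiB base pages, that is 32768 struct
+ * pages, i.e. 128 MiB of memory described per PMD.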
+ */ + return vmemmap_populate_hugepages(start, end, node, NULL); } #endif diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c index 5e39dcf23f..c301c8d291 100644 --- a/arch/riscv/mm/kasan_init.c +++ b/arch/riscv/mm/kasan_init.c @@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned phys_addr_t phys_addr; pte_t *ptep, *p; - if (pmd_none(*pmd)) { + if (pmd_none(pmdp_get(pmd))) { p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE); set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned ptep = pte_offset_kernel(pmd, vaddr); do { - if (pte_none(*ptep)) { + if (pte_none(ptep_get(ptep))) { phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE); set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL)); memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE); @@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned pmd_t *pmdp, *p; unsigned long next; - if (pud_none(*pud)) { + if (pud_none(pudp_get(pud))) { p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE); set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned do { next = pmd_addr_end(vaddr, end); - if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) { + if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) && + (next - vaddr) >= PMD_SIZE) { phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE); if (phys_addr) { set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL)); @@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d, pud_t *pudp, *p; unsigned long next; - if (p4d_none(*p4d)) { + if (p4d_none(p4dp_get(p4d))) { p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE); set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d, do { next = pud_addr_end(vaddr, end); - if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) { + if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) && + (next - vaddr) >= PUD_SIZE) { phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE); if (phys_addr) { set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL)); @@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd, p4d_t *p4dp, *p; unsigned long next; - if (pgd_none(*pgd)) { + if (pgd_none(pgdp_get(pgd))) { p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE); set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE)); } @@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd, do { next = p4d_addr_end(vaddr, end); - if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) { + if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) && + (next - vaddr) >= P4D_SIZE) { phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE); if (phys_addr) { set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL)); @@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp, do { next = pgd_addr_end(vaddr, end); - if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) && + if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) { phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE); if (phys_addr) { @@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp, if (!pgtable_l4_enabled) { pudp = (pud_t *)p4dp; } else { - 
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 5e39dcf23f..c301c8d291 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -31,7 +31,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
 	phys_addr_t phys_addr;
 	pte_t *ptep, *p;
 
-	if (pmd_none(*pmd)) {
+	if (pmd_none(pmdp_get(pmd))) {
 		p = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
 		set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -39,7 +39,7 @@ static void __init kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned
 	ptep = pte_offset_kernel(pmd, vaddr);
 
 	do {
-		if (pte_none(*ptep)) {
+		if (pte_none(ptep_get(ptep))) {
 			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
 			memset(__va(phys_addr), KASAN_SHADOW_INIT, PAGE_SIZE);
@@ -53,7 +53,7 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
 	pmd_t *pmdp, *p;
 	unsigned long next;
 
-	if (pud_none(*pud)) {
+	if (pud_none(pudp_get(pud))) {
 		p = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
 		set_pud(pud, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -63,7 +63,8 @@ static void __init kasan_populate_pmd(pud_t *pud, unsigned long vaddr, unsigned
 	do {
 		next = pmd_addr_end(vaddr, end);
 
-		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
+		if (pmd_none(pmdp_get(pmdp)) && IS_ALIGNED(vaddr, PMD_SIZE) &&
+		    (next - vaddr) >= PMD_SIZE) {
 			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
 			if (phys_addr) {
 				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -83,7 +84,7 @@ static void __init kasan_populate_pud(p4d_t *p4d,
 	pud_t *pudp, *p;
 	unsigned long next;
 
-	if (p4d_none(*p4d)) {
+	if (p4d_none(p4dp_get(p4d))) {
 		p = memblock_alloc(PTRS_PER_PUD * sizeof(pud_t), PAGE_SIZE);
 		set_p4d(p4d, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -93,7 +94,8 @@ static void __init kasan_populate_pud(p4d_t *p4d,
 	do {
 		next = pud_addr_end(vaddr, end);
 
-		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) && (next - vaddr) >= PUD_SIZE) {
+		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		    (next - vaddr) >= PUD_SIZE) {
 			phys_addr = memblock_phys_alloc(PUD_SIZE, PUD_SIZE);
 			if (phys_addr) {
 				set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -113,7 +115,7 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 	p4d_t *p4dp, *p;
 	unsigned long next;
 
-	if (pgd_none(*pgd)) {
+	if (pgd_none(pgdp_get(pgd))) {
 		p = memblock_alloc(PTRS_PER_P4D * sizeof(p4d_t), PAGE_SIZE);
 		set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 	}
@@ -123,7 +125,8 @@ static void __init kasan_populate_p4d(pgd_t *pgd,
 	do {
 		next = p4d_addr_end(vaddr, end);
 
-		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) && (next - vaddr) >= P4D_SIZE) {
+		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		    (next - vaddr) >= P4D_SIZE) {
 			phys_addr = memblock_phys_alloc(P4D_SIZE, P4D_SIZE);
 			if (phys_addr) {
 				set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_KERNEL));
@@ -145,7 +148,7 @@ static void __init kasan_populate_pgd(pgd_t *pgdp,
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
 		    (next - vaddr) >= PGDIR_SIZE) {
 			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
 			if (phys_addr) {
@@ -168,7 +171,7 @@ static void __init kasan_early_clear_pud(p4d_t *p4dp,
 	if (!pgtable_l4_enabled) {
 		pudp = (pud_t *)p4dp;
 	} else {
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
 		pudp = base_pud + pud_index(vaddr);
 	}
 
@@ -193,7 +196,7 @@ static void __init kasan_early_clear_p4d(pgd_t *pgdp,
 	if (!pgtable_l5_enabled) {
 		p4dp = (p4d_t *)pgdp;
 	} else {
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
 		p4dp = base_p4d + p4d_index(vaddr);
 	}
 
@@ -239,14 +242,14 @@ static void __init kasan_early_populate_pud(p4d_t *p4dp,
 	if (!pgtable_l4_enabled) {
 		pudp = (pud_t *)p4dp;
 	} else {
-		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(*p4dp)));
+		base_pud = pt_ops.get_pud_virt(pfn_to_phys(_p4d_pfn(p4dp_get(p4dp))));
 		pudp = base_pud + pud_index(vaddr);
 	}
 
 	do {
 		next = pud_addr_end(vaddr, end);
 
-		if (pud_none(*pudp) && IS_ALIGNED(vaddr, PUD_SIZE) &&
+		if (pud_none(pudp_get(pudp)) && IS_ALIGNED(vaddr, PUD_SIZE) &&
 		    (next - vaddr) >= PUD_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_pmd);
 			set_pud(pudp, pfn_pud(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -277,14 +280,14 @@ static void __init kasan_early_populate_p4d(pgd_t *pgdp,
 	if (!pgtable_l5_enabled) {
 		p4dp = (p4d_t *)pgdp;
 	} else {
-		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(*pgdp)));
+		base_p4d = pt_ops.get_p4d_virt(pfn_to_phys(_pgd_pfn(pgdp_get(pgdp))));
 		p4dp = base_p4d + p4d_index(vaddr);
 	}
 
 	do {
 		next = p4d_addr_end(vaddr, end);
 
-		if (p4d_none(*p4dp) && IS_ALIGNED(vaddr, P4D_SIZE) &&
+		if (p4d_none(p4dp_get(p4dp)) && IS_ALIGNED(vaddr, P4D_SIZE) &&
 		    (next - vaddr) >= P4D_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_pud);
 			set_p4d(p4dp, pfn_p4d(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -305,7 +308,7 @@ static void __init kasan_early_populate_pgd(pgd_t *pgdp,
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		if (pgd_none(*pgdp) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
+		if (pgd_none(pgdp_get(pgdp)) && IS_ALIGNED(vaddr, PGDIR_SIZE) &&
 		    (next - vaddr) >= PGDIR_SIZE) {
 			phys_addr = __pa((uintptr_t)kasan_early_shadow_p4d);
 			set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_TABLE));
@@ -381,7 +384,7 @@ static void __init kasan_shallow_populate_pud(p4d_t *p4d,
 	do {
 		next = pud_addr_end(vaddr, end);
 
-		if (pud_none(*pud_k)) {
+		if (pud_none(pudp_get(pud_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pud(pud_k, pfn_pud(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;
@@ -401,7 +404,7 @@ static void __init kasan_shallow_populate_p4d(pgd_t *pgd,
 	do {
 		next = p4d_addr_end(vaddr, end);
 
-		if (p4d_none(*p4d_k)) {
+		if (p4d_none(p4dp_get(p4d_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_p4d(p4d_k, pfn_p4d(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;
@@ -420,7 +423,7 @@ static void __init kasan_shallow_populate_pgd(unsigned long vaddr, unsigned long
 	do {
 		next = pgd_addr_end(vaddr, end);
 
-		if (pgd_none(*pgd_k)) {
+		if (pgd_none(pgdp_get(pgd_k))) {
 			p = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
 			set_pgd(pgd_k, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
 			continue;
@@ -438,6 +441,14 @@ static void __init kasan_shallow_populate(void *start, void *end)
 	kasan_shallow_populate_pgd(vaddr, vend);
 }
 
+#ifdef CONFIG_KASAN_VMALLOC
+void __init kasan_populate_early_vm_area_shadow(void *start, unsigned long size)
+{
+	kasan_populate(kasan_mem_to_shadow(start),
+		       kasan_mem_to_shadow(start + size));
+}
+#endif
+
 static void __init create_tmp_mapping(void)
 {
 	void *ptr;
@@ -451,7 +462,7 @@ static void __init create_tmp_mapping(void)
 
 	/* Copy the last p4d since it is shared with the kernel mapping. */
 	if (pgtable_l5_enabled) {
-		ptr = (p4d_t *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
+		ptr = (p4d_t *)pgd_page_vaddr(pgdp_get(pgd_offset_k(KASAN_SHADOW_END)));
 		memcpy(tmp_p4d, ptr, sizeof(p4d_t) * PTRS_PER_P4D);
 		set_pgd(&tmp_pg_dir[pgd_index(KASAN_SHADOW_END)],
 			pfn_pgd(PFN_DOWN(__pa(tmp_p4d)), PAGE_TABLE));
@@ -462,7 +473,7 @@ static void __init create_tmp_mapping(void)
 
 	/* Copy the last pud since it is shared with the kernel mapping. */
 	if (pgtable_l4_enabled) {
-		ptr = (pud_t *)p4d_page_vaddr(*(base_p4d + p4d_index(KASAN_SHADOW_END)));
+		ptr = (pud_t *)p4d_page_vaddr(p4dp_get(base_p4d + p4d_index(KASAN_SHADOW_END)));
 		memcpy(tmp_pud, ptr, sizeof(pud_t) * PTRS_PER_PUD);
 		set_p4d(&base_p4d[p4d_index(KASAN_SHADOW_END)],
 			pfn_p4d(PFN_DOWN(__pa(tmp_pud)), PAGE_TABLE));
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
index 01398fee5c..410056a50a 100644
--- a/arch/riscv/mm/pageattr.c
+++ b/arch/riscv/mm/pageattr.c
@@ -29,7 +29,7 @@ static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
 static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	p4d_t val = READ_ONCE(*p4d);
+	p4d_t val = p4dp_get(p4d);
 
 	if (p4d_leaf(val)) {
 		val = __p4d(set_pageattr_masks(p4d_val(val), walk));
@@ -42,7 +42,7 @@ static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pud_t val = READ_ONCE(*pud);
+	pud_t val = pudp_get(pud);
 
 	if (pud_leaf(val)) {
 		val = __pud(set_pageattr_masks(pud_val(val), walk));
@@ -55,7 +55,7 @@ static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pmd_t val = READ_ONCE(*pmd);
+	pmd_t val = pmdp_get(pmd);
 
 	if (pmd_leaf(val)) {
 		val = __pmd(set_pageattr_masks(pmd_val(val), walk));
@@ -68,7 +68,7 @@ static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
 			      unsigned long next, struct mm_walk *walk)
 {
-	pte_t val = READ_ONCE(*pte);
+	pte_t val = ptep_get(pte);
 
 	val = __pte(set_pageattr_masks(pte_val(val), walk));
 	set_pte(pte, val);
@@ -108,10 +108,10 @@ static int __split_linear_mapping_pmd(pud_t *pudp,
 		    vaddr <= (vaddr & PMD_MASK) && end >= next)
 			continue;
 
-		if (pmd_leaf(*pmdp)) {
+		if (pmd_leaf(pmdp_get(pmdp))) {
 			struct page *pte_page;
-			unsigned long pfn = _pmd_pfn(*pmdp);
-			pgprot_t prot = __pgprot(pmd_val(*pmdp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _pmd_pfn(pmdp_get(pmdp));
+			pgprot_t prot = __pgprot(pmd_val(pmdp_get(pmdp)) & ~_PAGE_PFN_MASK);
 			pte_t *ptep_new;
 			int i;
@@ -148,10 +148,10 @@ static int __split_linear_mapping_pud(p4d_t *p4dp,
 		    vaddr <= (vaddr & PUD_MASK) && end >= next)
 			continue;
 
-		if (pud_leaf(*pudp)) {
+		if (pud_leaf(pudp_get(pudp))) {
 			struct page *pmd_page;
-			unsigned long pfn = _pud_pfn(*pudp);
-			pgprot_t prot = __pgprot(pud_val(*pudp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _pud_pfn(pudp_get(pudp));
+			pgprot_t prot = __pgprot(pud_val(pudp_get(pudp)) & ~_PAGE_PFN_MASK);
 			pmd_t *pmdp_new;
 			int i;
@@ -197,10 +197,10 @@ static int __split_linear_mapping_p4d(pgd_t *pgdp,
 		    vaddr <= (vaddr & P4D_MASK) && end >= next)
 			continue;
 
-		if (p4d_leaf(*p4dp)) {
+		if (p4d_leaf(p4dp_get(p4dp))) {
 			struct page *pud_page;
-			unsigned long pfn = _p4d_pfn(*p4dp);
-			pgprot_t prot = __pgprot(p4d_val(*p4dp) & ~_PAGE_PFN_MASK);
+			unsigned long pfn = _p4d_pfn(p4dp_get(p4dp));
+			pgprot_t prot = __pgprot(p4d_val(p4dp_get(p4dp)) & ~_PAGE_PFN_MASK);
 			pud_t *pudp_new;
 			int i;
@@ -411,29 +411,29 @@ bool kernel_page_present(struct page *page)
 	pte_t *pte;
 
 	pgd = pgd_offset_k(addr);
-	if (!pgd_present(*pgd))
+	if (!pgd_present(pgdp_get(pgd)))
 		return false;
-	if (pgd_leaf(*pgd))
+	if (pgd_leaf(pgdp_get(pgd)))
 		return true;
 
 	p4d = p4d_offset(pgd, addr);
-	if (!p4d_present(*p4d))
+	if (!p4d_present(p4dp_get(p4d)))
 		return false;
-	if (p4d_leaf(*p4d))
+	if (p4d_leaf(p4dp_get(p4d)))
 		return true;
 
 	pud = pud_offset(p4d, addr);
-	if (!pud_present(*pud))
+	if (!pud_present(pudp_get(pud)))
 		return false;
-	if (pud_leaf(*pud))
+	if (pud_leaf(pudp_get(pud)))
 		return true;
 
 	pmd = pmd_offset(pud, addr);
-	if (!pmd_present(*pmd))
+	if (!pmd_present(pmdp_get(pmd)))
 		return false;
-	if (pmd_leaf(*pmd))
+	if (pmd_leaf(pmdp_get(pmd)))
 		return true;
 
 	pte = pte_offset_kernel(pmd, addr);
-	return pte_present(*pte);
+	return pte_present(ptep_get(pte));
 }
diff --git a/arch/riscv/mm/pgtable.c b/arch/riscv/mm/pgtable.c
index fef4e7328e..ef887efcb6 100644
--- a/arch/riscv/mm/pgtable.c
+++ b/arch/riscv/mm/pgtable.c
@@ -5,6 +5,47 @@
 #include <linux/kernel.h>
 #include <linux/pgtable.h>
 
+int ptep_set_access_flags(struct vm_area_struct *vma,
+			  unsigned long address, pte_t *ptep,
+			  pte_t entry, int dirty)
+{
+	if (!pte_same(ptep_get(ptep), entry))
+		__set_pte_at(ptep, entry);
+	/*
+	 * update_mmu_cache will unconditionally execute, handling both
+	 * the case that the PTE changed and the spurious fault case.
+	 */
+	return true;
+}
+
+int ptep_test_and_clear_young(struct vm_area_struct *vma,
+			      unsigned long address,
+			      pte_t *ptep)
+{
+	if (!pte_young(ptep_get(ptep)))
+		return 0;
+	return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+EXPORT_SYMBOL_GPL(ptep_test_and_clear_young);
+
+#ifdef CONFIG_64BIT
+pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+	if (pgtable_l4_enabled)
+		return p4d_pgtable(p4dp_get(p4d)) + pud_index(address);
+
+	return (pud_t *)p4d;
+}
+
+p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+	if (pgtable_l5_enabled)
+		return pgd_pgtable(pgdp_get(pgd)) + p4d_index(address);
+
+	return (p4d_t *)pgd;
+}
+#endif
+
 #ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
 int p4d_set_huge(p4d_t *p4d, phys_addr_t addr, pgprot_t prot)
 {
@@ -25,7 +66,7 @@ int pud_set_huge(pud_t *pud, phys_addr_t phys, pgprot_t prot)
 
 int pud_clear_huge(pud_t *pud)
 {
-	if (!pud_leaf(READ_ONCE(*pud)))
+	if (!pud_leaf(pudp_get(pud)))
 		return 0;
 	pud_clear(pud);
 	return 1;
@@ -33,7 +74,7 @@ int pud_clear_huge(pud_t *pud)
 
 int pud_free_pmd_page(pud_t *pud, unsigned long addr)
 {
-	pmd_t *pmd = pud_pgtable(*pud);
+	pmd_t *pmd = pud_pgtable(pudp_get(pud));
 	int i;
 
 	pud_clear(pud);
@@ -63,7 +104,7 @@ int pmd_set_huge(pmd_t *pmd, phys_addr_t phys, pgprot_t prot)
 
 int pmd_clear_huge(pmd_t *pmd)
 {
-	if (!pmd_leaf(READ_ONCE(*pmd)))
+	if (!pmd_leaf(pmdp_get(pmd)))
 		return 0;
 	pmd_clear(pmd);
 	return 1;
@@ -71,7 +112,7 @@ int pmd_clear_huge(pmd_t *pmd)
 
 int pmd_free_pte_page(pmd_t *pmd, unsigned long addr)
 {
-	pte_t *pte = (pte_t *)pmd_page_vaddr(*pmd);
+	pte_t *pte = (pte_t *)pmd_page_vaddr(pmdp_get(pmd));
 
 	pmd_clear(pmd);
 
@@ -88,7 +129,7 @@ pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
 	pmd_t pmd = pmdp_huge_get_and_clear(vma->vm_mm, address, pmdp);
 
 	VM_BUG_ON(address & ~HPAGE_PMD_MASK);
-	VM_BUG_ON(pmd_trans_huge(*pmdp));
+	VM_BUG_ON(pmd_trans_huge(pmdp_get(pmdp)));
 	/*
 	 * When leaf PTE entries (regular pages) are collapsed into a leaf
 	 * PMD entry (huge page), a valid non-leaf PTE is converted into a
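The out-of-line pud_offset()/p4d_offset() definitions added to pgtable.c above exist because riscv folds paging levels at runtime: under Sv39 or Sv48 the P4D and/or PUD level is disabled, so each helper must decide dynamically whether to descend into the next table or simply cast the pointer through. A hedged sketch of what that buys a caller; walk_to_pud() is an illustrative name, not a kernel function:

	/* Illustrative only: one walk works for Sv39, Sv48 and Sv57 because
	 * the folded helpers degrade to pointer casts when a level is off. */
	static pud_t *walk_to_pud(struct mm_struct *mm, unsigned long addr)
	{
		pgd_t *pgd = pgd_offset(mm, addr);
		p4d_t *p4d;

		if (!pgd_present(pgdp_get(pgd)))
			return NULL;

		/* == (p4d_t *)pgd when pgtable_l5_enabled is false */
		p4d = p4d_offset(pgd, addr);
		if (!p4d_present(p4dp_get(p4d)))
			return NULL;

		/* == (pud_t *)p4d when pgtable_l4_enabled is false */
		return pud_offset(p4d, addr);
	}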
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
index 1f90721d22..07d743f87b 100644
--- a/arch/riscv/mm/tlbflush.c
+++ b/arch/riscv/mm/tlbflush.c
@@ -99,29 +99,23 @@ static void __ipi_flush_tlb_range_asid(void *info)
 	local_flush_tlb_range_asid(d->start, d->size, d->stride, d->asid);
 }
 
-static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
-			      unsigned long size, unsigned long stride)
+static void __flush_tlb_range(const struct cpumask *cmask, unsigned long asid,
+			      unsigned long start, unsigned long size,
+			      unsigned long stride)
 {
 	struct flush_tlb_range_data ftd;
-	const struct cpumask *cmask;
-	unsigned long asid = FLUSH_TLB_NO_ASID;
 	bool broadcast;
 
-	if (mm) {
-		unsigned int cpuid;
+	if (cpumask_empty(cmask))
+		return;
 
-		cmask = mm_cpumask(mm);
-		if (cpumask_empty(cmask))
-			return;
+	if (cmask != cpu_online_mask) {
+		unsigned int cpuid;
 
 		cpuid = get_cpu();
 		/* check if the tlbflush needs to be sent to other CPUs */
 		broadcast = cpumask_any_but(cmask, cpuid) < nr_cpu_ids;
-
-		if (static_branch_unlikely(&use_asid_allocator))
-			asid = atomic_long_read(&mm->context.id) & asid_mask;
 	} else {
-		cmask = cpu_online_mask;
 		broadcast = true;
 	}
 
@@ -141,25 +135,34 @@ static void __flush_tlb_range(struct mm_struct *mm, unsigned long start,
 		local_flush_tlb_range_asid(start, size, stride, asid);
 	}
 
-	if (mm)
+	if (cmask != cpu_online_mask)
 		put_cpu();
 }
 
+static inline unsigned long get_mm_asid(struct mm_struct *mm)
+{
+	return static_branch_unlikely(&use_asid_allocator) ?
+			atomic_long_read(&mm->context.id) & asid_mask : FLUSH_TLB_NO_ASID;
+}
+
 void flush_tlb_mm(struct mm_struct *mm)
 {
-	__flush_tlb_range(mm, 0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+			  0, FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_mm_range(struct mm_struct *mm,
 			unsigned long start, unsigned long end,
 			unsigned int page_size)
 {
-	__flush_tlb_range(mm, start, end - start, page_size);
+	__flush_tlb_range(mm_cpumask(mm), get_mm_asid(mm),
+			  start, end - start, page_size);
 }
 
 void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
 {
-	__flush_tlb_range(vma->vm_mm, addr, PAGE_SIZE, PAGE_SIZE);
+	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+			  addr, PAGE_SIZE, PAGE_SIZE);
 }
 
 void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
@@ -191,18 +194,45 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
 		}
 	}
 
-	__flush_tlb_range(vma->vm_mm, start, end - start, stride_size);
+	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+			  start, end - start, stride_size);
 }
 
 void flush_tlb_kernel_range(unsigned long start, unsigned long end)
 {
-	__flush_tlb_range(NULL, start, end - start, PAGE_SIZE);
+	__flush_tlb_range(cpu_online_mask, FLUSH_TLB_NO_ASID,
+			  start, end - start, PAGE_SIZE);
 }
 
 #ifdef CONFIG_TRANSPARENT_HUGEPAGE
 void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
 			 unsigned long end)
 {
-	__flush_tlb_range(vma->vm_mm, start, end - start, PMD_SIZE);
+	__flush_tlb_range(mm_cpumask(vma->vm_mm), get_mm_asid(vma->vm_mm),
+			  start, end - start, PMD_SIZE);
 }
 #endif
+
+bool arch_tlbbatch_should_defer(struct mm_struct *mm)
+{
+	return true;
+}
+
+void arch_tlbbatch_add_pending(struct arch_tlbflush_unmap_batch *batch,
+			       struct mm_struct *mm,
+			       unsigned long uaddr)
+{
+	cpumask_or(&batch->cpumask, &batch->cpumask, mm_cpumask(mm));
+}
+
+void arch_flush_tlb_batched_pending(struct mm_struct *mm)
+{
+	flush_tlb_mm(mm);
+}
+
+void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
+{
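The arch_tlbbatch_*() hooks added at the end of tlbflush.c implement the ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH contract selected in this commit's Kconfig hunk: during reclaim, mm/rmap.c accumulates the CPU masks of every mm it unmaps from and issues one combined flush instead of one IPI per page, while arch_flush_tlb_batched_pending() covers the case where a page table must be flushed before the batch drains. A condensed sketch of the calling order (the real driver is try_to_unmap_one()/try_to_unmap_flush() in mm/rmap.c; this stand-alone function only illustrates the sequence):

	#include <asm/tlbbatch.h>
	#include <asm/tlbflush.h>

	/* Illustrative only: how the generic unmap path exercises the hooks. */
	static void batched_unmap_flush_demo(struct mm_struct *mm, unsigned long uaddr)
	{
		struct arch_tlbflush_unmap_batch batch;

		cpumask_clear(&batch.cpumask);

		if (arch_tlbbatch_should_defer(mm))
			/* riscv just ORs in mm_cpumask(mm); nothing is flushed yet */
			arch_tlbbatch_add_pending(&batch, mm, uaddr);

		/* ... pages from other mms may be queued here as well ... */

		/* one full-range, FLUSH_TLB_NO_ASID flush for all queued CPUs */
		arch_tlbbatch_flush(&batch);
	}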
+	__flush_tlb_range(&batch->cpumask, FLUSH_TLB_NO_ASID, 0,
+			  FLUSH_TLB_MAX_SIZE, PAGE_SIZE);
+	cpumask_clear(&batch->cpumask);
+}
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 8581693e62..719a97e7ed 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -795,6 +795,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 	struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
 	struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
 	struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+	bool is_struct_ops = flags & BPF_TRAMP_F_INDIRECT;
 	void *orig_call = func_addr;
 	bool save_ret;
 	u32 insn;
@@ -878,7 +879,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
 	stack_size = round_up(stack_size, 16);
 
-	if (func_addr) {
+	if (!is_struct_ops) {
 		/* For the trampoline called from function entry,
 		 * the frame of traced function and the frame of
 		 * trampoline need to be considered.
@@ -998,7 +999,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
 
 	emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
 
-	if (func_addr) {
+	if (!is_struct_ops) {
 		/* trampoline called from function entry */
 		emit_ld(RV_REG_T0, stack_size - 8, RV_REG_SP, ctx);
 		emit_ld(RV_REG_FP, stack_size - 16, RV_REG_SP, ctx);
@@ -1029,23 +1030,28 @@ out:
 	return ret;
 }
 
-int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
-				void *image_end, const struct btf_func_model *m,
-				u32 flags, struct bpf_tramp_links *tlinks,
-				void *func_addr)
+int arch_bpf_trampoline_size(const struct btf_func_model *m, u32 flags,
+			     struct bpf_tramp_links *tlinks, void *func_addr)
 {
-	int ret;
+	struct bpf_tramp_image im;
 	struct rv_jit_context ctx;
+	int ret;
 
 	ctx.ninsns = 0;
 	ctx.insns = NULL;
 	ctx.ro_insns = NULL;
-	ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
-	if (ret < 0)
-		return ret;
+	ret = __arch_prepare_bpf_trampoline(&im, m, tlinks, func_addr, flags, &ctx);
 
-	if (ninsns_rvoff(ret) > (long)image_end - (long)image)
-		return -EFBIG;
+	return ret < 0 ? ret : ninsns_rvoff(ctx.ninsns);
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+				void *image_end, const struct btf_func_model *m,
+				u32 flags, struct bpf_tramp_links *tlinks,
+				void *func_addr)
+{
+	int ret;
+	struct rv_jit_context ctx;
 
 	ctx.ninsns = 0;
 	/*
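The bpf_jit_comp64.c hunks above (the capture cuts off mid-function) move the riscv JIT to the two-pass trampoline scheme the BPF core adopted in 6.8: arch_bpf_trampoline_size() runs the emitter with ctx.insns == NULL purely to count instructions, the core allocates an image of that size, and arch_prepare_bpf_trampoline() then emits for real. A hedged sketch of a caller honoring that contract; the actual caller is bpf_trampoline_update() in kernel/bpf/trampoline.c, and alloc_image() below is a hypothetical stand-in for the core's image allocator:

	/* Illustrative only: size first, then emit into the sized image. */
	static int build_trampoline(struct bpf_tramp_image *im,
				    const struct btf_func_model *m, u32 flags,
				    struct bpf_tramp_links *tlinks, void *func_addr)
	{
		void *image, *image_end;
		int size;

		size = arch_bpf_trampoline_size(m, flags, tlinks, func_addr);
		if (size < 0)
			return size;	/* dry-run emission failed */

		image = alloc_image(size);	/* hypothetical allocator */
		if (!image)
			return -ENOMEM;
		image_end = image + size;

		/* the second pass must produce at most 'size' bytes */
		return arch_prepare_bpf_trampoline(im, image, image_end, m,
						   flags, tlinks, func_addr);
	}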