Diffstat (limited to 'arch/riscv')
-rw-r--r--  arch/riscv/Kbuild  4
-rw-r--r--  arch/riscv/Kconfig  461
-rw-r--r--  arch/riscv/Kconfig.debug  0
-rw-r--r--  arch/riscv/Kconfig.socs  51
-rw-r--r--  arch/riscv/Makefile  123
-rw-r--r--  arch/riscv/boot/.gitignore  5
-rw-r--r--  arch/riscv/boot/Makefile  55
-rw-r--r--  arch/riscv/boot/dts/Makefile  5
-rw-r--r--  arch/riscv/boot/dts/kendryte/Makefile  4
-rw-r--r--  arch/riscv/boot/dts/kendryte/k210.dts  23
-rw-r--r--  arch/riscv/boot/dts/kendryte/k210.dtsi  125
-rw-r--r--  arch/riscv/boot/dts/sifive/Makefile  2
-rw-r--r--  arch/riscv/boot/dts/sifive/fu540-c000.dtsi  286
-rw-r--r--  arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts  144
-rw-r--r--  arch/riscv/boot/install.sh  60
-rw-r--r--  arch/riscv/boot/loader.S  8
-rw-r--r--  arch/riscv/boot/loader.lds.S  16
-rw-r--r--  arch/riscv/configs/defconfig  135
-rw-r--r--  arch/riscv/configs/nommu_k210_defconfig  66
-rw-r--r--  arch/riscv/configs/nommu_virt_defconfig  76
-rw-r--r--  arch/riscv/configs/rv32_defconfig  133
-rw-r--r--  arch/riscv/include/asm/Kbuild  7
-rw-r--r--  arch/riscv/include/asm/asm-offsets.h  1
-rw-r--r--  arch/riscv/include/asm/asm-prototypes.h  12
-rw-r--r--  arch/riscv/include/asm/asm.h  69
-rw-r--r--  arch/riscv/include/asm/atomic.h  356
-rw-r--r--  arch/riscv/include/asm/barrier.h  76
-rw-r--r--  arch/riscv/include/asm/bitops.h  205
-rw-r--r--  arch/riscv/include/asm/bug.h  91
-rw-r--r--  arch/riscv/include/asm/cache.h  22
-rw-r--r--  arch/riscv/include/asm/cacheflush.h  53
-rw-r--r--  arch/riscv/include/asm/cacheinfo.h  20
-rw-r--r--  arch/riscv/include/asm/clint.h  26
-rw-r--r--  arch/riscv/include/asm/clocksource.h  7
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h  375
-rw-r--r--  arch/riscv/include/asm/cpu_ops.h  46
-rw-r--r--  arch/riscv/include/asm/csr.h  219
-rw-r--r--  arch/riscv/include/asm/current.h  38
-rw-r--r--  arch/riscv/include/asm/delay.h  20
-rw-r--r--  arch/riscv/include/asm/efi.h  55
-rw-r--r--  arch/riscv/include/asm/elf.h  84
-rw-r--r--  arch/riscv/include/asm/fence.h  12
-rw-r--r--  arch/riscv/include/asm/fixmap.h  59
-rw-r--r--  arch/riscv/include/asm/ftrace.h  88
-rw-r--r--  arch/riscv/include/asm/futex.h  120
-rw-r--r--  arch/riscv/include/asm/gdb_xml.h  116
-rw-r--r--  arch/riscv/include/asm/hugetlb.h  14
-rw-r--r--  arch/riscv/include/asm/hwcap.h  49
-rw-r--r--  arch/riscv/include/asm/image.h  65
-rw-r--r--  arch/riscv/include/asm/io.h  151
-rw-r--r--  arch/riscv/include/asm/irq.h  15
-rw-r--r--  arch/riscv/include/asm/irq_work.h  10
-rw-r--r--  arch/riscv/include/asm/irqflags.h  55
-rw-r--r--  arch/riscv/include/asm/jump_label.h  62
-rw-r--r--  arch/riscv/include/asm/kasan.h  24
-rw-r--r--  arch/riscv/include/asm/kdebug.h  12
-rw-r--r--  arch/riscv/include/asm/kgdb.h  113
-rw-r--r--  arch/riscv/include/asm/kprobes.h  14
-rw-r--r--  arch/riscv/include/asm/linkage.h  12
-rw-r--r--  arch/riscv/include/asm/mmio.h  151
-rw-r--r--  arch/riscv/include/asm/mmiowb.h  15
-rw-r--r--  arch/riscv/include/asm/mmu.h  27
-rw-r--r--  arch/riscv/include/asm/mmu_context.h  46
-rw-r--r--  arch/riscv/include/asm/module.h  114
-rw-r--r--  arch/riscv/include/asm/module.lds.h  9
-rw-r--r--  arch/riscv/include/asm/page.h  148
-rw-r--r--  arch/riscv/include/asm/parse_asm.h  224
-rw-r--r--  arch/riscv/include/asm/patch.h  14
-rw-r--r--  arch/riscv/include/asm/pci.h  37
-rw-r--r--  arch/riscv/include/asm/perf_event.h  85
-rw-r--r--  arch/riscv/include/asm/pgalloc.h  71
-rw-r--r--  arch/riscv/include/asm/pgtable-32.h  19
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h  86
-rw-r--r--  arch/riscv/include/asm/pgtable-bits.h  43
-rw-r--r--  arch/riscv/include/asm/pgtable.h  486
-rw-r--r--  arch/riscv/include/asm/processor.h  76
-rw-r--r--  arch/riscv/include/asm/ptdump.h  22
-rw-r--r--  arch/riscv/include/asm/ptrace.h  106
-rw-r--r--  arch/riscv/include/asm/sbi.h  157
-rw-r--r--  arch/riscv/include/asm/seccomp.h  10
-rw-r--r--  arch/riscv/include/asm/sections.h  13
-rw-r--r--  arch/riscv/include/asm/set_memory.h  40
-rw-r--r--  arch/riscv/include/asm/smp.h  114
-rw-r--r--  arch/riscv/include/asm/soc.h  62
-rw-r--r--  arch/riscv/include/asm/sparsemem.h  11
-rw-r--r--  arch/riscv/include/asm/spinlock.h  135
-rw-r--r--  arch/riscv/include/asm/spinlock_types.h  25
-rw-r--r--  arch/riscv/include/asm/stackprotector.h  29
-rw-r--r--  arch/riscv/include/asm/string.h  27
-rw-r--r--  arch/riscv/include/asm/switch_to.h  78
-rw-r--r--  arch/riscv/include/asm/syscall.h  85
-rw-r--r--  arch/riscv/include/asm/thread_info.h  102
-rw-r--r--  arch/riscv/include/asm/timex.h  91
-rw-r--r--  arch/riscv/include/asm/tlb.h  21
-rw-r--r--  arch/riscv/include/asm/tlbflush.h  56
-rw-r--r--  arch/riscv/include/asm/uaccess.h  490
-rw-r--r--  arch/riscv/include/asm/unistd.h  15
-rw-r--r--  arch/riscv/include/asm/vdso.h  34
-rw-r--r--  arch/riscv/include/asm/vdso/clocksource.h  8
-rw-r--r--  arch/riscv/include/asm/vdso/gettimeofday.h  81
-rw-r--r--  arch/riscv/include/asm/vdso/processor.h  21
-rw-r--r--  arch/riscv/include/asm/vdso/vsyscall.h  27
-rw-r--r--  arch/riscv/include/asm/vermagic.h  9
-rw-r--r--  arch/riscv/include/asm/vmalloc.h  4
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h  48
-rw-r--r--  arch/riscv/include/uapi/asm/Kbuild  1
-rw-r--r--  arch/riscv/include/uapi/asm/auxvec.h  37
-rw-r--r--  arch/riscv/include/uapi/asm/bitsperlong.h  14
-rw-r--r--  arch/riscv/include/uapi/asm/bpf_perf_event.h  9
-rw-r--r--  arch/riscv/include/uapi/asm/byteorder.h  12
-rw-r--r--  arch/riscv/include/uapi/asm/elf.h  98
-rw-r--r--  arch/riscv/include/uapi/asm/hwcap.h  25
-rw-r--r--  arch/riscv/include/uapi/asm/perf_regs.h  42
-rw-r--r--  arch/riscv/include/uapi/asm/ptrace.h  82
-rw-r--r--  arch/riscv/include/uapi/asm/setup.h  8
-rw-r--r--  arch/riscv/include/uapi/asm/sigcontext.h  22
-rw-r--r--  arch/riscv/include/uapi/asm/ucontext.h  34
-rw-r--r--  arch/riscv/include/uapi/asm/unistd.h  44
-rw-r--r--  arch/riscv/kernel/.gitignore  2
-rw-r--r--  arch/riscv/kernel/Makefile  63
-rw-r--r--  arch/riscv/kernel/asm-offsets.c  309
-rw-r--r--  arch/riscv/kernel/cacheinfo.c  189
-rw-r--r--  arch/riscv/kernel/cpu-hotplug.c  87
-rw-r--r--  arch/riscv/kernel/cpu.c  133
-rw-r--r--  arch/riscv/kernel/cpu_ops.c  46
-rw-r--r--  arch/riscv/kernel/cpu_ops_sbi.c  115
-rw-r--r--  arch/riscv/kernel/cpu_ops_spinwait.c  43
-rw-r--r--  arch/riscv/kernel/cpufeature.c  151
-rw-r--r--  arch/riscv/kernel/efi-header.S  111
-rw-r--r--  arch/riscv/kernel/efi.c  96
-rw-r--r--  arch/riscv/kernel/entry.S  477
-rw-r--r--  arch/riscv/kernel/fpu.S  106
-rw-r--r--  arch/riscv/kernel/ftrace.c  239
-rw-r--r--  arch/riscv/kernel/head.S  396
-rw-r--r--  arch/riscv/kernel/head.h  19
-rw-r--r--  arch/riscv/kernel/image-vars.h  51
-rw-r--r--  arch/riscv/kernel/irq.c  24
-rw-r--r--  arch/riscv/kernel/jump_label.c  53
-rw-r--r--  arch/riscv/kernel/kgdb.c  390
-rw-r--r--  arch/riscv/kernel/mcount-dyn.S  239
-rw-r--r--  arch/riscv/kernel/mcount.S  129
-rw-r--r--  arch/riscv/kernel/module-sections.c  159
-rw-r--r--  arch/riscv/kernel/module.c  431
-rw-r--r--  arch/riscv/kernel/patch.c  151
-rw-r--r--  arch/riscv/kernel/perf_callchain.c  97
-rw-r--r--  arch/riscv/kernel/perf_event.c  485
-rw-r--r--  arch/riscv/kernel/perf_regs.c  43
-rw-r--r--  arch/riscv/kernel/process.c  138
-rw-r--r--  arch/riscv/kernel/ptrace.c  178
-rw-r--r--  arch/riscv/kernel/reset.c  38
-rw-r--r--  arch/riscv/kernel/riscv_ksyms.c  15
-rw-r--r--  arch/riscv/kernel/sbi.c  605
-rw-r--r--  arch/riscv/kernel/setup.c  134
-rw-r--r--  arch/riscv/kernel/signal.c  327
-rw-r--r--  arch/riscv/kernel/smp.c  249
-rw-r--r--  arch/riscv/kernel/smpboot.c  173
-rw-r--r--  arch/riscv/kernel/soc.c  55
-rw-r--r--  arch/riscv/kernel/stacktrace.c  177
-rw-r--r--  arch/riscv/kernel/sys_riscv.c  69
-rw-r--r--  arch/riscv/kernel/syscall_table.c  19
-rw-r--r--  arch/riscv/kernel/time.c  43
-rw-r--r--  arch/riscv/kernel/trace_irq.c  27
-rw-r--r--  arch/riscv/kernel/trace_irq.h  11
-rw-r--r--  arch/riscv/kernel/traps.c  188
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c  368
-rw-r--r--  arch/riscv/kernel/vdso.c  113
-rw-r--r--  arch/riscv/kernel/vdso/.gitignore  4
-rw-r--r--  arch/riscv/kernel/vdso/Makefile  85
-rw-r--r--  arch/riscv/kernel/vdso/flush_icache.S  22
-rw-r--r--  arch/riscv/kernel/vdso/getcpu.S  18
-rw-r--r--  arch/riscv/kernel/vdso/note.S  12
-rw-r--r--  arch/riscv/kernel/vdso/rt_sigreturn.S  16
-rwxr-xr-x  arch/riscv/kernel/vdso/so2s.sh  6
-rw-r--r--  arch/riscv/kernel/vdso/vdso.S  19
-rw-r--r--  arch/riscv/kernel/vdso/vdso.lds.S  76
-rw-r--r--  arch/riscv/kernel/vdso/vgettimeofday.c  31
-rw-r--r--  arch/riscv/kernel/vmlinux.lds.S  123
-rw-r--r--  arch/riscv/lib/Makefile  6
-rw-r--r--  arch/riscv/lib/delay.c  107
-rw-r--r--  arch/riscv/lib/memcpy.S  108
-rw-r--r--  arch/riscv/lib/memset.S  113
-rw-r--r--  arch/riscv/lib/tishift.S  76
-rw-r--r--  arch/riscv/lib/uaccess.S  126
-rw-r--r--  arch/riscv/mm/Makefile  32
-rw-r--r--  arch/riscv/mm/cacheflush.c  93
-rw-r--r--  arch/riscv/mm/context.c  67
-rw-r--r--  arch/riscv/mm/extable.c  24
-rw-r--r--  arch/riscv/mm/fault.c  305
-rw-r--r--  arch/riscv/mm/hugetlbpage.c  34
-rw-r--r--  arch/riscv/mm/init.c  687
-rw-r--r--  arch/riscv/mm/kasan_init.c  115
-rw-r--r--  arch/riscv/mm/pageattr.c  198
-rw-r--r--  arch/riscv/mm/physaddr.c  37
-rw-r--r--  arch/riscv/mm/ptdump.c  353
-rw-r--r--  arch/riscv/mm/tlbflush.c  56
-rw-r--r--  arch/riscv/net/Makefile  9
-rw-r--r--  arch/riscv/net/bpf_jit.h  992
-rw-r--r--  arch/riscv/net/bpf_jit_comp32.c  1347
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  1143
-rw-r--r--  arch/riscv/net/bpf_jit_core.c  194
200 files changed, 22950 insertions, 0 deletions
diff --git a/arch/riscv/Kbuild b/arch/riscv/Kbuild
new file mode 100644
index 000000000..4614c01ba
--- /dev/null
+++ b/arch/riscv/Kbuild
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-y += kernel/ mm/ net/
+obj-$(CONFIG_BUILTIN_DTB) += boot/dts/
diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
new file mode 100644
index 000000000..b28fabfc9
--- /dev/null
+++ b/arch/riscv/Kconfig
@@ -0,0 +1,461 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# For a description of the syntax of this configuration file,
+# see Documentation/kbuild/kconfig-language.rst.
+#
+
+config 64BIT
+ bool
+
+config 32BIT
+ bool
+
+config RISCV
+ def_bool y
+ select ARCH_CLOCKSOURCE_INIT
+ select ARCH_SUPPORTS_ATOMIC_RMW
+ select ARCH_HAS_BINFMT_FLAT
+ select ARCH_HAS_DEBUG_VM_PGTABLE
+ select ARCH_HAS_DEBUG_VIRTUAL if MMU
+ select ARCH_HAS_DEBUG_WX
+ select ARCH_HAS_GCOV_PROFILE_ALL
+ select ARCH_HAS_GIGANTIC_PAGE
+ select ARCH_HAS_KCOV
+ select ARCH_HAS_MMIOWB
+ select ARCH_HAS_NON_OVERLAPPING_ADDRESS_SPACE
+ select ARCH_HAS_PTE_SPECIAL
+ select ARCH_HAS_SET_DIRECT_MAP
+ select ARCH_HAS_SET_MEMORY
+ select ARCH_HAS_STRICT_KERNEL_RWX if MMU
+ select ARCH_OPTIONAL_KERNEL_RWX if ARCH_HAS_STRICT_KERNEL_RWX
+ select ARCH_OPTIONAL_KERNEL_RWX_DEFAULT
+ select ARCH_WANT_DEFAULT_TOPDOWN_MMAP_LAYOUT if MMU
+ select ARCH_WANT_FRAME_POINTERS
+ select ARCH_WANT_HUGE_PMD_SHARE if 64BIT
+ select CLONE_BACKWARDS
+ select CLINT_TIMER if !MMU
+ select COMMON_CLK
+ select EDAC_SUPPORT
+ select GENERIC_ARCH_TOPOLOGY
+ select GENERIC_ATOMIC64 if !64BIT
+ select GENERIC_CLOCKEVENTS
+ select GENERIC_EARLY_IOREMAP
+ select GENERIC_GETTIMEOFDAY if HAVE_GENERIC_VDSO
+ select GENERIC_IOREMAP
+ select GENERIC_IRQ_MULTI_HANDLER
+ select GENERIC_IRQ_SHOW
+ select GENERIC_PCI_IOMAP
+ select GENERIC_PTDUMP if MMU
+ select GENERIC_SCHED_CLOCK
+ select GENERIC_SMP_IDLE_THREAD
+ select GENERIC_STRNCPY_FROM_USER if MMU
+ select GENERIC_STRNLEN_USER if MMU
+ select GENERIC_TIME_VSYSCALL if MMU && 64BIT
+ select HANDLE_DOMAIN_IRQ
+ select HAVE_ARCH_AUDITSYSCALL
+ select HAVE_ARCH_JUMP_LABEL
+ select HAVE_ARCH_JUMP_LABEL_RELATIVE
+ select HAVE_ARCH_KASAN if MMU && 64BIT
+ select HAVE_ARCH_KGDB
+ select HAVE_ARCH_KGDB_QXFER_PKT
+ select HAVE_ARCH_MMAP_RND_BITS if MMU
+ select HAVE_ARCH_SECCOMP_FILTER
+ select HAVE_ARCH_TRACEHOOK
+ select HAVE_ASM_MODVERSIONS
+ select HAVE_CONTEXT_TRACKING
+ select HAVE_DEBUG_KMEMLEAK
+ select HAVE_DMA_CONTIGUOUS if MMU
+ select HAVE_EBPF_JIT if MMU
+ select HAVE_FUTEX_CMPXCHG if FUTEX
+ select HAVE_GCC_PLUGINS
+ select HAVE_GENERIC_VDSO if MMU && 64BIT
+ select HAVE_PCI
+ select HAVE_PERF_EVENTS
+ select HAVE_PERF_REGS
+ select HAVE_PERF_USER_STACK_DUMP
+ select HAVE_STACKPROTECTOR
+ select HAVE_SYSCALL_TRACEPOINTS
+ select IRQ_DOMAIN
+ select MODULES_USE_ELF_RELA if MODULES
+ select MODULE_SECTIONS if MODULES
+ select OF
+ select OF_EARLY_FLATTREE
+ select OF_IRQ
+ select PCI_DOMAINS_GENERIC if PCI
+ select PCI_MSI if PCI
+ select RISCV_INTC
+ select RISCV_TIMER if RISCV_SBI
+ select SPARSE_IRQ
+ select SYSCTL_EXCEPTION_TRACE
+ select THREAD_INFO_IN_TASK
+ select UACCESS_MEMCPY if !MMU
+
+config ARCH_MMAP_RND_BITS_MIN
+ default 18 if 64BIT
+ default 8
+
+# max bits determined by the following formula:
+# VA_BITS - PAGE_SHIFT - 3
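+#   e.g. Sv39: 39 - 12 - 3 = 24; Sv32: 32 - 12 - 3 = 17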
+config ARCH_MMAP_RND_BITS_MAX
+ default 24 if 64BIT # SV39 based
+ default 17
+
+# set if we run in machine mode, cleared if we run in supervisor mode
+config RISCV_M_MODE
+ bool
+ default !MMU
+
+# set if we are running in S-mode and can use SBI calls
+config RISCV_SBI
+ bool
+ depends on !RISCV_M_MODE
+ default y
+
+config MMU
+ bool "MMU-based Paged Memory Management Support"
+ default y
+ help
+ Select if you want MMU-based virtualised address space
+ support via paged memory management. If unsure, say 'Y'.
+
+config ZONE_DMA32
+ bool
+ default y if 64BIT
+
+config VA_BITS
+ int
+ default 32 if 32BIT
+ default 39 if 64BIT
+
+config PA_BITS
+ int
+ default 34 if 32BIT
+ default 56 if 64BIT
+
+config PAGE_OFFSET
+ hex
+ default 0xC0000000 if 32BIT && MAXPHYSMEM_1GB
+ default 0x80000000 if 64BIT && !MMU
+ default 0xffffffff80000000 if 64BIT && MAXPHYSMEM_2GB
+ default 0xffffffe000000000 if 64BIT && MAXPHYSMEM_128GB
+
+config KASAN_SHADOW_OFFSET
+ hex
+ depends on KASAN_GENERIC
+ default 0xdfffffc800000000 if 64BIT
+ default 0xffffffff if 32BIT
+
+config ARCH_FLATMEM_ENABLE
+ def_bool y
+
+config ARCH_SPARSEMEM_ENABLE
+ def_bool y
+ depends on MMU
+ select SPARSEMEM_STATIC if 32BIT && SPARSEMEM
+ select SPARSEMEM_VMEMMAP_ENABLE if 64BIT
+
+config ARCH_SELECT_MEMORY_MODEL
+ def_bool ARCH_SPARSEMEM_ENABLE
+
+config ARCH_WANT_GENERAL_HUGETLB
+ def_bool y
+
+config ARCH_SUPPORTS_DEBUG_PAGEALLOC
+ def_bool y
+
+config SYS_SUPPORTS_HUGETLBFS
+ depends on MMU
+ def_bool y
+
+config STACKTRACE_SUPPORT
+ def_bool y
+
+config TRACE_IRQFLAGS_SUPPORT
+ def_bool y
+
+config GENERIC_BUG
+ def_bool y
+ depends on BUG
+ select GENERIC_BUG_RELATIVE_POINTERS if 64BIT
+
+config GENERIC_BUG_RELATIVE_POINTERS
+ bool
+
+config GENERIC_CALIBRATE_DELAY
+ def_bool y
+
+config GENERIC_CSUM
+ def_bool y
+
+config GENERIC_HWEIGHT
+ def_bool y
+
+config FIX_EARLYCON_MEM
+ def_bool MMU
+
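+# Sv39 (64-bit) uses three levels of page tables; Sv32 (32-bit) uses two.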
+config PGTABLE_LEVELS
+ int
+ default 3 if 64BIT
+ default 2
+
+config LOCKDEP_SUPPORT
+ def_bool y
+
+source "arch/riscv/Kconfig.socs"
+
+menu "Platform type"
+
+choice
+ prompt "Base ISA"
+ default ARCH_RV64I
+ help
+ This selects the base ISA that this kernel will target and must match
+ the target platform.
+
+config ARCH_RV32I
+ bool "RV32I"
+ select 32BIT
+ select GENERIC_LIB_ASHLDI3
+ select GENERIC_LIB_ASHRDI3
+ select GENERIC_LIB_LSHRDI3
+ select GENERIC_LIB_UCMPDI2
+ select MMU
+
+config ARCH_RV64I
+ bool "RV64I"
+ select 64BIT
+ select ARCH_SUPPORTS_INT128 if CC_HAS_INT128 && GCC_VERSION >= 50000
+ select HAVE_DYNAMIC_FTRACE if MMU
+ select HAVE_DYNAMIC_FTRACE_WITH_REGS if HAVE_DYNAMIC_FTRACE
+ select HAVE_FTRACE_MCOUNT_RECORD
+ select HAVE_FUNCTION_GRAPH_TRACER
+ select HAVE_FUNCTION_TRACER
+ select SWIOTLB if MMU
+
+endchoice
+
+# We must be able to map all physical memory into the kernel, but the compiler
+# is still a bit more efficient when generating code if it's set up in a manner
+# such that it can only map 2GiB of memory.
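+# With medlow, code and static data must lie within 2GiB of address zero;
+# with medany they only need to fit in a 2GiB window anywhere, since they
+# are addressed PC-relative.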
+choice
+ prompt "Kernel Code Model"
+ default CMODEL_MEDLOW if 32BIT
+ default CMODEL_MEDANY if 64BIT
+
+ config CMODEL_MEDLOW
+ bool "medium low code model"
+ config CMODEL_MEDANY
+ bool "medium any code model"
+endchoice
+
+config MODULE_SECTIONS
+ bool
+ select HAVE_MOD_ARCH_SPECIFIC
+
+choice
+ prompt "Maximum Physical Memory"
+ default MAXPHYSMEM_1GB if 32BIT
+ default MAXPHYSMEM_2GB if 64BIT && CMODEL_MEDLOW
+ default MAXPHYSMEM_128GB if 64BIT && CMODEL_MEDANY
+
+ config MAXPHYSMEM_1GB
+ depends on 32BIT
+ bool "1GiB"
+ config MAXPHYSMEM_2GB
+ depends on 64BIT && CMODEL_MEDLOW
+ bool "2GiB"
+ config MAXPHYSMEM_128GB
+ depends on 64BIT && CMODEL_MEDANY
+ bool "128GiB"
+endchoice
+
+
+config SMP
+ bool "Symmetric Multi-Processing"
+ help
+ This enables support for systems with more than one CPU. If
+ you say N here, the kernel will run on single and
+ multiprocessor machines, but will use only one CPU of a
+ multiprocessor machine. If you say Y here, the kernel will run
+ on many, but not all, single processor machines. On a single
+ processor machine, the kernel will run faster if you say N
+ here.
+
+ If you don't know what to do here, say N.
+
+config NR_CPUS
+ int "Maximum number of CPUs (2-32)"
+ range 2 32
+ depends on SMP
+ default "8"
+
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP
+ select GENERIC_IRQ_MIGRATION
+ help
+ Say Y here to experiment with turning CPUs off and on. CPUs
+ can be controlled through /sys/devices/system/cpu.
+
+ Say N if you want to disable CPU hotplug.
+
+choice
+ prompt "CPU Tuning"
+ default TUNE_GENERIC
+
+config TUNE_GENERIC
+ bool "generic"
+
+endchoice
+
+config RISCV_ISA_C
+ bool "Emit compressed instructions when building Linux"
+ default y
+ help
+ Adds "C" to the ISA subsets that the toolchain is allowed to emit
+ when building Linux, which results in compressed instructions in the
+ Linux binary.
+
+ If you don't know what to do here, say Y.
+
+menu "supported PMU type"
+ depends on PERF_EVENTS
+
+config RISCV_BASE_PMU
+ bool "Base Performance Monitoring Unit"
+ def_bool y
+ help
+ A base PMU that serves as a reference implementation and has limited
+ perf features. It can run on any RISC-V machine, so it serves as the
+ fallback, but this option can also be disabled to reduce kernel size.
+
+endmenu
+
+config TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ def_bool y
+ # https://sourceware.org/git/?p=binutils-gdb.git;a=commit;h=aed44286efa8ae8717a77d94b51ac3614e2ca6dc
+ depends on AS_IS_GNU && AS_VERSION >= 23800
+ help
+ Newer binutils versions default to ISA spec version 20191213 which
+ moves some instructions from the I extension to the Zicsr and Zifencei
+ extensions.
+
+config TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+ def_bool y
+ depends on TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI
+ # https://github.com/llvm/llvm-project/commit/22e199e6afb1263c943c0c0d4498694e15bf8a16
+ depends on CC_IS_CLANG && CLANG_VERSION < 170000
+ help
+ Certain versions of clang do not support zicsr and zifencei via -march
+ but newer versions of binutils require it for the reasons noted in the
+ help text of CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI. This
+ option causes an older ISA spec compatible with these older versions
+ of clang to be passed to GAS, which has the same result as passing zicsr
+ and zifencei to -march.
+
+config FPU
+ bool "FPU support"
+ default y
+ help
+ Say N here if you want to disable all floating-point related procedures
+ in the kernel.
+
+ If you don't know what to do here, say Y.
+
+endmenu
+
+menu "Kernel features"
+
+source "kernel/Kconfig.hz"
+
+config RISCV_SBI_V01
+ bool "SBI v0.1 support"
+ default y
+ depends on RISCV_SBI
+ help
+ This config allows the kernel to use SBI v0.1 APIs. This will be
+ deprecated in the future once legacy M-mode software is no longer in use.
+endmenu
+
+menu "Boot options"
+
+config CMDLINE
+ string "Built-in kernel command line"
+ help
+ For most platforms, the arguments for the kernel's command line
+ are provided at run-time, during boot. However, there are cases
+ where either no arguments are being provided or the provided
+ arguments are insufficient or even invalid.
+
+ When that occurs, it is possible to define a built-in command
+ line here and choose how the kernel should use it later on.
+
+choice
+ prompt "Built-in command line usage" if CMDLINE != ""
+ default CMDLINE_FALLBACK
+ help
+ Choose how the kernel will handle the provided built-in command
+ line.
+
+config CMDLINE_FALLBACK
+ bool "Use bootloader kernel arguments if available"
+ help
+ Use the built-in command line as fallback in case we get nothing
+ during boot. This is the default behaviour.
+
+config CMDLINE_EXTEND
+ bool "Extend bootloader kernel arguments"
+ help
+ The command-line arguments provided during boot will be
+ appended to the built-in command line. This is useful in
+ cases where the provided arguments are insufficient and
+ you don't want to or cannot modify them.
+
+
+config CMDLINE_FORCE
+ bool "Always use the default kernel command string"
+ help
+ Always use the built-in command line, even if we get one during
+ boot. This is useful in case you need to override the provided
+ command line on systems where you don't have or want control
+ over it.
+
+endchoice
+
+config EFI_STUB
+ bool
+
+config EFI
+ bool "UEFI runtime support"
+ depends on OF
+ select LIBFDT
+ select UCS2_STRING
+ select EFI_PARAMS_FROM_FDT
+ select EFI_STUB
+ select EFI_GENERIC_STUB
+ select EFI_RUNTIME_WRAPPERS
+ select RISCV_ISA_C
+ depends on MMU
+ default y
+ help
+ This option provides support for runtime services provided
+ by UEFI firmware (such as non-volatile variables, realtime
+ clock, and platform reset). A UEFI stub is also provided to
+ allow the kernel to be booted as an EFI application. This
+ is only useful on systems that have UEFI firmware.
+
+endmenu
+
+config BUILTIN_DTB
+ def_bool n
+ depends on RISCV_M_MODE
+ depends on OF
+
+menu "Power management options"
+
+source "kernel/power/Kconfig"
+
+endmenu
+
+source "drivers/firmware/Kconfig"
diff --git a/arch/riscv/Kconfig.debug b/arch/riscv/Kconfig.debug
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/arch/riscv/Kconfig.debug
diff --git a/arch/riscv/Kconfig.socs b/arch/riscv/Kconfig.socs
new file mode 100644
index 000000000..8a55f6156
--- /dev/null
+++ b/arch/riscv/Kconfig.socs
@@ -0,0 +1,51 @@
+menu "SoC selection"
+
+config SOC_SIFIVE
+ bool "SiFive SoCs"
+ select SERIAL_SIFIVE if TTY
+ select SERIAL_SIFIVE_CONSOLE if TTY
+ select CLK_SIFIVE
+ select CLK_SIFIVE_FU540_PRCI
+ select SIFIVE_PLIC
+ help
+ This enables support for SiFive SoC platform hardware.
+
+config SOC_VIRT
+ bool "QEMU Virt Machine"
+ select CLINT_TIMER if RISCV_M_MODE
+ select POWER_RESET
+ select POWER_RESET_SYSCON
+ select POWER_RESET_SYSCON_POWEROFF
+ select GOLDFISH
+ select RTC_DRV_GOLDFISH if RTC_CLASS
+ select SIFIVE_PLIC
+ help
+ This enables support for QEMU Virt Machine.
+
+config SOC_KENDRYTE
+ bool "Kendryte K210 SoC"
+ depends on !MMU
+ select CLINT_TIMER if RISCV_M_MODE
+ select SERIAL_SIFIVE if TTY
+ select SERIAL_SIFIVE_CONSOLE if TTY
+ select SIFIVE_PLIC
+ help
+ This enables support for Kendryte K210 SoC platform hardware.
+
+config SOC_KENDRYTE_K210_DTB
+ def_bool y
+ depends on SOC_KENDRYTE_K210_DTB_BUILTIN
+
+config SOC_KENDRYTE_K210_DTB_BUILTIN
+ bool "Builtin device tree for the Kendryte K210"
+ depends on SOC_KENDRYTE
+ default y
+ select OF
+ select BUILTIN_DTB
+ select SOC_KENDRYTE_K210_DTB
+ help
+ Builds a device tree for the Kendryte K210 into the Linux image.
+ This option should be selected if no bootloader is being used.
+ If unsure, say Y.
+
+endmenu
diff --git a/arch/riscv/Makefile b/arch/riscv/Makefile
new file mode 100644
index 000000000..daa679440
--- /dev/null
+++ b/arch/riscv/Makefile
@@ -0,0 +1,123 @@
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies. Remember to have actions
+# for "archclean" and "archdep" for cleaning up and making dependencies for
+# this architecture
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+
+OBJCOPYFLAGS := -O binary
+LDFLAGS_vmlinux :=
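+# Linker relaxation may shrink the auipc+jalr call sequences that dynamic
+# ftrace expects to patch, so disable it when ftrace is enabled.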
+ifeq ($(CONFIG_DYNAMIC_FTRACE),y)
+ LDFLAGS_vmlinux := --no-relax
+endif
+
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
+KBUILD_CFLAGS_MODULE += -mcmodel=medany
+endif
+
+export BITS
+ifeq ($(CONFIG_ARCH_RV64I),y)
+ BITS := 64
+ UTS_MACHINE := riscv64
+
+ KBUILD_CFLAGS += -mabi=lp64
+ KBUILD_AFLAGS += -mabi=lp64
+
+ KBUILD_LDFLAGS += -melf64lriscv
+else
+ BITS := 32
+ UTS_MACHINE := riscv32
+
+ KBUILD_CFLAGS += -mabi=ilp32
+ KBUILD_AFLAGS += -mabi=ilp32
+ KBUILD_LDFLAGS += -melf32lriscv
+endif
+
+ifeq ($(CONFIG_LD_IS_LLD),y)
+ifeq ($(shell test $(CONFIG_LLD_VERSION) -lt 150000; echo $$?),0)
+ KBUILD_CFLAGS += -mno-relax
+ KBUILD_AFLAGS += -mno-relax
+ifndef CONFIG_AS_IS_LLVM
+ KBUILD_CFLAGS += -Wa,-mno-relax
+ KBUILD_AFLAGS += -Wa,-mno-relax
+endif
+endif
+endif
+
+# ISA string setting
+riscv-march-$(CONFIG_ARCH_RV32I) := rv32ima
+riscv-march-$(CONFIG_ARCH_RV64I) := rv64ima
+riscv-march-$(CONFIG_FPU) := $(riscv-march-y)fd
+riscv-march-$(CONFIG_RISCV_ISA_C) := $(riscv-march-y)c
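+# e.g. CONFIG_ARCH_RV64I=y with CONFIG_FPU=y and CONFIG_RISCV_ISA_C=y
+# composes rv64ima -> rv64imafd -> rv64imafdc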
+
+ifdef CONFIG_TOOLCHAIN_NEEDS_OLD_ISA_SPEC
+KBUILD_CFLAGS += -Wa,-misa-spec=2.2
+KBUILD_AFLAGS += -Wa,-misa-spec=2.2
+else
+riscv-march-$(CONFIG_TOOLCHAIN_NEEDS_EXPLICIT_ZICSR_ZIFENCEI) := $(riscv-march-y)_zicsr_zifencei
+endif
+
+KBUILD_CFLAGS += -march=$(subst fd,,$(riscv-march-y))
+KBUILD_AFLAGS += -march=$(riscv-march-y)
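+# "fd" is stripped from the C flags above because kernel C code must not use
+# the FPU; the assembler keeps the full string for the FP save/restore code.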
+
+KBUILD_CFLAGS += -mno-save-restore
+KBUILD_CFLAGS += -DCONFIG_PAGE_OFFSET=$(CONFIG_PAGE_OFFSET)
+
+ifeq ($(CONFIG_CMODEL_MEDLOW),y)
+ KBUILD_CFLAGS += -mcmodel=medlow
+endif
+ifeq ($(CONFIG_CMODEL_MEDANY),y)
+ KBUILD_CFLAGS += -mcmodel=medany
+endif
+ifeq ($(CONFIG_PERF_EVENTS),y)
+ KBUILD_CFLAGS += -fno-omit-frame-pointer
+endif
+
+# Avoid generating .eh_frame sections.
+KBUILD_CFLAGS += -fno-asynchronous-unwind-tables -fno-unwind-tables
+
+KBUILD_CFLAGS_MODULE += $(call cc-option,-mno-relax)
+KBUILD_AFLAGS_MODULE += $(call as-option,-Wa$(comma)-mno-relax)
+
+# GCC versions that support the "-mstrict-align" option default to allowing
+# unaligned accesses. While unaligned accesses are explicitly allowed in the
+# RISC-V ISA, they're emulated by machine mode traps on all extant
+# architectures. It's faster to have GCC emit only aligned accesses.
+KBUILD_CFLAGS += $(call cc-option,-mstrict-align)
+
+# arch specific predefines for sparse
+CHECKFLAGS += -D__riscv -D__riscv_xlen=$(BITS)
+
+# Default target when executing plain make
+boot := arch/riscv/boot
+KBUILD_IMAGE := $(boot)/Image.gz
+
+head-y := arch/riscv/kernel/head.o
+
+core-y += arch/riscv/
+
+libs-y += arch/riscv/lib/
+libs-$(CONFIG_EFI_STUB) += $(objtree)/drivers/firmware/efi/libstub/lib.a
+
+PHONY += vdso_install
+vdso_install:
+ $(Q)$(MAKE) $(build)=arch/riscv/kernel/vdso $@
+
+ifeq ($(CONFIG_RISCV_M_MODE)$(CONFIG_SOC_KENDRYTE),yy)
+KBUILD_IMAGE := $(boot)/loader.bin
+else
+KBUILD_IMAGE := $(boot)/Image.gz
+endif
+BOOT_TARGETS := Image Image.gz loader loader.bin
+
+all: $(notdir $(KBUILD_IMAGE))
+
+$(BOOT_TARGETS): vmlinux
+ $(Q)$(MAKE) $(build)=$(boot) $(boot)/$@
+ @$(kecho) ' Kernel: $(boot)/$@ is ready'
+
+zinstall install:
+ $(Q)$(MAKE) $(build)=$(boot) $@
diff --git a/arch/riscv/boot/.gitignore b/arch/riscv/boot/.gitignore
new file mode 100644
index 000000000..574c10f8f
--- /dev/null
+++ b/arch/riscv/boot/.gitignore
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0-only
+Image
+Image.gz
+loader
+loader.lds
diff --git a/arch/riscv/boot/Makefile b/arch/riscv/boot/Makefile
new file mode 100644
index 000000000..c59fca695
--- /dev/null
+++ b/arch/riscv/boot/Makefile
@@ -0,0 +1,55 @@
+#
+# arch/riscv/boot/Makefile
+#
+# This file is included by the global makefile so that you can add your own
+# architecture-specific flags and dependencies.
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 2018, Anup Patel.
+# Author: Anup Patel <anup@brainfault.org>
+#
+# Based on the ia64 and arm64 boot/Makefile.
+#
+
+KCOV_INSTRUMENT := n
+
+OBJCOPYFLAGS_Image :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
+
+targets := Image loader
+
+$(obj)/Image: vmlinux FORCE
+ $(call if_changed,objcopy)
+
+$(obj)/Image.gz: $(obj)/Image FORCE
+ $(call if_changed,gzip)
+
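+# loader.S .incbin's the generated Image, so it must be reassembled whenever
+# the Image changes.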
+$(obj)/loader.o: $(src)/loader.S $(obj)/Image
+
+$(obj)/loader: $(obj)/loader.o $(obj)/Image $(obj)/loader.lds FORCE
+ $(Q)$(LD) -T $(obj)/loader.lds -o $@ $(obj)/loader.o
+
+$(obj)/Image.bz2: $(obj)/Image FORCE
+ $(call if_changed,bzip2)
+
+$(obj)/Image.lz4: $(obj)/Image FORCE
+ $(call if_changed,lz4)
+
+$(obj)/Image.lzma: $(obj)/Image FORCE
+ $(call if_changed,lzma)
+
+$(obj)/Image.lzo: $(obj)/Image FORCE
+ $(call if_changed,lzo)
+
+$(obj)/loader.bin: $(obj)/loader FORCE
+ $(call if_changed,objcopy)
+
+install:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/Image System.map "$(INSTALL_PATH)"
+
+zinstall:
+ $(CONFIG_SHELL) $(srctree)/$(src)/install.sh $(KERNELRELEASE) \
+ $(obj)/Image.gz System.map "$(INSTALL_PATH)"
diff --git a/arch/riscv/boot/dts/Makefile b/arch/riscv/boot/dts/Makefile
new file mode 100644
index 000000000..ca1f8cbd7
--- /dev/null
+++ b/arch/riscv/boot/dts/Makefile
@@ -0,0 +1,5 @@
+# SPDX-License-Identifier: GPL-2.0
+subdir-y += sifive
+subdir-y += kendryte
+
+obj-$(CONFIG_BUILTIN_DTB) := $(addsuffix /, $(subdir-y))
diff --git a/arch/riscv/boot/dts/kendryte/Makefile b/arch/riscv/boot/dts/kendryte/Makefile
new file mode 100644
index 000000000..1a88e616f
--- /dev/null
+++ b/arch/riscv/boot/dts/kendryte/Makefile
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_SOC_KENDRYTE_K210_DTB) += k210.dtb
+
+obj-$(CONFIG_SOC_KENDRYTE_K210_DTB_BUILTIN) += $(addsuffix .o, $(dtb-y))
diff --git a/arch/riscv/boot/dts/kendryte/k210.dts b/arch/riscv/boot/dts/kendryte/k210.dts
new file mode 100644
index 000000000..0d1f28fce
--- /dev/null
+++ b/arch/riscv/boot/dts/kendryte/k210.dts
@@ -0,0 +1,23 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+/dts-v1/;
+
+#include "k210.dtsi"
+
+/ {
+ model = "Kendryte K210 generic";
+ compatible = "kendryte,k210";
+
+ chosen {
+ bootargs = "earlycon console=ttySIF0";
+ stdout-path = "serial0";
+ };
+};
+
+&uarths0 {
+ status = "okay";
+};
+
diff --git a/arch/riscv/boot/dts/kendryte/k210.dtsi b/arch/riscv/boot/dts/kendryte/k210.dtsi
new file mode 100644
index 000000000..d2d0ff645
--- /dev/null
+++ b/arch/riscv/boot/dts/kendryte/k210.dtsi
@@ -0,0 +1,125 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) 2019 Sean Anderson <seanga2@gmail.com>
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#include <dt-bindings/clock/k210-clk.h>
+
+/ {
+ /*
+ * Although the K210 is a 64-bit CPU, the address bus is only 32-bits
+ * wide, and the upper half of all addresses is ignored.
+ */
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "kendryte,k210";
+
+ aliases {
+ serial0 = &uarths0;
+ };
+
+ /*
+ * The K210 has an Sv39 MMU following the privilege specification v1.9.
+ * Since this is a non-ratified draft specification, the kernel does not
+ * support it, and K210 support is enabled only for the !MMU case.
+ * Be consistent with this by setting the CPUs' MMU type to "none".
+ */
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ timebase-frequency = <7800000>;
+ cpu0: cpu@0 {
+ device_type = "cpu";
+ reg = <0>;
+ compatible = "kendryte,k210", "sifive,rocket0", "riscv";
+ riscv,isa = "rv64imafdc";
+ mmu-type = "none";
+ i-cache-size = <0x8000>;
+ i-cache-block-size = <64>;
+ d-cache-size = <0x8000>;
+ d-cache-block-size = <64>;
+ clocks = <&sysctl K210_CLK_CPU>;
+ clock-frequency = <390000000>;
+ cpu0_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "riscv,cpu-intc";
+ };
+ };
+ cpu1: cpu@1 {
+ device_type = "cpu";
+ reg = <1>;
+ compatible = "kendryte,k210", "sifive,rocket0", "riscv";
+ riscv,isa = "rv64imafdc";
+ mmu-type = "none";
+ i-cache-size = <0x8000>;
+ i-cache-block-size = <64>;
+ d-cache-size = <0x8000>;
+ d-cache-block-size = <64>;
+ clocks = <&sysctl K210_CLK_CPU>;
+ clock-frequency = <390000000>;
+ cpu1_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "riscv,cpu-intc";
+ };
+ };
+ };
+
+ sram: memory@80000000 {
+ device_type = "memory";
+ reg = <0x80000000 0x400000>,
+ <0x80400000 0x200000>,
+ <0x80600000 0x200000>;
+ reg-names = "sram0", "sram1", "aisram";
+ };
+
+ clocks {
+ in0: oscillator {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <26000000>;
+ };
+ };
+
+ soc {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "kendryte,k210-soc", "simple-bus";
+ ranges;
+ interrupt-parent = <&plic0>;
+
+ sysctl: sysctl@50440000 {
+ compatible = "kendryte,k210-sysctl", "simple-mfd";
+ reg = <0x50440000 0x1000>;
+ #clock-cells = <1>;
+ };
+
+ clint0: clint@2000000 {
+ #interrupt-cells = <1>;
+ compatible = "riscv,clint0";
+ reg = <0x2000000 0xC000>;
+ interrupts-extended = <&cpu0_intc 3 &cpu0_intc 7
+ &cpu1_intc 3 &cpu1_intc 7>;
+ clocks = <&sysctl K210_CLK_ACLK>;
+ };
+
+ plic0: interrupt-controller@c000000 {
+ #interrupt-cells = <1>;
+ interrupt-controller;
+ compatible = "kendryte,k210-plic0", "riscv,plic0";
+ reg = <0xC000000 0x4000000>;
+ interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 0xffffffff>,
+ <&cpu1_intc 11>, <&cpu1_intc 0xffffffff>;
+ riscv,ndev = <65>;
+ riscv,max-priority = <7>;
+ };
+
+ uarths0: serial@38000000 {
+ compatible = "kendryte,k210-uarths", "sifive,uart0";
+ reg = <0x38000000 0x1000>;
+ interrupts = <33>;
+ clocks = <&sysctl K210_CLK_CPU>;
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/sifive/Makefile b/arch/riscv/boot/dts/sifive/Makefile
new file mode 100644
index 000000000..6d6189e6e
--- /dev/null
+++ b/arch/riscv/boot/dts/sifive/Makefile
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0
+dtb-$(CONFIG_SOC_SIFIVE) += hifive-unleashed-a00.dtb
diff --git a/arch/riscv/boot/dts/sifive/fu540-c000.dtsi b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
new file mode 100644
index 000000000..64c06c9b4
--- /dev/null
+++ b/arch/riscv/boot/dts/sifive/fu540-c000.dtsi
@@ -0,0 +1,286 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2018-2019 SiFive, Inc */
+
+/dts-v1/;
+
+#include <dt-bindings/clock/sifive-fu540-prci.h>
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "sifive,fu540-c000", "sifive,fu540";
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ ethernet0 = &eth0;
+ };
+
+ chosen {
+ };
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ cpu0: cpu@0 {
+ compatible = "sifive,e51", "sifive,rocket0", "riscv";
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <128>;
+ i-cache-size = <16384>;
+ reg = <0>;
+ riscv,isa = "rv64imac";
+ status = "disabled";
+ cpu0_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu1: cpu@1 {
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <1>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ next-level-cache = <&l2cache>;
+ cpu1_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu2: cpu@2 {
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <2>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ next-level-cache = <&l2cache>;
+ cpu2_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu3: cpu@3 {
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <3>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ next-level-cache = <&l2cache>;
+ cpu3_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ cpu4: cpu@4 {
+ compatible = "sifive,u54-mc", "sifive,rocket0", "riscv";
+ d-cache-block-size = <64>;
+ d-cache-sets = <64>;
+ d-cache-size = <32768>;
+ d-tlb-sets = <1>;
+ d-tlb-size = <32>;
+ device_type = "cpu";
+ i-cache-block-size = <64>;
+ i-cache-sets = <64>;
+ i-cache-size = <32768>;
+ i-tlb-sets = <1>;
+ i-tlb-size = <32>;
+ mmu-type = "riscv,sv39";
+ reg = <4>;
+ riscv,isa = "rv64imafdc";
+ tlb-split;
+ next-level-cache = <&l2cache>;
+ cpu4_intc: interrupt-controller {
+ #interrupt-cells = <1>;
+ compatible = "riscv,cpu-intc";
+ interrupt-controller;
+ };
+ };
+ };
+ soc {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ compatible = "sifive,fu540-c000", "sifive,fu540", "simple-bus";
+ ranges;
+ plic0: interrupt-controller@c000000 {
+ #interrupt-cells = <1>;
+ compatible = "sifive,plic-1.0.0";
+ reg = <0x0 0xc000000 0x0 0x4000000>;
+ riscv,ndev = <53>;
+ interrupt-controller;
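+ /*
+  * An entry of 0xffffffff (-1) marks a hart context Linux cannot take
+  * interrupts on: the M-mode context of each hart (the kernel runs in
+  * S-mode) and the S-mode-less E51 monitor core. 9 is the S-mode
+  * external interrupt on the U54 harts.
+  */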
+ interrupts-extended = <
+ &cpu0_intc 0xffffffff
+ &cpu1_intc 0xffffffff &cpu1_intc 9
+ &cpu2_intc 0xffffffff &cpu2_intc 9
+ &cpu3_intc 0xffffffff &cpu3_intc 9
+ &cpu4_intc 0xffffffff &cpu4_intc 9>;
+ };
+ prci: clock-controller@10000000 {
+ compatible = "sifive,fu540-c000-prci";
+ reg = <0x0 0x10000000 0x0 0x1000>;
+ clocks = <&hfclk>, <&rtcclk>;
+ #clock-cells = <1>;
+ };
+ uart0: serial@10010000 {
+ compatible = "sifive,fu540-c000-uart", "sifive,uart0";
+ reg = <0x0 0x10010000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <4>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ status = "disabled";
+ };
+ dma: dma-controller@3000000 {
+ compatible = "sifive,fu540-c000-pdma";
+ reg = <0x0 0x3000000 0x0 0x8000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <23 24 25 26 27 28 29 30>;
+ #dma-cells = <1>;
+ };
+ uart1: serial@10011000 {
+ compatible = "sifive,fu540-c000-uart", "sifive,uart0";
+ reg = <0x0 0x10011000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <5>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ status = "disabled";
+ };
+ i2c0: i2c@10030000 {
+ compatible = "sifive,fu540-c000-i2c", "sifive,i2c0";
+ reg = <0x0 0x10030000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <50>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ reg-shift = <2>;
+ reg-io-width = <1>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ qspi0: spi@10040000 {
+ compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10040000 0x0 0x1000
+ 0x0 0x20000000 0x0 0x10000000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <51>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ qspi1: spi@10041000 {
+ compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10041000 0x0 0x1000
+ 0x0 0x30000000 0x0 0x10000000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <52>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ qspi2: spi@10050000 {
+ compatible = "sifive,fu540-c000-spi", "sifive,spi0";
+ reg = <0x0 0x10050000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <6>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ eth0: ethernet@10090000 {
+ compatible = "sifive,fu540-c000-gem";
+ interrupt-parent = <&plic0>;
+ interrupts = <53>;
+ reg = <0x0 0x10090000 0x0 0x2000
+ 0x0 0x100a0000 0x0 0x1000>;
+ local-mac-address = [00 00 00 00 00 00];
+ clock-names = "pclk", "hclk";
+ clocks = <&prci PRCI_CLK_GEMGXLPLL>,
+ <&prci PRCI_CLK_GEMGXLPLL>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+ };
+ pwm0: pwm@10020000 {
+ compatible = "sifive,fu540-c000-pwm", "sifive,pwm0";
+ reg = <0x0 0x10020000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <42 43 44 45>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ pwm1: pwm@10021000 {
+ compatible = "sifive,fu540-c000-pwm", "sifive,pwm0";
+ reg = <0x0 0x10021000 0x0 0x1000>;
+ interrupt-parent = <&plic0>;
+ interrupts = <46 47 48 49>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ #pwm-cells = <3>;
+ status = "disabled";
+ };
+ l2cache: cache-controller@2010000 {
+ compatible = "sifive,fu540-c000-ccache", "cache";
+ cache-block-size = <64>;
+ cache-level = <2>;
+ cache-sets = <1024>;
+ cache-size = <2097152>;
+ cache-unified;
+ interrupt-parent = <&plic0>;
+ interrupts = <1 2 3>;
+ reg = <0x0 0x2010000 0x0 0x1000>;
+ };
+ gpio: gpio@10060000 {
+ compatible = "sifive,fu540-c000-gpio", "sifive,gpio0";
+ interrupt-parent = <&plic0>;
+ interrupts = <7>, <8>, <9>, <10>, <11>, <12>, <13>,
+ <14>, <15>, <16>, <17>, <18>, <19>, <20>,
+ <21>, <22>;
+ reg = <0x0 0x10060000 0x0 0x1000>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ clocks = <&prci PRCI_CLK_TLCLK>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
new file mode 100644
index 000000000..dddabfbbc
--- /dev/null
+++ b/arch/riscv/boot/dts/sifive/hifive-unleashed-a00.dts
@@ -0,0 +1,144 @@
+// SPDX-License-Identifier: (GPL-2.0 OR MIT)
+/* Copyright (c) 2018-2019 SiFive, Inc */
+
+#include "fu540-c000.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/leds/common.h>
+#include <dt-bindings/pwm/pwm.h>
+
+/* Clock frequency (in Hz) of the PCB crystal for rtcclk */
+#define RTCCLK_FREQ 1000000
+
+/ {
+ #address-cells = <2>;
+ #size-cells = <2>;
+ model = "SiFive HiFive Unleashed A00";
+ compatible = "sifive,hifive-unleashed-a00", "sifive,fu540-c000";
+
+ chosen {
+ stdout-path = "serial0";
+ };
+
+ cpus {
+ timebase-frequency = <RTCCLK_FREQ>;
+ };
+
+ memory@80000000 {
+ device_type = "memory";
+ reg = <0x0 0x80000000 0x2 0x00000000>;
+ };
+
+ soc {
+ };
+
+ hfclk: hfclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <33333333>;
+ clock-output-names = "hfclk";
+ };
+
+ rtcclk: rtcclk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <RTCCLK_FREQ>;
+ clock-output-names = "rtcclk";
+ };
+ gpio-restart {
+ compatible = "gpio-restart";
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ led-controller {
+ compatible = "pwm-leds";
+
+ led-d1 {
+ pwms = <&pwm0 0 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ max-brightness = <255>;
+ label = "d1";
+ };
+
+ led-d2 {
+ pwms = <&pwm0 1 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ max-brightness = <255>;
+ label = "d2";
+ };
+
+ led-d3 {
+ pwms = <&pwm0 2 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ max-brightness = <255>;
+ label = "d3";
+ };
+
+ led-d4 {
+ pwms = <&pwm0 3 7812500 PWM_POLARITY_INVERTED>;
+ active-low;
+ color = <LED_COLOR_ID_GREEN>;
+ max-brightness = <255>;
+ label = "d4";
+ };
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
+
+&uart1 {
+ status = "okay";
+};
+
+&i2c0 {
+ status = "okay";
+};
+
+&qspi0 {
+ status = "okay";
+ flash@0 {
+ compatible = "issi,is25wp256", "jedec,spi-nor";
+ reg = <0>;
+ spi-max-frequency = <50000000>;
+ m25p,fast-read;
+ spi-tx-bus-width = <4>;
+ spi-rx-bus-width = <4>;
+ };
+};
+
+&qspi2 {
+ status = "okay";
+ mmc@0 {
+ compatible = "mmc-spi-slot";
+ reg = <0>;
+ spi-max-frequency = <20000000>;
+ voltage-ranges = <3300 3300>;
+ disable-wp;
+ };
+};
+
+&eth0 {
+ status = "okay";
+ phy-mode = "gmii";
+ phy-handle = <&phy0>;
+ phy0: ethernet-phy@0 {
+ compatible = "ethernet-phy-id0007.0771";
+ reg = <0>;
+ };
+};
+
+&pwm0 {
+ status = "okay";
+};
+
+&pwm1 {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
diff --git a/arch/riscv/boot/install.sh b/arch/riscv/boot/install.sh
new file mode 100644
index 000000000..18c39159c
--- /dev/null
+++ b/arch/riscv/boot/install.sh
@@ -0,0 +1,60 @@
+#!/bin/sh
+#
+# arch/riscv/boot/install.sh
+#
+# This file is subject to the terms and conditions of the GNU General Public
+# License. See the file "COPYING" in the main directory of this archive
+# for more details.
+#
+# Copyright (C) 1995 by Linus Torvalds
+#
+# Adapted from code in arch/i386/boot/Makefile by H. Peter Anvin
+# Adapted from code in arch/i386/boot/install.sh by Russell King
+#
+# "make install" script for the RISC-V Linux port
+#
+# Arguments:
+# $1 - kernel version
+# $2 - kernel image file
+# $3 - kernel map file
+# $4 - default install path (blank if root directory)
+#
+
+verify () {
+ if [ ! -f "$1" ]; then
+ echo "" 1>&2
+ echo " *** Missing file: $1" 1>&2
+ echo ' *** You need to run "make" before "make install".' 1>&2
+ echo "" 1>&2
+ exit 1
+ fi
+}
+
+# Make sure the files actually exist
+verify "$2"
+verify "$3"
+
+# User may have a custom install script
+if [ -x ~/bin/${INSTALLKERNEL} ]; then exec ~/bin/${INSTALLKERNEL} "$@"; fi
+if [ -x /sbin/${INSTALLKERNEL} ]; then exec /sbin/${INSTALLKERNEL} "$@"; fi
+
+if [ "$(basename $2)" = "Image.gz" ]; then
+# Compressed install
+ echo "Installing compressed kernel"
+ base=vmlinuz
+else
+# Normal install
+ echo "Installing normal kernel"
+ base=vmlinux
+fi
+
+if [ -f $4/$base-$1 ]; then
+ mv $4/$base-$1 $4/$base-$1.old
+fi
+cat $2 > $4/$base-$1
+
+# Install system map file
+if [ -f $4/System.map-$1 ]; then
+ mv $4/System.map-$1 $4/System.map-$1.old
+fi
+cp $3 $4/System.map-$1
diff --git a/arch/riscv/boot/loader.S b/arch/riscv/boot/loader.S
new file mode 100644
index 000000000..dcf88cf44
--- /dev/null
+++ b/arch/riscv/boot/loader.S
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+ .align 4
+ .section .payload, "ax", %progbits
+ .globl _start
+_start:
+ .incbin "arch/riscv/boot/Image"
+
diff --git a/arch/riscv/boot/loader.lds.S b/arch/riscv/boot/loader.lds.S
new file mode 100644
index 000000000..47a5003c2
--- /dev/null
+++ b/arch/riscv/boot/loader.lds.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#include <asm/page.h>
+
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+
+SECTIONS
+{
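+ /* Place the Image payload at PAGE_OFFSET, where the !MMU kernel runs in place. */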
+ . = PAGE_OFFSET;
+
+ .payload : {
+ *(.payload)
+ . = ALIGN(8);
+ }
+}
diff --git a/arch/riscv/configs/defconfig b/arch/riscv/configs/defconfig
new file mode 100644
index 000000000..8c3d1e451
--- /dev/null
+++ b/arch/riscv/configs/defconfig
@@ -0,0 +1,135 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_SIFIVE=y
+CONFIG_SOC_VIRT=y
+CONFIG_SMP=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_SIFIVE=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_POWER_RESET=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_VIRTIO=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_DEBUG_TIMEKEEPING=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_RWSEMS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_PLIST=y
+CONFIG_DEBUG_SG=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_EQS_DEBUG=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_MEMTEST=y
+# CONFIG_SYSFS_SYSCALL is not set
+CONFIG_EFI=y
diff --git a/arch/riscv/configs/nommu_k210_defconfig b/arch/riscv/configs/nommu_k210_defconfig
new file mode 100644
index 000000000..cd1df62b1
--- /dev/null
+++ b/arch/riscv/configs/nommu_k210_defconfig
@@ -0,0 +1,66 @@
+# CONFIG_CPU_ISOLATION is not set
+CONFIG_LOG_BUF_SHIFT=15
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_INITRAMFS_FORCE=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+# CONFIG_KALLSYMS is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+# CONFIG_MMU is not set
+CONFIG_SOC_KENDRYTE=y
+CONFIG_MAXPHYSMEM_2GB=y
+CONFIG_SMP=y
+CONFIG_NR_CPUS=2
+CONFIG_CMDLINE="earlycon console=ttySIF0"
+CONFIG_CMDLINE_FORCE=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_BLOCK is not set
+CONFIG_BINFMT_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+# CONFIG_VIRTIO_MENU is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_LSM="[]"
+CONFIG_PRINTK_TIME=y
+# CONFIG_DEBUG_MISC is not set
+CONFIG_PANIC_ON_OOPS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/riscv/configs/nommu_virt_defconfig b/arch/riscv/configs/nommu_virt_defconfig
new file mode 100644
index 000000000..e046a0bab
--- /dev/null
+++ b/arch/riscv/configs/nommu_virt_defconfig
@@ -0,0 +1,76 @@
+# CONFIG_CPU_ISOLATION is not set
+CONFIG_LOG_BUF_SHIFT=16
+CONFIG_PRINTK_SAFE_LOG_BUF_SHIFT=12
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_LZMA is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+CONFIG_EXPERT=y
+# CONFIG_SYSFS_SYSCALL is not set
+# CONFIG_FHANDLE is not set
+# CONFIG_BASE_FULL is not set
+# CONFIG_EPOLL is not set
+# CONFIG_SIGNALFD is not set
+# CONFIG_TIMERFD is not set
+# CONFIG_EVENTFD is not set
+# CONFIG_AIO is not set
+# CONFIG_IO_URING is not set
+# CONFIG_ADVISE_SYSCALLS is not set
+# CONFIG_MEMBARRIER is not set
+# CONFIG_KALLSYMS is not set
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_SLOB=y
+# CONFIG_SLAB_MERGE_DEFAULT is not set
+# CONFIG_MMU is not set
+CONFIG_SOC_VIRT=y
+CONFIG_MAXPHYSMEM_2GB=y
+CONFIG_SMP=y
+CONFIG_CMDLINE="root=/dev/vda rw earlycon=uart8250,mmio,0x10000000,115200n8 console=ttyS0"
+CONFIG_CMDLINE_FORCE=y
+CONFIG_JUMP_LABEL=y
+# CONFIG_BLK_DEV_BSG is not set
+CONFIG_PARTITION_ADVANCED=y
+# CONFIG_MSDOS_PARTITION is not set
+# CONFIG_EFI_PARTITION is not set
+# CONFIG_MQ_IOSCHED_DEADLINE is not set
+# CONFIG_MQ_IOSCHED_KYBER is not set
+CONFIG_BINFMT_FLAT=y
+# CONFIG_COREDUMP is not set
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+# CONFIG_FW_LOADER is not set
+# CONFIG_ALLOW_DEV_COREDUMP is not set
+CONFIG_VIRTIO_BLK=y
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_LDISC_AUTOLOAD is not set
+CONFIG_SERIAL_8250=y
+# CONFIG_SERIAL_8250_DEPRECATED_OPTIONS is not set
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+CONFIG_SERIAL_OF_PLATFORM=y
+# CONFIG_HW_RANDOM is not set
+# CONFIG_DEVMEM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_CONSOLE is not set
+# CONFIG_HID is not set
+# CONFIG_USB_SUPPORT is not set
+CONFIG_VIRTIO_MMIO=y
+CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES=y
+CONFIG_EXT2_FS=y
+# CONFIG_DNOTIFY is not set
+# CONFIG_INOTIFY_USER is not set
+# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_LSM="[]"
+CONFIG_PRINTK_TIME=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_RCU_TRACE is not set
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
diff --git a/arch/riscv/configs/rv32_defconfig b/arch/riscv/configs/rv32_defconfig
new file mode 100644
index 000000000..2c2cda6cc
--- /dev/null
+++ b/arch/riscv/configs/rv32_defconfig
@@ -0,0 +1,133 @@
+CONFIG_SYSVIPC=y
+CONFIG_POSIX_MQUEUE=y
+CONFIG_NO_HZ_IDLE=y
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_IKCONFIG=y
+CONFIG_IKCONFIG_PROC=y
+CONFIG_CGROUPS=y
+CONFIG_CGROUP_SCHED=y
+CONFIG_CFS_BANDWIDTH=y
+CONFIG_CGROUP_BPF=y
+CONFIG_NAMESPACES=y
+CONFIG_USER_NS=y
+CONFIG_CHECKPOINT_RESTORE=y
+CONFIG_BLK_DEV_INITRD=y
+CONFIG_EXPERT=y
+CONFIG_BPF_SYSCALL=y
+CONFIG_SOC_SIFIVE=y
+CONFIG_SOC_VIRT=y
+CONFIG_ARCH_RV32I=y
+CONFIG_SMP=y
+CONFIG_JUMP_LABEL=y
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+CONFIG_IP_PNP=y
+CONFIG_IP_PNP_DHCP=y
+CONFIG_IP_PNP_BOOTP=y
+CONFIG_IP_PNP_RARP=y
+CONFIG_NETLINK_DIAG=y
+CONFIG_NET_9P=y
+CONFIG_NET_9P_VIRTIO=y
+CONFIG_PCI=y
+CONFIG_PCIEPORTBUS=y
+CONFIG_PCI_HOST_GENERIC=y
+CONFIG_PCIE_XILINX=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
+CONFIG_BLK_DEV_LOOP=y
+CONFIG_VIRTIO_BLK=y
+CONFIG_BLK_DEV_SD=y
+CONFIG_BLK_DEV_SR=y
+CONFIG_SCSI_VIRTIO=y
+CONFIG_ATA=y
+CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_NETDEVICES=y
+CONFIG_VIRTIO_NET=y
+CONFIG_MACB=y
+CONFIG_E1000E=y
+CONFIG_R8169=y
+CONFIG_MICROSEMI_PHY=y
+CONFIG_INPUT_MOUSEDEV=y
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+CONFIG_SERIAL_OF_PLATFORM=y
+CONFIG_SERIAL_EARLYCON_RISCV_SBI=y
+CONFIG_HVC_RISCV_SBI=y
+CONFIG_VIRTIO_CONSOLE=y
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_VIRTIO=y
+CONFIG_SPI=y
+CONFIG_SPI_SIFIVE=y
+# CONFIG_PTP_1588_CLOCK is not set
+CONFIG_POWER_RESET=y
+CONFIG_DRM=y
+CONFIG_DRM_RADEON=y
+CONFIG_DRM_VIRTIO_GPU=y
+CONFIG_FRAMEBUFFER_CONSOLE=y
+CONFIG_USB=y
+CONFIG_USB_XHCI_HCD=y
+CONFIG_USB_XHCI_PLATFORM=y
+CONFIG_USB_EHCI_HCD=y
+CONFIG_USB_EHCI_HCD_PLATFORM=y
+CONFIG_USB_OHCI_HCD=y
+CONFIG_USB_OHCI_HCD_PLATFORM=y
+CONFIG_USB_STORAGE=y
+CONFIG_USB_UAS=y
+CONFIG_MMC=y
+CONFIG_MMC_SPI=y
+CONFIG_RTC_CLASS=y
+CONFIG_VIRTIO_PCI=y
+CONFIG_VIRTIO_BALLOON=y
+CONFIG_VIRTIO_INPUT=y
+CONFIG_VIRTIO_MMIO=y
+CONFIG_RPMSG_CHAR=y
+CONFIG_RPMSG_VIRTIO=y
+CONFIG_EXT4_FS=y
+CONFIG_EXT4_FS_POSIX_ACL=y
+CONFIG_AUTOFS4_FS=y
+CONFIG_MSDOS_FS=y
+CONFIG_VFAT_FS=y
+CONFIG_TMPFS=y
+CONFIG_TMPFS_POSIX_ACL=y
+CONFIG_NFS_FS=y
+CONFIG_NFS_V4=y
+CONFIG_NFS_V4_1=y
+CONFIG_NFS_V4_2=y
+CONFIG_ROOT_NFS=y
+CONFIG_9P_FS=y
+CONFIG_CRYPTO_USER_API_HASH=y
+CONFIG_CRYPTO_DEV_VIRTIO=y
+CONFIG_PRINTK_TIME=y
+CONFIG_DEBUG_FS=y
+CONFIG_DEBUG_PAGEALLOC=y
+CONFIG_SCHED_STACK_END_CHECK=y
+CONFIG_DEBUG_VM=y
+CONFIG_DEBUG_VM_PGFLAGS=y
+CONFIG_DEBUG_MEMORY_INIT=y
+CONFIG_DEBUG_PER_CPU_MAPS=y
+CONFIG_SOFTLOCKUP_DETECTOR=y
+CONFIG_WQ_WATCHDOG=y
+CONFIG_DEBUG_TIMEKEEPING=y
+CONFIG_DEBUG_RT_MUTEXES=y
+CONFIG_DEBUG_SPINLOCK=y
+CONFIG_DEBUG_MUTEXES=y
+CONFIG_DEBUG_RWSEMS=y
+CONFIG_DEBUG_ATOMIC_SLEEP=y
+CONFIG_STACKTRACE=y
+CONFIG_DEBUG_LIST=y
+CONFIG_DEBUG_PLIST=y
+CONFIG_DEBUG_SG=y
+# CONFIG_RCU_TRACE is not set
+CONFIG_RCU_EQS_DEBUG=y
+CONFIG_DEBUG_BLOCK_EXT_DEVT=y
+# CONFIG_FTRACE is not set
+# CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_MEMTEST=y
+# CONFIG_SYSFS_SYSCALL is not set
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
new file mode 100644
index 000000000..445ccc973
--- /dev/null
+++ b/arch/riscv/include/asm/Kbuild
@@ -0,0 +1,7 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += early_ioremap.h
+generic-y += extable.h
+generic-y += flat.h
+generic-y += kvm_para.h
+generic-y += user.h
+generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h
new file mode 100644
index 000000000..d370ee36a
--- /dev/null
+++ b/arch/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 000000000..27e005fca
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+long long __lshrti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __ashlti3(long long a, int b);
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
new file mode 100644
index 000000000..9c992a88d
--- /dev/null
+++ b/arch/riscv/include/asm/asm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x) x
+#else
+#define __ASM_STR(x) #x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b) __ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b) __ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L __REG_SEL(ld, lw)
+#define REG_S __REG_SEL(sd, sw)
+#define REG_SC __REG_SEL(sc.d, sc.w)
+#define SZREG __REG_SEL(8, 4)
+#define LGREG __REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .dword
+#define RISCV_SZPTR 8
+#define RISCV_LGPTR 3
+#else
+#define RISCV_PTR ".dword"
+#define RISCV_SZPTR "8"
+#define RISCV_LGPTR "3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .word
+#define RISCV_SZPTR 4
+#define RISCV_LGPTR 2
+#else
+#define RISCV_PTR ".word"
+#define RISCV_SZPTR "4"
+#define RISCV_LGPTR "2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define RISCV_INT __ASM_STR(.word)
+#define RISCV_SZINT __ASM_STR(4)
+#define RISCV_LGINT __ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define RISCV_SHORT __ASM_STR(.half)
+#define RISCV_SZSHORT __ASM_STR(2)
+#define RISCV_LGSHORT __ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#endif /* _ASM_RISCV_ASM_H */
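
The __REG_SEL() plumbing above lets a single source file assemble correctly on both RV32 and RV64. As a minimal sketch (the function name is hypothetical) of how the string forms of REG_L and REG_S are used from C inline assembly:

#include <asm/asm.h>

/* Copies one register-width word: assembles to ld/sd on RV64, lw/sw on RV32. */
static inline void copy_xlen_word(unsigned long *dst, const unsigned long *src)
{
	unsigned long tmp;

	__asm__ __volatile__ (
		REG_L " %0, 0(%1)\n"
		REG_S " %0, 0(%2)\n"
		: "=&r" (tmp)
		: "r" (src), "r" (dst)
		: "memory");
}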
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
new file mode 100644
index 000000000..400a8c8b6
--- /dev/null
+++ b/arch/riscv/include/asm/atomic.h
@@ -0,0 +1,356 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_ATOMIC_H
+#define _ASM_RISCV_ATOMIC_H
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+# include <asm-generic/atomic64.h>
+#else
+# if (__riscv_xlen < 64)
+# error "64-bit atomics require XLEN to be at least 64"
+# endif
+#endif
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
+
+static __always_inline int atomic_read(const atomic_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void atomic_set(atomic_t *v, int i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC64_INIT(i) { (i) }
+static __always_inline s64 atomic64_read(const atomic64_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void atomic64_set(atomic64_t *v, s64 i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+#endif
+
+/*
+ * First, the atomic ops that have no ordering constraints and therefore don't
+ * have the AQ or RL bits set. These don't return anything, so there's only
+ * one version to worry about.
+ */
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+void atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " zero, %1, %0" \
+ : "+A" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+} \
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_OP (op, asm_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_OP (op, asm_op, I, w, int, ) \
+ ATOMIC_OP (op, asm_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(add, add, i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and, i)
+ATOMIC_OPS( or, or, i)
+ATOMIC_OPS(xor, xor, i)
+
+#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+/*
+ * Atomic ops that have ordered, relaxed, acquire, and release variants.
+ * There are two flavors of these: the arithmetic ops have both fetch and return
+ * versions, while the logical ops only have fetch versions.
+ */
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+c_type atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+ atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+} \
+static __always_inline \
+c_type atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+}
+
+#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+c_type atomic##prefix##_##op##_return_relaxed(c_type i, \
+ atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+} \
+static __always_inline \
+c_type atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+{ \
+ return atomic##prefix##_fetch_##op(i, v) c_op I; \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
+ ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(add, add, +, i)
+ATOMIC_OPS(sub, add, +, -i)
+
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_add_return atomic_add_return
+#define atomic_sub_return atomic_sub_return
+
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+#define atomic_fetch_add atomic_fetch_add
+#define atomic_fetch_sub atomic_fetch_sub
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_add_return atomic64_add_return
+#define atomic64_sub_return atomic64_sub_return
+
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+#define atomic64_fetch_add atomic64_fetch_add
+#define atomic64_fetch_sub atomic64_fetch_sub
+#endif
+
+#undef ATOMIC_OPS
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
+ ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(and, and, i)
+ATOMIC_OPS( or, or, i)
+ATOMIC_OPS(xor, xor, i)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
+#define atomic_fetch_and atomic_fetch_and
+#define atomic_fetch_or atomic_fetch_or
+#define atomic_fetch_xor atomic_fetch_xor
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
+#define atomic64_fetch_and atomic64_fetch_and
+#define atomic64_fetch_or atomic64_fetch_or
+#define atomic64_fetch_xor atomic64_fetch_xor
+#endif
+
+#undef ATOMIC_OPS
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+
+/* This is required to provide a full barrier on success. */
+static __always_inline int atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add %[rc], %[p], %[a]\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+#define atomic_fetch_add_unless atomic_fetch_add_unless
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add %[rc], %[p], %[a]\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+#define atomic64_fetch_add_unless atomic64_fetch_add_unless
+#endif
+
+/*
+ * atomic_{cmp,}xchg is required to have exactly the same ordering semantics as
+ * {cmp,}xchg and the operations that return, so they need a full barrier.
+ */
+#define ATOMIC_OP(c_t, prefix, size) \
+static __always_inline \
+c_t atomic##prefix##_xchg_relaxed(atomic##prefix##_t *v, c_t n) \
+{ \
+ return __xchg_relaxed(&(v->counter), n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_xchg_acquire(atomic##prefix##_t *v, c_t n) \
+{ \
+ return __xchg_acquire(&(v->counter), n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_xchg_release(atomic##prefix##_t *v, c_t n) \
+{ \
+ return __xchg_release(&(v->counter), n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_xchg(atomic##prefix##_t *v, c_t n) \
+{ \
+ return __xchg(&(v->counter), n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_cmpxchg_relaxed(atomic##prefix##_t *v, \
+ c_t o, c_t n) \
+{ \
+ return __cmpxchg_relaxed(&(v->counter), o, n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_cmpxchg_acquire(atomic##prefix##_t *v, \
+ c_t o, c_t n) \
+{ \
+ return __cmpxchg_acquire(&(v->counter), o, n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_cmpxchg_release(atomic##prefix##_t *v, \
+ c_t o, c_t n) \
+{ \
+ return __cmpxchg_release(&(v->counter), o, n, size); \
+} \
+static __always_inline \
+c_t atomic##prefix##_cmpxchg(atomic##prefix##_t *v, c_t o, c_t n) \
+{ \
+ return __cmpxchg(&(v->counter), o, n, size); \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS() \
+ ATOMIC_OP(int, , 4)
+#else
+#define ATOMIC_OPS() \
+ ATOMIC_OP(int, , 4) \
+ ATOMIC_OP(s64, 64, 8)
+#endif
+
+ATOMIC_OPS()
+
+#define atomic_xchg_relaxed atomic_xchg_relaxed
+#define atomic_xchg_acquire atomic_xchg_acquire
+#define atomic_xchg_release atomic_xchg_release
+#define atomic_xchg atomic_xchg
+#define atomic_cmpxchg_relaxed atomic_cmpxchg_relaxed
+#define atomic_cmpxchg_acquire atomic_cmpxchg_acquire
+#define atomic_cmpxchg_release atomic_cmpxchg_release
+#define atomic_cmpxchg atomic_cmpxchg
+
+#undef ATOMIC_OPS
+#undef ATOMIC_OP
+
+static __always_inline int atomic_sub_if_positive(atomic_t *v, int offset)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " sub %[rc], %[p], %[o]\n"
+ " bltz %[rc], 1f\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [o]"r" (offset)
+ : "memory");
+ return prev - offset;
+}
+
+#define atomic_dec_if_positive(v) atomic_sub_if_positive(v, 1)
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline s64 atomic64_sub_if_positive(atomic64_t *v, s64 offset)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " sub %[rc], %[p], %[o]\n"
+ " bltz %[rc], 1f\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [o]"r" (offset)
+ : "memory");
+ return prev - offset;
+}
+
+#define atomic64_dec_if_positive(v) atomic64_sub_if_positive(v, 1)
+#endif
+
+#endif /* _ASM_RISCV_ATOMIC_H */
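
To make the LR/SC sequences above concrete, here is a hedged sketch (names hypothetical) of how atomic_fetch_add_unless() and atomic_sub_if_positive() might back a simple reference counter:

static inline bool ref_get_unless_zero(atomic_t *ref)
{
	/* Increment only if the count is non-zero; full barrier on success. */
	return atomic_fetch_add_unless(ref, 1, 0) != 0;
}

static inline bool ref_put_and_test(atomic_t *ref)
{
	/* Returns true when this put dropped the count to zero. */
	return atomic_sub_if_positive(ref, 1) == 0;
}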
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
new file mode 100644
index 000000000..d0e24aaa2
--- /dev/null
+++ b/arch/riscv/include/asm/barrier.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Based on arch/arm/include/asm/barrier.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_BARRIER_H
+#define _ASM_RISCV_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() __asm__ __volatile__ ("nop")
+
+#define RISCV_FENCE(p, s) \
+ __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+
+/* These barriers need to enforce ordering on both devices or memory. */
+#define mb() RISCV_FENCE(iorw,iorw)
+#define rmb() RISCV_FENCE(ir,ir)
+#define wmb() RISCV_FENCE(ow,ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define __smp_mb() RISCV_FENCE(rw,rw)
+#define __smp_rmb() RISCV_FENCE(r,r)
+#define __smp_wmb() RISCV_FENCE(w,w)
+
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ RISCV_FENCE(rw,w); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ RISCV_FENCE(r,rw); \
+ ___p1; \
+})
+
+/*
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V. The sequence looks like:
+ *
+ * lr.aq lock
+ * sc lock <= LOCKED
+ * smp_mb__after_spinlock()
+ * // critical section
+ * lr lock
+ * sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides an RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock. Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptive regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart. While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary. In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
+ */
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BARRIER_H */
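
A classic message-passing illustration of the release/acquire pair defined above (a sketch; the flag and payload variables are hypothetical):

static int payload;
static int ready;

static void producer(void)
{
	payload = 42;
	smp_store_release(&ready, 1);		/* fence rw,w precedes the store */
}

static int consumer(void)
{
	while (!smp_load_acquire(&ready))	/* fence r,rw follows the load */
		cpu_relax();
	return payload;				/* guaranteed to observe 42 */
}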
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
new file mode 100644
index 000000000..396a3303c
--- /dev/null
+++ b/arch/riscv/include/asm/bitops.h
@@ -0,0 +1,205 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error "Only <linux/bitops.h> can be included directly"
+#endif /* _LINUX_BITOPS_H */
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/bitsperlong.h>
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/find.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+
+#include <asm-generic/bitops/hweight.h>
+
+#if (BITS_PER_LONG == 64)
+#define __AMO(op) "amo" #op ".d"
+#elif (BITS_PER_LONG == 32)
+#define __AMO(op) "amo" #op ".w"
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
+({ \
+ unsigned long __res, __mask; \
+ __mask = BIT_MASK(nr); \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " %0, %2, %1" \
+ : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(__mask)) \
+ : "memory"); \
+ ((__res & __mask) != 0); \
+})
+
+#define __op_bit_ord(op, mod, nr, addr, ord) \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " zero, %1, %0" \
+ : "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(BIT_MASK(nr))) \
+ : "memory");
+
+#define __test_and_op_bit(op, mod, nr, addr) \
+ __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
+#define __op_bit(op, mod, nr, addr) \
+ __op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define __NOP(x) (x)
+#define __NOT(x) (~(x))
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation can be reordered on architectures other than x86.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() may be reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ __op_bit_ord(and, __NOT, nr, addr, .rl);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock(), but it is not atomic.
+ * It does provide release barrier semantics, so it can be used to unlock
+ * a bit lock; however, it should only be used if no other CPU can modify
+ * any bits in the memory until the lock is released (a good example is
+ * if the bit lock itself protects access to the other bits in the word).
+ *
+ * On RISC-V systems there seems to be no benefit to taking advantage of the
+ * non-atomic property here: it's a lot more instructions and we still have to
+ * provide release semantics anyway.
+ */
+static inline void __clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ clear_bit_unlock(nr, addr);
+}
+
+#undef __test_and_op_bit
+#undef __op_bit
+#undef __NOP
+#undef __NOT
+#undef __AMO
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_RISCV_BITOPS_H */
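
As a hedged sketch of the lock/unlock variants in use (all names hypothetical), a one-bit spinlock could look like:

#define MY_LOCK_BIT	0

static unsigned long my_lock_word;

static void my_bit_lock(void)
{
	/* amoor.w.aq (amoor.d.aq on RV64): acquire semantics on success. */
	while (test_and_set_bit_lock(MY_LOCK_BIT, &my_lock_word))
		cpu_relax();
}

static void my_bit_unlock(void)
{
	/* amoand.w.rl: release semantics. */
	clear_bit_unlock(MY_LOCK_BIT, &my_lock_word);
}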
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
new file mode 100644
index 000000000..d6f1ec08d
--- /dev/null
+++ b/arch/riscv/include/asm/bug.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_BUG_H
+#define _ASM_RISCV_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#include <asm/asm.h>
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_32 _UL(0x3)
+#define __COMPRESSED_INSN_MASK _UL(0xffff)
+
+#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */
+
+#define GET_INSN_LENGTH(insn) \
+({ \
+ unsigned long __len; \
+ __len = ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? \
+ 4UL : 2UL; \
+ __len; \
+})
+
+typedef u32 bug_insn_t;
+
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+#define __BUG_ENTRY_ADDR RISCV_INT " 1b - 2b"
+#define __BUG_ENTRY_FILE RISCV_INT " %0 - 2b"
+#else
+#define __BUG_ENTRY_ADDR RISCV_PTR " 1b"
+#define __BUG_ENTRY_FILE RISCV_PTR " %0"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ __BUG_ENTRY_FILE "\n\t" \
+ RISCV_SHORT " %1\n\t" \
+ RISCV_SHORT " %2"
+#else
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ RISCV_SHORT " %2"
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+#define __BUG_FLAGS(flags) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\n\t" \
+ "ebreak\n" \
+ ".pushsection __bug_table,\"aw\"\n\t" \
+ "2:\n\t" \
+ __BUG_ENTRY "\n\t" \
+ ".org 2b + %3\n\t" \
+ ".popsection" \
+ : \
+ : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+#else /* CONFIG_GENERIC_BUG */
+#define __BUG_FLAGS(flags) do { \
+ __asm__ __volatile__ ("ebreak\n"); \
+} while (0)
+#endif /* CONFIG_GENERIC_BUG */
+
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+struct task_struct;
+
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
+
+#endif /* _ASM_RISCV_BUG_H */
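
Two worked values for GET_INSN_LENGTH(), using the trap instructions defined above: an instruction is 32 bits wide exactly when its low two bits are 0b11.

/* GET_INSN_LENGTH(__BUG_INSN_32): 0x00100073 & 0x3 == 0x3 -> 4 bytes (ebreak)   */
/* GET_INSN_LENGTH(__BUG_INSN_16): 0x9002     & 0x3 == 0x2 -> 2 bytes (c.ebreak) */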
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
new file mode 100644
index 000000000..9b58b1045
--- /dev/null
+++ b/arch/riscv/include/asm/cache.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CACHE_H
+#define _ASM_RISCV_CACHE_H
+
+#define L1_CACHE_SHIFT 6
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+/*
+ * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
+ * the flat loader aligns it accordingly.
+ */
+#ifndef CONFIG_MMU
+#define ARCH_SLAB_MINALIGN 16
+#endif
+
+#endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
new file mode 100644
index 000000000..23ff70350
--- /dev/null
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -0,0 +1,53 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CACHEFLUSH_H
+#define _ASM_RISCV_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+static inline void local_flush_icache_all(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_page(struct page *page)
+{
+ if (test_bit(PG_dcache_clean, &page->flags))
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_page(vma, pg, addr, len) \
+ flush_icache_mm(vma->vm_mm, 0)
+
+#ifndef CONFIG_SMP
+
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()
+
+#else /* CONFIG_SMP */
+
+void flush_icache_all(void);
+void flush_icache_mm(struct mm_struct *mm, bool local);
+
+#endif /* CONFIG_SMP */
+
+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_RISCV_CACHEFLUSH_H */
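
A hedged usage sketch (function name hypothetical): a code-patching path must flush the instruction cache after writing new instructions, since on this port flush_icache_range() expands to a whole-cache flush_icache_all():

static void finish_code_patch(void *insns, size_t len)
{
	/* fence.i on every hart; RISC-V has no partial icache flush. */
	flush_icache_range((unsigned long)insns, (unsigned long)insns + len);
}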
diff --git a/arch/riscv/include/asm/cacheinfo.h b/arch/riscv/include/asm/cacheinfo.h
new file mode 100644
index 000000000..d1a365215
--- /dev/null
+++ b/arch/riscv/include/asm/cacheinfo.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_CACHEINFO_H
+#define _ASM_RISCV_CACHEINFO_H
+
+#include <linux/cacheinfo.h>
+
+struct riscv_cacheinfo_ops {
+ const struct attribute_group * (*get_priv_group)(struct cacheinfo
+ *this_leaf);
+};
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops);
+uintptr_t get_cache_size(u32 level, enum cache_type type);
+uintptr_t get_cache_geometry(u32 level, enum cache_type type);
+
+#endif /* _ASM_RISCV_CACHEINFO_H */
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
new file mode 100644
index 000000000..0789fd37b
--- /dev/null
+++ b/arch/riscv/include/asm/clint.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H
+
+#include <linux/types.h>
+#include <asm/mmio.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
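
On RV64 the timer read reduces to a single relaxed MMIO load through this pointer; a sketch of what a timex accessor might do (an RV32 version would instead need a hi/lo/hi read sequence to get a consistent 64-bit value):

static inline u64 clint_get_cycles64(void)
{
	return readq_relaxed(clint_time_val);	/* single 64-bit MMIO load */
}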
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644
index 000000000..482185566
--- /dev/null
+++ b/arch/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
new file mode 100644
index 000000000..262e5bbb2
--- /dev/null
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -0,0 +1,375 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CMPXCHG_H
+#define _ASM_RISCV_CMPXCHG_H
+
+#include <linux/bug.h>
+
+#include <asm/barrier.h>
+#include <asm/fence.h>
+
+#define __xchg_relaxed(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define xchg_relaxed(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg_acquire(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w %0, %2, %1\n" \
+ RISCV_ACQUIRE_BARRIER \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d %0, %2, %1\n" \
+ RISCV_ACQUIRE_BARRIER \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define xchg_acquire(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_acquire((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg_release(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ " amoswap.w %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ " amoswap.d %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define xchg_release(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_release((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg((ptr), _x_, sizeof(*(ptr))); \
+})
+
+#define xchg32(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ xchg((ptr), (x)); \
+})
+
+#define xchg64(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ xchg((ptr), (x)); \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
+#define __cmpxchg_relaxed(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg_relaxed(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg_acquire(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg_acquire(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg_release(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg_release(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define cmpxchg_local(ptr, o, n) \
+ (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define cmpxchg32(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#define cmpxchg32_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ cmpxchg_relaxed((ptr), (o), (n)); \
+})
+
+#define cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg((ptr), (o), (n)); \
+})
+
+#define cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ cmpxchg_relaxed((ptr), (o), (n)); \
+})
+
+#endif /* _ASM_RISCV_CMPXCHG_H */
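
The cmpxchg() family above is typically driven from a retry loop. A hedged sketch (names hypothetical) of a lock-free saturating increment:

static inline unsigned int saturating_inc(unsigned int *p, unsigned int max)
{
	unsigned int old, new;

	do {
		old = READ_ONCE(*p);
		if (old == max)
			return old;		/* already saturated */
		new = old + 1;
	} while (cmpxchg(p, old, new) != old);	/* retry if another CPU raced us */

	return new;
}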
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
new file mode 100644
index 000000000..a8ec3c5c1
--- /dev/null
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ * Based on arch/arm64/include/asm/cpu_ops.h
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the boot protocol.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there
+ * is a mechanism for doing so, tests whether it is
+ * possible to boot the given HART.
+ * @cpu_start: Boots a cpu into the kernel.
+ * @cpu_disable: Prepares a cpu to die. May fail for some
+ * mechanism-specific reason, which will cause the hot
+ * unplug to be aborted. Called from the cpu to be killed.
+ * @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from
+ * the cpu being stopped.
+ * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another
+ * cpu.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_prepare)(unsigned int cpu);
+ int (*cpu_start)(unsigned int cpu,
+ struct task_struct *tidle);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_stop)(void);
+ int (*cpu_is_stopped)(unsigned int cpu);
+#endif
+};
+
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+void __init cpu_set_ops(int cpu);
+void cpu_update_secondary_bootdata(unsigned int cpuid,
+ struct task_struct *tidle);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
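
An illustrative skeleton of a cpu_operations instance (all names hypothetical; a real boot protocol, e.g. one backed by the SBI HSM extension, would fill in the callbacks with its hart start/stop mechanism):

static int example_cpu_prepare(unsigned int cpu)
{
	return 0;	/* nothing to verify in this sketch */
}

static int example_cpu_start(unsigned int cpu, struct task_struct *tidle)
{
	return -ENODEV;	/* a real protocol would kick the hart into the kernel here */
}

static const struct cpu_operations example_cpu_ops = {
	.name		= "example",
	.cpu_prepare	= example_cpu_prepare,
	.cpu_start	= example_cpu_start,
};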
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
new file mode 100644
index 000000000..cec462e19
--- /dev/null
+++ b/arch/riscv/include/asm/csr.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <asm/asm.h>
+#include <linux/const.h>
+
+/* Status register flags */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
+
+#ifndef CONFIG_64BIT
+#define SR_SD _AC(0x80000000, UL) /* FS/XS dirty */
+#else
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/XS dirty */
+#endif
+
+/* SATP flags */
+#ifndef CONFIG_64BIT
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE SATP_MODE_32
+#else
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE SATP_MODE_39
+#endif
+
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
+#define IRQ_S_SOFT 1
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_M_EXT 11
+
+/* Exception causes */
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
+/* symbolic CSR names: */
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
+#define CSR_MHARTID 0xf14
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS CSR_MSTATUS
+# define CSR_IE CSR_MIE
+# define CSR_TVEC CSR_MTVEC
+# define CSR_SCRATCH CSR_MSCRATCH
+# define CSR_EPC CSR_MEPC
+# define CSR_CAUSE CSR_MCAUSE
+# define CSR_TVAL CSR_MTVAL
+# define CSR_IP CSR_MIP
+
+# define SR_IE SR_MIE
+# define SR_PIE SR_MPIE
+# define SR_PP SR_MPP
+
+# define RV_IRQ_SOFT IRQ_M_SOFT
+# define RV_IRQ_TIMER IRQ_M_TIMER
+# define RV_IRQ_EXT IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS CSR_SSTATUS
+# define CSR_IE CSR_SIE
+# define CSR_TVEC CSR_STVEC
+# define CSR_SCRATCH CSR_SSCRATCH
+# define CSR_EPC CSR_SEPC
+# define CSR_CAUSE CSR_SCAUSE
+# define CSR_TVAL CSR_STVAL
+# define CSR_IP CSR_SIP
+
+# define SR_IE SR_SIE
+# define SR_PIE SR_SPIE
+# define SR_PP SR_SPP
+
+# define RV_IRQ_SOFT IRQ_S_SOFT
+# define RV_IRQ_TIMER IRQ_S_TIMER
+# define RV_IRQ_EXT IRQ_S_EXT
+#endif /* CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
+ : "=r" (__v) : \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
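
A short usage sketch for the accessors above (function names hypothetical), masking and unmasking the timer interrupt and reading the trap cause. Because CSR_IE and CSR_CAUSE alias the M- or S-mode registers depending on CONFIG_RISCV_M_MODE, the same code serves both:

static inline void timer_irq_mask(void)
{
	csr_clear(CSR_IE, IE_TIE);	/* csrc sie/mie, timer-enable bit */
}

static inline void timer_irq_unmask(void)
{
	csr_set(CSR_IE, IE_TIE);	/* csrs sie/mie, timer-enable bit */
}

static inline unsigned long trap_cause(void)
{
	return csr_read(CSR_CAUSE);	/* csrr scause/mcause */
}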
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
new file mode 100644
index 000000000..1de233d8e
--- /dev/null
+++ b/arch/riscv/include/asm/current.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Based on arm/arm64/include/asm/current.h
+ *
+ * Copyright (C) 2016 ARM
+ * Copyright (C) 2017 SiFive
+ */
+
+
+#ifndef _ASM_RISCV_CURRENT_H
+#define _ASM_RISCV_CURRENT_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+register struct task_struct *riscv_current_is_tp __asm__("tp");
+
+/*
+ * This only works because "struct thread_info" is at offset 0 from "struct
+ * task_struct". This constraint seems to be necessary on other architectures
+ * as well, but __switch_to enforces it. We can't check TASK_TI here because
+ * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
+ * task_struct" here due to some header ordering problems.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ return riscv_current_is_tp;
+}
+
+#define current get_current()
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CURRENT_H */
diff --git a/arch/riscv/include/asm/delay.h b/arch/riscv/include/asm/delay.h
new file mode 100644
index 000000000..524f8ef7c
--- /dev/null
+++ b/arch/riscv/include/asm/delay.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2016 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_DELAY_H
+#define _ASM_RISCV_DELAY_H
+
+extern unsigned long riscv_timebase;
+
+#define udelay udelay
+extern void udelay(unsigned long usecs);
+
+#define ndelay ndelay
+extern void ndelay(unsigned long nsecs);
+
+extern void __delay(unsigned long cycles);
+
+#endif /* _ASM_RISCV_DELAY_H */
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
new file mode 100644
index 000000000..7542282f1
--- /dev/null
+++ b/arch/riscv/include/asm/efi.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+
+#include <asm/csr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/tlbflush.h>
+
+#ifdef CONFIG_EFI
+extern void efi_init(void);
+#else
+#define efi_init()
+#endif
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md);
+
+#define arch_efi_call_virt_setup() efi_virtmap_load()
+#define arch_efi_call_virt_teardown() efi_virtmap_unload()
+
+#define arch_efi_call_virt(p, f, args...) p->f(args)
+
+#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
+
+/* on RISC-V, the FDT may be located anywhere in system RAM */
+static inline unsigned long efi_get_max_fdt_addr(unsigned long image_addr)
+{
+ return ULONG_MAX;
+}
+
+/* Load initrd at enough distance from DRAM start */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return image_addr + SZ_256M;
+}
+
+#define alloc_screen_info(x...) (&screen_info)
+
+static inline void free_screen_info(struct screen_info *si)
+{
+}
+
+static inline void efifb_setup_from_dmi(struct screen_info *si, const char *opt)
+{
+}
+
+void efi_virtmap_load(void);
+void efi_virtmap_unload(void);
+
+#endif /* _ASM_EFI_H */
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
new file mode 100644
index 000000000..5c725e1df
--- /dev/null
+++ b/arch/riscv/include/asm/elf.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ELF_H
+#define _ASM_RISCV_ELF_H
+
+#include <uapi/asm/elf.h>
+#include <asm/auxvec.h>
+#include <asm/byteorder.h>
+#include <asm/cacheinfo.h>
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH EM_RISCV
+
+#ifdef CONFIG_64BIT
+#define ELF_CLASS ELFCLASS64
+#else
+#define ELF_CLASS ELFCLASS32
+#endif
+
+#define ELF_DATA ELFDATA2LSB
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) ((x)->e_machine == EM_RISCV)
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE ((TASK_SIZE / 3) * 2)
+
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this CPU supports. This could be done in user space,
+ * but it's not easy, and we've already done it here.
+ */
+#define ELF_HWCAP (elf_hwcap)
+extern unsigned long elf_hwcap;
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#ifdef CONFIG_MMU
+#define ARCH_DLINFO \
+do { \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)current->mm->context.vdso); \
+ NEW_AUX_ENT(AT_L1I_CACHESIZE, \
+ get_cache_size(1, CACHE_TYPE_INST)); \
+ NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \
+ get_cache_geometry(1, CACHE_TYPE_INST)); \
+ NEW_AUX_ENT(AT_L1D_CACHESIZE, \
+ get_cache_size(1, CACHE_TYPE_DATA)); \
+ NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, \
+ get_cache_geometry(1, CACHE_TYPE_DATA)); \
+ NEW_AUX_ENT(AT_L2_CACHESIZE, \
+ get_cache_size(2, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \
+ get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \
+} while (0)
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
new file mode 100644
index 000000000..2b443a3a4
--- /dev/null
+++ b/arch/riscv/include/asm/fence.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_RISCV_FENCE_H
+#define _ASM_RISCV_FENCE_H
+
+#ifdef CONFIG_SMP
+#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
+#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#else
+#define RISCV_ACQUIRE_BARRIER
+#define RISCV_RELEASE_BARRIER
+#endif
+
+#endif /* _ASM_RISCV_FENCE_H */
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
new file mode 100644
index 000000000..54cbf07fb
--- /dev/null
+++ b/arch/riscv/include/asm/fixmap.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_FIXMAP_H
+#define _ASM_RISCV_FIXMAP_H
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/pgtable.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_MMU
+/*
+ * Here we define all the compile-time 'special' virtual addresses.
+ * The point is to have a constant address at compile time, but to
+ * set the physical address only in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are page-sized. Use
+ * set_fixmap(idx,phys) to associate physical memory with fixmap indices.
+ */
+enum fixed_addresses {
+ FIX_HOLE,
+ FIX_PTE,
+ FIX_PMD,
+ FIX_TEXT_POKE1,
+ FIX_TEXT_POKE0,
+ FIX_EARLYCON_MEM_BASE,
+
+ __end_of_permanent_fixed_addresses,
+ /*
+ * Temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ */
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+ __end_of_fixed_addresses
+};
+
+#define FIXMAP_PAGE_IO PAGE_KERNEL
+
+#define __early_set_fixmap __set_fixmap
+
+#define __late_set_fixmap __set_fixmap
+#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR)
+
+extern void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
+
+#endif /* CONFIG_MMU */
+#endif /* _ASM_RISCV_FIXMAP_H */
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
new file mode 100644
index 000000000..bc745900c
--- /dev/null
+++ b/arch/riscv/include/asm/ftrace.h
@@ -0,0 +1,88 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_FTRACE_H
+#define _ASM_RISCV_FTRACE_H
+
+/*
+ * The graph frame test is not possible if CONFIG_FRAME_POINTER is not enabled.
+ * Check arch/riscv/kernel/mcount.S for detail.
+ */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+#endif
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+/*
+ * Clang prior to 13 had "mcount" instead of "_mcount":
+ * https://reviews.llvm.org/D98881
+ */
+#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
+#define MCOUNT_NAME _mcount
+#else
+#define MCOUNT_NAME mcount
+#endif
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifndef __ASSEMBLY__
+void MCOUNT_NAME(void);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * A general call in RISC-V is a pair of instructions:
+ * 1) auipc: sets the upper 20 bits of the PC-relative offset in the ra
+ *    register
+ * 2) jalr: adds the low 12 bits of the offset to ra, jumps there, and sets
+ *    ra to the return address (original pc + 4)
+ *
+ * Dynamic ftrace generates probes to call sites, so we must deal with
+ * both auipc and jalr at the same time.
+ */
+
+#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
+#define JALR_SIGN_MASK (0x00000800)
+#define JALR_OFFSET_MASK (0x00000fff)
+#define AUIPC_OFFSET_MASK (0xfffff000)
+#define AUIPC_PAD (0x00001000)
+#define JALR_SHIFT 20
+#define JALR_BASIC (0x000080e7)
+#define AUIPC_BASIC (0x00000097)
+#define NOP4 (0x00000013)
+
+#define make_call(caller, callee, call) \
+do { \
+ call[0] = to_auipc_insn((unsigned int)((unsigned long)callee - \
+ (unsigned long)caller)); \
+ call[1] = to_jalr_insn((unsigned int)((unsigned long)callee - \
+ (unsigned long)caller)); \
+} while (0)
+
+#define to_jalr_insn(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_BASIC)
+
+#define to_auipc_insn(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_BASIC) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_BASIC))
+
+/*
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#endif /* _ASM_RISCV_FTRACE_H */
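
A worked encoding example for the helpers above, with a hypothetical offset of 0x1800 (callee - caller). Bit 11 of the low 12 bits is set, so the jalr immediate sign-extends to -0x800 and to_auipc_insn() adds AUIPC_PAD to compensate:

/*
 * to_auipc_insn(0x1800) == ((0x1000 + 0x1000) | 0x00000097) == 0x00002097
 *                          -> auipc ra, 0x2        (ra = pc + 0x2000)
 * to_jalr_insn(0x1800)  == ((0x800 << 20) | 0x000080e7)     == 0x800080e7
 *                          -> jalr  ra, -2048(ra)  (target = pc + 0x1800)
 */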
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
new file mode 100644
index 000000000..1b00badb9
--- /dev/null
+++ b/arch/riscv/include/asm/futex.h
@@ -0,0 +1,120 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
+ */
+
+#ifndef _ASM_RISCV_FUTEX_H
+#define _ASM_RISCV_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <asm/asm.h>
+
+/* We don't even really need the extable code, but for now keep it simple */
+#ifndef CONFIG_MMU
+#define __enable_user_access() do { } while (0)
+#define __disable_user_access() do { } while (0)
+#endif
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ uintptr_t tmp; \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1: " insn " \n" \
+ "2: \n" \
+ " .section .fixup,\"ax\" \n" \
+ " .balign 4 \n" \
+ "3: li %[r],%[e] \n" \
+ " jump 2b,%[t] \n" \
+ " .previous \n" \
+ " .section __ex_table,\"a\" \n" \
+ " .balign " RISCV_SZPTR " \n" \
+ " " RISCV_PTR " 1b, 3b \n" \
+ " .previous \n" \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr), [t] "=&r" (tmp) \
+ : [op] "Jr" (oparg), [e] "i" (-EFAULT) \
+ : "memory"); \
+ __disable_user_access(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+ uintptr_t tmp;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __enable_user_access();
+ __asm__ __volatile__ (
+ "1: lr.w.aqrl %[v],%[u] \n"
+ " bne %[v],%z[ov],3f \n"
+ "2: sc.w.aqrl %[t],%z[nv],%[u] \n"
+ " bnez %[t],1b \n"
+ "3: \n"
+ " .section .fixup,\"ax\" \n"
+ " .balign 4 \n"
+ "4: li %[r],%[e] \n"
+ " jump 3b,%[t] \n"
+ " .previous \n"
+ " .section __ex_table,\"a\" \n"
+ " .balign " RISCV_SZPTR " \n"
+ " " RISCV_PTR " 1b, 4b \n"
+ " " RISCV_PTR " 2b, 4b \n"
+ " .previous \n"
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval), [e] "i" (-EFAULT)
+ : "memory");
+ __disable_user_access();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* _ASM_RISCV_FUTEX_H */
diff --git a/arch/riscv/include/asm/gdb_xml.h b/arch/riscv/include/asm/gdb_xml.h
new file mode 100644
index 000000000..09342111f
--- /dev/null
+++ b/arch/riscv/include/asm/gdb_xml.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GDB_XML_H_
+#define __ASM_GDB_XML_H_
+
+const char riscv_gdb_stub_feature[64] =
+ "PacketSize=800;qXfer:features:read+;";
+
+static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
+
+#ifdef CONFIG_64BIT
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-64bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-64bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"64\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"64\" type=\"int\"/>"
+"</feature>";
+#else
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-32bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-32bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"32\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"32\" type=\"int\"/>"
+"</feature>";
+#endif
+#endif
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
new file mode 100644
index 000000000..ec19d6afc
--- /dev/null
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_HUGETLB_H
+#define _ASM_RISCV_HUGETLB_H
+
+#include <asm-generic/hugetlb.h>
+#include <asm/page.h>
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
+#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
new file mode 100644
index 000000000..5ce50468a
--- /dev/null
+++ b/arch/riscv/include/asm/hwcap.h
@@ -0,0 +1,49 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+#ifndef _ASM_RISCV_HWCAP_H
+#define _ASM_RISCV_HWCAP_H
+
+#include <linux/bits.h>
+#include <uapi/asm/hwcap.h>
+
+#ifndef __ASSEMBLY__
+/*
+ * This yields a mask that user programs can use to figure out what
+ * instruction set this cpu supports.
+ */
+#define ELF_HWCAP (elf_hwcap)
+
+enum {
+ CAP_HWCAP = 1,
+};
+
+extern unsigned long elf_hwcap;
+
+#define RISCV_ISA_EXT_a ('a' - 'a')
+#define RISCV_ISA_EXT_c ('c' - 'a')
+#define RISCV_ISA_EXT_d ('d' - 'a')
+#define RISCV_ISA_EXT_f ('f' - 'a')
+#define RISCV_ISA_EXT_h ('h' - 'a')
+#define RISCV_ISA_EXT_i ('i' - 'a')
+#define RISCV_ISA_EXT_m ('m' - 'a')
+#define RISCV_ISA_EXT_s ('s' - 'a')
+#define RISCV_ISA_EXT_u ('u' - 'a')
+
+#define RISCV_ISA_EXT_MAX 64
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
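+/*
+ * Illustrative usage (a sketch, not part of the original header; passing a
+ * NULL bitmap selects the system-wide ISA bitmap):
+ *
+ *	if (riscv_isa_extension_available(NULL, c))
+ *		pr_info("compressed instructions supported\n");
+ */
+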
+#endif
+
+#endif /* _ASM_RISCV_HWCAP_H */
diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h
new file mode 100644
index 000000000..e0b319af3
--- /dev/null
+++ b/arch/riscv/include/asm/image.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_IMAGE_H
+#define _ASM_RISCV_IMAGE_H
+
+#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2 "RSC\x05"
+
+#define RISCV_IMAGE_FLAG_BE_SHIFT 0
+#define RISCV_IMAGE_FLAG_BE_MASK 0x1
+
+#define RISCV_IMAGE_FLAG_LE 0
+#define RISCV_IMAGE_FLAG_BE 1
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#error conversion of header fields to LE not yet implemented
+#else
+#define __HEAD_FLAG_BE RISCV_IMAGE_FLAG_LE
+#endif
+
+#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
+ RISCV_IMAGE_FLAG_##field##_SHIFT)
+
+#define __HEAD_FLAGS (__HEAD_FLAG(BE))
+
+#define RISCV_HEADER_VERSION_MAJOR 0
+#define RISCV_HEADER_VERSION_MINOR 2
+
+#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
+ RISCV_HEADER_VERSION_MINOR)
+
+#ifndef __ASSEMBLY__
+/**
+ * struct riscv_image_header - riscv kernel image header
+ * @code0: Executable code
+ * @code1: Executable code
+ * @text_offset: Image load offset (little endian)
+ * @image_size: Effective Image size (little endian)
+ * @flags: kernel flags (little endian)
+ * @version: version
+ * @res1: reserved
+ * @res2: reserved
+ * @magic: Magic number (RISC-V specific; deprecated)
+ * @magic2: Magic number 2 (to match the ARM64 'magic' field position)
+ * @res3: reserved (will be used for PE COFF offset)
+ *
+ * The intention is for this header format to be shared between multiple
+ * architectures to avoid a proliferation of image header formats.
+ */
+
+struct riscv_image_header {
+ u32 code0;
+ u32 code1;
+ u64 text_offset;
+ u64 image_size;
+ u64 flags;
+ u32 version;
+ u32 res1;
+ u64 res2;
+ u64 magic;
+ u32 magic2;
+ u32 res3;
+};
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_RISCV_IMAGE_H */
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
new file mode 100644
index 000000000..391dd869d
--- /dev/null
+++ b/arch/riscv/include/asm/io.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_IO_H
+#define _ASM_RISCV_IO_H
+
+#include <linux/types.h>
+#include <linux/pgtable.h>
+#include <asm/mmiowb.h>
+#include <asm/early_ioremap.h>
+
+/*
+ * MMIO access functions are separated out to break dependency cycles
+ * when using {read,write}* fns in low-level headers
+ */
+#include <asm/mmio.h>
+
+/*
+ * I/O port access constants.
+ */
+#ifdef CONFIG_MMU
+#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
+#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
+#endif /* CONFIG_MMU */
+
+/*
+ * Emulation routines for the port-mapped IO space used by some PCI drivers.
+ * These are defined as being "fully synchronous", but also "not guaranteed to
+ * be fully ordered with respect to other memory and I/O operations". We're
+ * going to be on the safe side here and just make them:
+ * - Fully ordered WRT each other, by bracketing them with two fences. The
+ * outer fences order both I and O, so inX is ordered with outX, while the
+ * inner fences only need the type of the access (I for inX and O for outX).
+ * - Ordered in the same manner as readX/writeX WRT memory by subsuming their
+ * fences.
+ * - Ordered WRT timer reads, so udelay and friends don't get elided by the
+ * implementation.
+ * Note that there is no way to actually enforce that outX is a non-posted
+ * operation on RISC-V, but hopefully the timer ordering constraint is
+ * sufficient to ensure this works sanely on controllers that support I/O
+ * writes.
+ */
+#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
+#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
+#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+
+#define inb(c) ({ u8 __v; __io_pbr(); __v = readb_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inw(c) ({ u16 __v; __io_pbr(); __v = readw_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+#define inl(c) ({ u32 __v; __io_pbr(); __v = readl_cpu((void*)(PCI_IOBASE + (c))); __io_par(__v); __v; })
+
+#define outb(v,c) ({ __io_pbw(); writeb_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+#define outw(v,c) ({ __io_pbw(); writew_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+#define outl(v,c) ({ __io_pbw(); writel_cpu((v),(void*)(PCI_IOBASE + (c))); __io_paw(); })
+
+#ifdef CONFIG_64BIT
+#define inq(c) ({ u64 __v; __io_pbr(); __v = readq_cpu((void*)(c)); __io_par(__v); __v; })
+#define outq(v,c) ({ __io_pbw(); writeq_cpu((v),(void*)(c)); __io_paw(); })
+#endif
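+
+/*
+ * Illustrative expansion (a sketch, not compiler output): on RV64, inb(c)
+ * is roughly
+ *
+ *	fence io,i		# __io_pbr()
+ *	lb   a5, 0(addr)	# readb_cpu()
+ *	fence i,ior		# __io_par()
+ *
+ * so two port accesses from the same hart are never reordered against each
+ * other, per the ordering rules described above.
+ */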
+
+/*
+ * Accesses from a single hart to a single I/O address must be ordered. This
+ * allows us to use the raw read macros, but we still need to fence before and
+ * after the block to ensure ordering WRT other macros. These are defined to
+ * perform host-endian accesses so we use __raw instead of __cpu.
+ */
+#define __io_reads_ins(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(const volatile void __iomem *addr, \
+ void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ ctype *buf = buffer; \
+ \
+ do { \
+ ctype x = __raw_read ## len(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+#define __io_writes_outs(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(volatile void __iomem *addr, \
+ const void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ const ctype *buf = buffer; \
+ \
+ do { \
+ __raw_write ## len(*buf++, addr); \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
+#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
+#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
+#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
+
+__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
+#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
+#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
+#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
+
+__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
+__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
+__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
+#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
+#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
+#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
+
+__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
+#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
+#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
+#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
+
+#ifdef CONFIG_64BIT
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
+#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
+
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
+#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
+
+__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
+#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
+
+__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
+#endif
+
+#include <asm-generic/io.h>
+
+#endif /* _ASM_RISCV_IO_H */
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
new file mode 100644
index 000000000..9807ad164
--- /dev/null
+++ b/arch/riscv/include/asm/irq.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_IRQ_H
+#define _ASM_RISCV_IRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
+#include <asm-generic/irq.h>
+
+#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
new file mode 100644
index 000000000..b53891964
--- /dev/null
+++ b/arch/riscv/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_IRQ_WORK_H
+#define _ASM_RISCV_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+extern void arch_irq_work_raise(void);
+#endif /* _ASM_RISCV_IRQ_WORK_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
new file mode 100644
index 000000000..08d4d6a5b
--- /dev/null
+++ b/arch/riscv/include/asm/irqflags.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_IRQFLAGS_H
+#define _ASM_RISCV_IRQFLAGS_H
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ return csr_read(CSR_STATUS);
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ csr_set(CSR_STATUS, SR_IE);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ csr_clear(CSR_STATUS, SR_IE);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ return csr_read_clear(CSR_STATUS, SR_IE);
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & SR_IE);
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ csr_set(CSR_STATUS, flags & SR_IE);
+}
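+
+/*
+ * Illustrative usage (a sketch of the generic pattern these primitives
+ * back; kernel code calls the local_irq_* wrappers rather than the arch_*
+ * functions directly):
+ *
+ *	unsigned long flags;
+ *
+ *	local_irq_save(flags);		// csrrc on CSR_STATUS, clearing SR_IE
+ *	... critical section ...
+ *	local_irq_restore(flags);	// csrs restores the saved SR_IE bit
+ */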
+
+#endif /* _ASM_RISCV_IRQFLAGS_H */
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
new file mode 100644
index 000000000..729991e8f
--- /dev/null
+++ b/arch/riscv/include/asm/jump_label.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/include/asm/jump_label.h
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: nop \n\t"
+ " .option pop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align " RISCV_LGPTR " \n\t"
+ " .long 1b - ., %l[label] - . \n\t"
+ " " RISCV_PTR " %0 - . \n\t"
+ " .popsection \n\t"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key *key,
+ bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: jal zero, %l[label] \n\t"
+ " .option pop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align " RISCV_LGPTR " \n\t"
+ " .long 1b - ., %l[label] - . \n\t"
+ " " RISCV_PTR " %0 - . \n\t"
+ " .popsection \n\t"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
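+
+/*
+ * Illustrative usage (a sketch of the generic static-key API these helpers
+ * implement; "my_key" is a hypothetical name):
+ *
+ *	DEFINE_STATIC_KEY_FALSE(my_key);
+ *
+ *	if (static_branch_unlikely(&my_key))
+ *		do_rare_work();
+ *
+ * While the key is false this runs the nop emitted by arch_static_branch();
+ * enabling the key patches the nop into the jal emitted by
+ * arch_static_branch_jump().
+ */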
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_JUMP_LABEL_H */
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
new file mode 100644
index 000000000..db10a64e3
--- /dev/null
+++ b/arch/riscv/include/asm/kasan.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Andes Technology Corporation */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#define KASAN_SHADOW_SIZE (UL(1) << (38 - KASAN_SHADOW_SCALE_SHIFT))
+#define KASAN_SHADOW_START KERN_VIRT_START /* 2^64 - 2^38 */
+#define KASAN_SHADOW_END (KASAN_SHADOW_START + KASAN_SHADOW_SIZE)
+
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+
+#endif
+#endif
+#endif /* __ASM_KASAN_H */
diff --git a/arch/riscv/include/asm/kdebug.h b/arch/riscv/include/asm/kdebug.h
new file mode 100644
index 000000000..85ac00411
--- /dev/null
+++ b/arch/riscv/include/asm/kdebug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_RISCV_KDEBUG_H
+#define _ASM_RISCV_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
new file mode 100644
index 000000000..46677daf7
--- /dev/null
+++ b/arch/riscv/include/asm/kgdb.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_KGDB_H_
+#define __ASM_KGDB_H_
+
+#ifdef __KERNEL__
+
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+
+#define DBG_MAX_REG_NUM (36)
+#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG)
+#define CACHE_FLUSH_IS_SAFE 1
+#define BUFMAX 2048
+#ifdef CONFIG_RISCV_ISA_C
+#define BREAK_INSTR_SIZE 2
+#else
+#define BREAK_INSTR_SIZE 4
+#endif
+
+#ifndef __ASSEMBLY__
+
+extern unsigned long kgdb_compiled_break;
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ ".option norvc\n"
+ "kgdb_compiled_break: ebreak\n"
+ ".option rvc\n");
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define DBG_REG_ZERO "zero"
+#define DBG_REG_RA "ra"
+#define DBG_REG_SP "sp"
+#define DBG_REG_GP "gp"
+#define DBG_REG_TP "tp"
+#define DBG_REG_T0 "t0"
+#define DBG_REG_T1 "t1"
+#define DBG_REG_T2 "t2"
+#define DBG_REG_FP "fp"
+#define DBG_REG_S1 "s1"
+#define DBG_REG_A0 "a0"
+#define DBG_REG_A1 "a1"
+#define DBG_REG_A2 "a2"
+#define DBG_REG_A3 "a3"
+#define DBG_REG_A4 "a4"
+#define DBG_REG_A5 "a5"
+#define DBG_REG_A6 "a6"
+#define DBG_REG_A7 "a7"
+#define DBG_REG_S2 "s2"
+#define DBG_REG_S3 "s3"
+#define DBG_REG_S4 "s4"
+#define DBG_REG_S5 "s5"
+#define DBG_REG_S6 "s6"
+#define DBG_REG_S7 "s7"
+#define DBG_REG_S8 "s8"
+#define DBG_REG_S9 "s9"
+#define DBG_REG_S10 "s10"
+#define DBG_REG_S11 "s11"
+#define DBG_REG_T3 "t3"
+#define DBG_REG_T4 "t4"
+#define DBG_REG_T5 "t5"
+#define DBG_REG_T6 "t6"
+#define DBG_REG_EPC "pc"
+#define DBG_REG_STATUS "sstatus"
+#define DBG_REG_BADADDR "stval"
+#define DBG_REG_CAUSE "scause"
+
+#define DBG_REG_ZERO_OFF 0
+#define DBG_REG_RA_OFF 1
+#define DBG_REG_SP_OFF 2
+#define DBG_REG_GP_OFF 3
+#define DBG_REG_TP_OFF 4
+#define DBG_REG_T0_OFF 5
+#define DBG_REG_T1_OFF 6
+#define DBG_REG_T2_OFF 7
+#define DBG_REG_FP_OFF 8
+#define DBG_REG_S1_OFF 9
+#define DBG_REG_A0_OFF 10
+#define DBG_REG_A1_OFF 11
+#define DBG_REG_A2_OFF 12
+#define DBG_REG_A3_OFF 13
+#define DBG_REG_A4_OFF 14
+#define DBG_REG_A5_OFF 15
+#define DBG_REG_A6_OFF 16
+#define DBG_REG_A7_OFF 17
+#define DBG_REG_S2_OFF 18
+#define DBG_REG_S3_OFF 19
+#define DBG_REG_S4_OFF 20
+#define DBG_REG_S5_OFF 21
+#define DBG_REG_S6_OFF 22
+#define DBG_REG_S7_OFF 23
+#define DBG_REG_S8_OFF 24
+#define DBG_REG_S9_OFF 25
+#define DBG_REG_S10_OFF 26
+#define DBG_REG_S11_OFF 27
+#define DBG_REG_T3_OFF 28
+#define DBG_REG_T4_OFF 29
+#define DBG_REG_T5_OFF 30
+#define DBG_REG_T6_OFF 31
+#define DBG_REG_EPC_OFF 32
+#define DBG_REG_STATUS_OFF 33
+#define DBG_REG_BADADDR_OFF 34
+#define DBG_REG_CAUSE_OFF 35
+
+extern const char riscv_gdb_stub_feature[64];
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
+
+#endif
+#endif
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
new file mode 100644
index 000000000..56a98ea30
--- /dev/null
+++ b/arch/riscv/include/asm/kprobes.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_KPROBES_H
+#define _ASM_RISCV_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/linkage.h b/arch/riscv/include/asm/linkage.h
new file mode 100644
index 000000000..9e88ba23c
--- /dev/null
+++ b/arch/riscv/include/asm/linkage.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_LINKAGE_H
+#define _ASM_RISCV_LINKAGE_H
+
+#define __ALIGN .balign 4
+#define __ALIGN_STR ".balign 4"
+
+#endif /* _ASM_RISCV_LINKAGE_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
new file mode 100644
index 000000000..4c58ee7f9
--- /dev/null
+++ b/arch/riscv/include/asm/mmio.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_MMIO_H
+#define _ASM_RISCV_MMIO_H
+
+#include <linux/types.h>
+#include <asm/mmiowb.h>
+
+/* Generic IO read/write. These perform native-endian accesses. */
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+#endif
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+#endif
+
+/*
+ * Unordered I/O memory access primitives. These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
+#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
+#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses. These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr() do {} while (0)
+#define __io_rar() do {} while (0)
+#define __io_rbw() do {} while (0)
+#define __io_raw() do {} while (0)
+
+#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
+#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
+#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
+#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
+#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
+#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
+#endif
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+#define __io_br() do {} while (0)
+#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_aw() mmiowb_set_pending()
+
+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
+
+#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
+#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
+#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
+
+#ifdef CONFIG_64BIT
+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
+#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
+#endif
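+
+/*
+ * Illustrative device access (a sketch; REG_CTRL and REG_STAT are
+ * hypothetical register offsets):
+ *
+ *	writel(val, base + REG_CTRL);	 // fence w,o ; sw ; mmiowb pending
+ *	status = readl(base + REG_STAT); // lw ; fence i,ir
+ *
+ * The "fence w,o" orders the store after prior normal-memory writes; the
+ * "fence i,ir" keeps later reads from being hoisted above the load.
+ */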
+
+#endif /* _ASM_RISCV_MMIO_H */
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 000000000..0b2333e71
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <linux/smp.h>
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_RISCV_MMIOWB_H */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
new file mode 100644
index 000000000..dabcf2cfb
--- /dev/null
+++ b/arch/riscv/include/asm/mmu.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_MMU_H
+#define _ASM_RISCV_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+#ifndef CONFIG_MMU
+ unsigned long end_brk;
+#endif
+ void *vdso;
+#ifdef CONFIG_SMP
+ /* A local icache flush is needed before user execution can resume. */
+ cpumask_t icache_stale_mask;
+#endif
+} mm_context_t;
+
+void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_MMU_H */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
new file mode 100644
index 000000000..67c463812
--- /dev/null
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -0,0 +1,46 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_MMU_CONTEXT_H
+#define _ASM_RISCV_MMU_CONTEXT_H
+
+#include <linux/mm_types.h>
+#include <asm-generic/mm_hooks.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+static inline void enter_lazy_tlb(struct mm_struct *mm,
+ struct task_struct *task)
+{
+}
+
+/* Initialize context-related info for a new mm_struct */
+static inline int init_new_context(struct task_struct *task,
+ struct mm_struct *mm)
+{
+ return 0;
+}
+
+static inline void destroy_context(struct mm_struct *mm)
+{
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
+
+static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, NULL);
+}
+
+static inline void deactivate_mm(struct task_struct *task,
+ struct mm_struct *mm)
+{
+}
+
+#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
new file mode 100644
index 000000000..76aa96a9f
--- /dev/null
+++ b/arch/riscv/include/asm/module.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_MODULE_H
+#define _ASM_RISCV_MODULE_H
+
+#include <asm-generic/module.h>
+
+struct module;
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
+
+#ifdef CONFIG_MODULE_SECTIONS
+struct mod_section {
+ Elf_Shdr *shdr;
+ int num_entries;
+ int max_entries;
+};
+
+struct mod_arch_specific {
+ struct mod_section got;
+ struct mod_section plt;
+ struct mod_section got_plt;
+};
+
+struct got_entry {
+	unsigned long symbol_addr;	/* the real address of the symbol */
+};
+
+static inline struct got_entry emit_got_entry(unsigned long val)
+{
+ return (struct got_entry) {val};
+}
+
+static inline struct got_entry *get_got_entry(unsigned long val,
+ const struct mod_section *sec)
+{
+ struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got[i].symbol_addr == val)
+ return &got[i];
+ }
+ return NULL;
+}
+
+struct plt_entry {
+ /*
+	 * Trampoline code to the real target address. The return address
+	 * should be the original (pc + 4) from before entering the plt entry.
+ */
+ u32 insn_auipc; /* auipc t0, 0x0 */
+ u32 insn_ld; /* ld t1, 0x10(t0) */
+ u32 insn_jr; /* jr t1 */
+};
+
+#define OPC_AUIPC 0x0017
+#define OPC_LD 0x3003
+#define OPC_JALR 0x0067
+#define REG_T0 0x5
+#define REG_T1 0x6
+
+static inline struct plt_entry emit_plt_entry(unsigned long val,
+ unsigned long plt,
+ unsigned long got_plt)
+{
+ /*
+ * U-Type encoding:
+ * +------------+----------+----------+
+ * | imm[31:12] | rd[11:7] | opc[6:0] |
+ * +------------+----------+----------+
+ *
+ * I-Type encoding:
+ * +------------+------------+--------+----------+----------+
+ * | imm[31:20] | rs1[19:15] | funct3 | rd[11:7] | opc[6:0] |
+ * +------------+------------+--------+----------+----------+
+ *
+ */
+ unsigned long offset = got_plt - plt;
+ u32 hi20 = (offset + 0x800) & 0xfffff000;
+ u32 lo12 = (offset - hi20);
+ return (struct plt_entry) {
+ OPC_AUIPC | (REG_T0 << 7) | hi20,
+ OPC_LD | (lo12 << 20) | (REG_T0 << 15) | (REG_T1 << 7),
+ OPC_JALR | (REG_T1 << 15)
+ };
+}
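+
+/*
+ * Worked example (illustrative numbers only): if got_plt - plt = 0x1800,
+ * then hi20 = (0x1800 + 0x800) & 0xfffff000 = 0x2000 and
+ * lo12 = 0x1800 - 0x2000 = -0x800, giving
+ *
+ *	auipc t0, 0x2		# t0 = plt entry address + 0x2000
+ *	ld    t1, -2048(t0)	# load the target from the got.plt slot
+ *	jr    t1
+ *
+ * The +0x800 rounding keeps the pair correct for offsets whose low 12 bits
+ * would otherwise sign-extend the wrong way.
+ */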
+
+static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
+{
+ struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got_plt[i].symbol_addr == val)
+ return i;
+ }
+ return -1;
+}
+
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_got_plt)
+{
+ struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+ int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
+ if (got_plt_idx >= 0)
+ return plt + got_plt_idx;
+ else
+ return NULL;
+}
+
+#endif /* CONFIG_MODULE_SECTIONS */
+
+#endif /* _ASM_RISCV_MODULE_H */
diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h
new file mode 100644
index 000000000..1075beae1
--- /dev/null
+++ b/arch/riscv/include/asm/module.lds.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+#ifdef CONFIG_MODULE_SECTIONS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .got : { BYTE(0) }
+ .got.plt : { BYTE(0) }
+}
+#endif
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
new file mode 100644
index 000000000..64a675c5c
--- /dev/null
+++ b/arch/riscv/include/asm/page.h
@@ -0,0 +1,148 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
+ */
+
+#ifndef _ASM_RISCV_PAGE_H
+#define _ASM_RISCV_PAGE_H
+
+#include <linux/pfn.h>
+#include <linux/const.h>
+
+#define PAGE_SHIFT (12)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#ifdef CONFIG_64BIT
+#define HUGE_MAX_HSTATE 2
+#else
+#define HUGE_MAX_HSTATE 1
+#endif
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ * When not using MMU this corresponds to the first free page in
+ * physical memory (aligned on a page boundary).
+ */
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+
+#define KERN_VIRT_SIZE (-PAGE_OFFSET)
+
+#ifndef __ASSEMBLY__
+
+#define PAGE_UP(addr) (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
+#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
+
+/* align addr on a size boundary - adjust address up/down if needed */
+#define _ALIGN_UP(addr, size) (((addr)+((size)-1))&(~((size)-1)))
+#define _ALIGN_DOWN(addr, size) ((addr)&(~((size)-1)))
+
+/* align addr on a size boundary - adjust address up if needed */
+#define _ALIGN(addr, size) _ALIGN_UP(addr, size)
+
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) memset((pgaddr), 0, PAGE_SIZE)
+#define copy_user_page(vto, vfrom, vaddr, topg) \
+ memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * Use struct definitions to apply C type checking
+ */
+
+/* Page Global Directory entry */
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+/* Page Table entry */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#ifdef CONFIG_64BIT
+#define PTE_FMT "%016lx"
+#else
+#define PTE_FMT "%08lx"
+#endif
+
+#ifdef CONFIG_MMU
+extern unsigned long va_pa_offset;
+extern unsigned long pfn_base;
+#define ARCH_PFN_OFFSET (pfn_base)
+#else
+#define va_pa_offset 0
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */
+
+extern unsigned long max_low_pfn;
+extern unsigned long min_low_pfn;
+
+#define __pa_to_va_nodebug(x) ((void *)((unsigned long) (x) + va_pa_offset))
+#define __va_to_pa_nodebug(x) ((unsigned long)(x) - va_pa_offset)
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x) __va_to_pa_nodebug(x)
+#define __phys_addr_symbol(x) __va_to_pa_nodebug(x)
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
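+
+/*
+ * Illustrative round trip (a sketch): for any address in the kernel's
+ * linear map,
+ *
+ *	void *v = __va(__pa(x));	/* v == x */
+ *
+ * since both directions only add or subtract va_pa_offset.
+ */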
+
+#define phys_to_pfn(phys) (PFN_DOWN(phys))
+#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+
+#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
+#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
+
+#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+
+#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+#define page_to_bus(page) (page_to_phys(page))
+#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+
+#ifdef CONFIG_FLATMEM
+#define pfn_valid(pfn) \
+ (((pfn) >= ARCH_PFN_OFFSET) && (((pfn) - ARCH_PFN_OFFSET) < max_mapnr))
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_RISCV_PAGE_H */
diff --git a/arch/riscv/include/asm/parse_asm.h b/arch/riscv/include/asm/parse_asm.h
new file mode 100644
index 000000000..ad254da85
--- /dev/null
+++ b/arch/riscv/include/asm/parse_asm.h
@@ -0,0 +1,224 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_INSN_H
+#define _ASM_RISCV_INSN_H
+
+#include <linux/bits.h>
+
+/* The bit field of immediate value in I-type instruction */
+#define I_IMM_SIGN_OPOFF 31
+#define I_IMM_11_0_OPOFF 20
+#define I_IMM_SIGN_OFF 12
+#define I_IMM_11_0_OFF 0
+#define I_IMM_11_0_MASK GENMASK(11, 0)
+
+/* The bit field of immediate value in J-type instruction */
+#define J_IMM_SIGN_OPOFF 31
+#define J_IMM_10_1_OPOFF 21
+#define J_IMM_11_OPOFF 20
+#define J_IMM_19_12_OPOFF 12
+#define J_IMM_SIGN_OFF 20
+#define J_IMM_10_1_OFF 1
+#define J_IMM_11_OFF 11
+#define J_IMM_19_12_OFF 12
+#define J_IMM_10_1_MASK GENMASK(9, 0)
+#define J_IMM_11_MASK GENMASK(0, 0)
+#define J_IMM_19_12_MASK GENMASK(7, 0)
+
+/* The bit field of immediate value in B-type instruction */
+#define B_IMM_SIGN_OPOFF 31
+#define B_IMM_10_5_OPOFF 25
+#define B_IMM_4_1_OPOFF 8
+#define B_IMM_11_OPOFF 7
+#define B_IMM_SIGN_OFF 12
+#define B_IMM_10_5_OFF 5
+#define B_IMM_4_1_OFF 1
+#define B_IMM_11_OFF 11
+#define B_IMM_10_5_MASK GENMASK(5, 0)
+#define B_IMM_4_1_MASK GENMASK(3, 0)
+#define B_IMM_11_MASK GENMASK(0, 0)
+
+/* The register offset in RVG instruction */
+#define RVG_RS1_OPOFF 15
+#define RVG_RS2_OPOFF 20
+#define RVG_RD_OPOFF 7
+
+/* The bit field of immediate value in RVC J instruction */
+#define RVC_J_IMM_SIGN_OPOFF 12
+#define RVC_J_IMM_4_OPOFF 11
+#define RVC_J_IMM_9_8_OPOFF 9
+#define RVC_J_IMM_10_OPOFF 8
+#define RVC_J_IMM_6_OPOFF 7
+#define RVC_J_IMM_7_OPOFF 6
+#define RVC_J_IMM_3_1_OPOFF 3
+#define RVC_J_IMM_5_OPOFF 2
+#define RVC_J_IMM_SIGN_OFF 11
+#define RVC_J_IMM_4_OFF 4
+#define RVC_J_IMM_9_8_OFF 8
+#define RVC_J_IMM_10_OFF 10
+#define RVC_J_IMM_6_OFF 6
+#define RVC_J_IMM_7_OFF 7
+#define RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+
+/* parts of opcode for RVG */
+#define OPCODE_BRANCH 0x63
+#define OPCODE_JALR 0x67
+#define OPCODE_JAL 0x6f
+#define OPCODE_SYSTEM 0x73
+
+/* parts of opcode for RVC */
+#define OPCODE_C_0 0x0
+#define OPCODE_C_1 0x1
+#define OPCODE_C_2 0x2
+
+/* parts of funct3 code for I, M, A extension */
+#define FUNCT3_JALR 0x0
+#define FUNCT3_BEQ 0x0
+#define FUNCT3_BNE 0x1000
+#define FUNCT3_BLT 0x4000
+#define FUNCT3_BGE 0x5000
+#define FUNCT3_BLTU 0x6000
+#define FUNCT3_BGEU 0x7000
+
+/* parts of funct3 code for C extension */
+#define FUNCT3_C_BEQZ 0xc000
+#define FUNCT3_C_BNEZ 0xe000
+#define FUNCT3_C_J 0xa000
+#define FUNCT3_C_JAL 0x2000
+#define FUNCT4_C_JR 0x8000
+#define FUNCT4_C_JALR 0x9000
+
+#define FUNCT12_SRET 0x10200000
+
+#define MATCH_JALR (FUNCT3_JALR | OPCODE_JALR)
+#define MATCH_JAL (OPCODE_JAL)
+#define MATCH_BEQ (FUNCT3_BEQ | OPCODE_BRANCH)
+#define MATCH_BNE (FUNCT3_BNE | OPCODE_BRANCH)
+#define MATCH_BLT (FUNCT3_BLT | OPCODE_BRANCH)
+#define MATCH_BGE (FUNCT3_BGE | OPCODE_BRANCH)
+#define MATCH_BLTU (FUNCT3_BLTU | OPCODE_BRANCH)
+#define MATCH_BGEU (FUNCT3_BGEU | OPCODE_BRANCH)
+#define MATCH_SRET (FUNCT12_SRET | OPCODE_SYSTEM)
+#define MATCH_C_BEQZ (FUNCT3_C_BEQZ | OPCODE_C_1)
+#define MATCH_C_BNEZ (FUNCT3_C_BNEZ | OPCODE_C_1)
+#define MATCH_C_J (FUNCT3_C_J | OPCODE_C_1)
+#define MATCH_C_JAL (FUNCT3_C_JAL | OPCODE_C_1)
+#define MATCH_C_JR (FUNCT4_C_JR | OPCODE_C_2)
+#define MATCH_C_JALR (FUNCT4_C_JALR | OPCODE_C_2)
+
+#define MASK_JALR 0x707f
+#define MASK_JAL 0x7f
+#define MASK_C_JALR 0xf07f
+#define MASK_C_JR 0xf07f
+#define MASK_C_JAL 0xe003
+#define MASK_C_J 0xe003
+#define MASK_BEQ 0x707f
+#define MASK_BNE 0x707f
+#define MASK_BLT 0x707f
+#define MASK_BGE 0x707f
+#define MASK_BLTU 0x707f
+#define MASK_BGEU 0x707f
+#define MASK_C_BEQZ 0xe003
+#define MASK_C_BNEZ 0xe003
+#define MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(OPCODE_BRANCH)
+
+/*
+ * Define a series of is_XXX_insn functions to check if the value INSN
+ * is an instance of instruction XXX.
+ */
+#define DECLARE_INSN(INSN_NAME, INSN_MATCH, INSN_MASK) \
+static inline bool is_ ## INSN_NAME ## _insn(long insn) \
+{ \
+ return (insn & (INSN_MASK)) == (INSN_MATCH); \
+}
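+
+/*
+ * Illustrative instantiation (a sketch; callers pick from the MATCH_* and
+ * MASK_* pairs above):
+ *
+ *	DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+ *
+ * expands to is_jal_insn(insn), true for any 32-bit instruction whose
+ * opcode bits equal OPCODE_JAL once the non-opcode bits are masked off.
+ */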
+
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+#define RVC_X(X, s, mask) RV_X(X, s, mask)
+
+#define EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, J_IMM_10_1_OPOFF, J_IMM_10_1_MASK) << J_IMM_10_1_OFF) | \
+ (RV_X(x_, J_IMM_11_OPOFF, J_IMM_11_MASK) << J_IMM_11_OFF) | \
+ (RV_X(x_, J_IMM_19_12_OPOFF, J_IMM_19_12_MASK) << J_IMM_19_12_OFF) | \
+ (RV_IMM_SIGN(x_) << J_IMM_SIGN_OFF); })
+
+#define EXTRACT_ITYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, I_IMM_11_0_OPOFF, I_IMM_11_0_MASK)) | \
+ (RV_IMM_SIGN(x_) << I_IMM_SIGN_OFF); })
+
+#define EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, B_IMM_4_1_OPOFF, B_IMM_4_1_MASK) << B_IMM_4_1_OFF) | \
+ (RV_X(x_, B_IMM_10_5_OPOFF, B_IMM_10_5_MASK) << B_IMM_10_5_OFF) | \
+ (RV_X(x_, B_IMM_11_OPOFF, B_IMM_11_MASK) << B_IMM_11_OFF) | \
+ (RV_IMM_SIGN(x_) << B_IMM_SIGN_OFF); })
+
+#define EXTRACT_RVC_J_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
+
+#define EXTRACT_RVC_B_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
+
+#endif /* _ASM_RISCV_INSN_H */
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
new file mode 100644
index 000000000..98d9de07c
--- /dev/null
+++ b/arch/riscv/include/asm/patch.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_PATCH_H
+#define _ASM_RISCV_PATCH_H
+
+int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text(void *addr, u32 insn);
+
+extern int riscv_patch_in_stop_machine;
+
+#endif /* _ASM_RISCV_PATCH_H */
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
new file mode 100644
index 000000000..1c473a1bd
--- /dev/null
+++ b/arch/riscv/include/asm/pci.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 SiFive
+ */
+
+#ifndef _ASM_RISCV_PCI_H
+#define _ASM_RISCV_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 0
+#define PCIBIOS_MIN_MEM 0
+
+/* RISC-V shim does not initialize PCI bus */
+#define pcibios_assign_all_busses() 1
+
+extern int isa_dma_bridge_buggy;
+
+#ifdef CONFIG_PCI
+static inline int pci_get_legacy_ide_irq(struct pci_dev *dev, int channel)
+{
+ /* no legacy IRQ on risc-v */
+ return -ENODEV;
+}
+
+static inline int pci_proc_domain(struct pci_bus *bus)
+{
+ /* always show the domain in /proc */
+ return 1;
+}
+#endif /* CONFIG_PCI */
+
+#endif /* _ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
new file mode 100644
index 000000000..062efd3a1
--- /dev/null
+++ b/arch/riscv/include/asm/perf_event.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ *
+ */
+
+#ifndef _ASM_RISCV_PERF_EVENT_H
+#define _ASM_RISCV_PERF_EVENT_H
+
+#include <linux/perf_event.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+
+#ifdef CONFIG_RISCV_BASE_PMU
+#define RISCV_BASE_COUNTERS 2
+
+/*
+ * The RISCV_MAX_COUNTERS parameter should be specified.
+ */
+
+#define RISCV_MAX_COUNTERS 2
+
+/*
+ * These are the indexes of bits in the counteren register *minus* 1,
+ * except for cycle. It would be cleaner if they mapped directly to the
+ * counteren bit definitions, but there is a *time* register at
+ * counteren[1]. The per-cpu structure is a scarce resource here.
+ *
+ * According to the spec, an implementation can support counters up to
+ * mhpmcounter31, but many high-end processors have at most 6 general
+ * PMCs, so we only define up to MHPMCOUNTER8 here.
+ */
+#define RISCV_PMU_CYCLE 0
+#define RISCV_PMU_INSTRET 1
+#define RISCV_PMU_MHPMCOUNTER3 2
+#define RISCV_PMU_MHPMCOUNTER4 3
+#define RISCV_PMU_MHPMCOUNTER5 4
+#define RISCV_PMU_MHPMCOUNTER6 5
+#define RISCV_PMU_MHPMCOUNTER7 6
+#define RISCV_PMU_MHPMCOUNTER8 7
+
+#define RISCV_OP_UNSUPP (-EOPNOTSUPP)
+
+struct cpu_hw_events {
+	/* number of currently enabled events */
+ int n_events;
+ /* currently enabled events */
+ struct perf_event *events[RISCV_MAX_COUNTERS];
+ /* vendor-defined PMU data */
+ void *platform;
+};
+
+struct riscv_pmu {
+ struct pmu *pmu;
+
+ /* generic hw/cache events table */
+ const int *hw_events;
+ const int (*cache_events)[PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX];
+ /* method used to map hw/cache events */
+ int (*map_hw_event)(u64 config);
+ int (*map_cache_event)(u64 config);
+
+ /* max generic hw events in map */
+ int max_events;
+	/* total number of counters, 2 (base) + x (general) */
+ int num_counters;
+ /* the width of the counter */
+ int counter_width;
+
+ /* vendor-defined PMU features */
+ void *platform;
+
+ irqreturn_t (*handle_irq)(int irq_num, void *dev);
+ int irq;
+};
+
+#endif
+#ifdef CONFIG_PERF_EVENTS
+#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+#endif
+
+#endif /* _ASM_RISCV_PERF_EVENT_H */
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
new file mode 100644
index 000000000..23b1544e0
--- /dev/null
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGALLOC_H
+#define _ASM_RISCV_PGALLOC_H
+
+#include <linux/mm.h>
+#include <asm/tlb.h>
+
+#ifdef CONFIG_MMU
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ unsigned long pfn = virt_to_pfn(pte);
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm,
+ pmd_t *pmd, pgtable_t pte)
+{
+ unsigned long pfn = virt_to_pfn(page_address(pte));
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ unsigned long pfn = virt_to_pfn(pmd);
+
+ set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+#define pmd_pgtable(pmd) pmd_page(pmd)
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (likely(pgd != NULL)) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ /* Copy kernel mappings */
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+ }
+ return pgd;
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+#define __pte_free_tlb(tlb, pte, buf) \
+do { \
+ pgtable_pte_page_dtor(pte); \
+ tlb_remove_page((tlb), pte); \
+} while (0)
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_PGALLOC_H */
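
The populate helpers above all use the same encoding: the page frame number
of the next-level table is shifted up to bit 10 and only the V bit
(_PAGE_TABLE) is set, marking the entry as a pointer rather than a leaf.
A standalone demonstration of that arithmetic, with an illustrative
physical address:

    #include <stdint.h>
    #include <stdio.h>

    #define PAGE_SHIFT      12
    #define _PAGE_PFN_SHIFT 10
    #define _PAGE_TABLE     0x1UL   /* V bit only: next-level table */

    int main(void)
    {
    	uint64_t table_pa = 0x80201000;          /* example, page aligned */
    	uint64_t pfn = table_pa >> PAGE_SHIFT;
    	uint64_t pmd = (pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE;

    	printf("pmd entry: 0x%llx\n", (unsigned long long)pmd);
    	/* Decoding reverses the shift, as the page-table walkers do. */
    	printf("table pa:  0x%llx\n",
    	       (unsigned long long)((pmd >> _PAGE_PFN_SHIFT) << PAGE_SHIFT));
    	return 0;
    }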
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
new file mode 100644
index 000000000..5b2e79e5b
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_32_H
+#define _ASM_RISCV_PGTABLE_32_H
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <linux/const.h>
+
+/* Size of region mapped by a page global directory */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 34
+
+#endif /* _ASM_RISCV_PGTABLE_32_H */
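
These constants describe the Sv32 two-level scheme: with PGDIR_SHIFT at 22,
each of the 1024 PGD entries covers 4 MiB, and MAX_POSSIBLE_PHYSMEM_BITS
matches Sv32's 34-bit physical address space. A quick standalone check of
the arithmetic:

    #include <stdio.h>

    #define PGDIR_SHIFT 22

    int main(void)
    {
    	printf("per-entry region: %lu MiB\n", (1UL << PGDIR_SHIFT) >> 20);
    	printf("pgd entries:      %lu\n", 1UL << (32 - PGDIR_SHIFT));
    	return 0;
    }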
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
new file mode 100644
index 000000000..0e863f3f7
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -0,0 +1,86 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_64_H
+#define _ASM_RISCV_PGTABLE_64_H
+
+#include <linux/const.h>
+
+#define PGDIR_SHIFT 30
+/* Size of region mapped by a page global directory */
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+#define PMD_SHIFT 21
+/* Size of region mapped by a page middle directory */
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+
+/* Page Middle Directory entry */
+typedef struct {
+ unsigned long pmd;
+} pmd_t;
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+
+static inline int pud_present(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_PRESENT);
+}
+
+static inline int pud_none(pud_t pud)
+{
+ return (pud_val(pud) == 0);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return !pud_present(pud);
+}
+
+#define pud_leaf pud_leaf
+static inline int pud_leaf(pud_t pud)
+{
+ return pud_present(pud) &&
+ (pud_val(pud) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)pfn_to_virt(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
+static inline struct page *pud_page(pud_t pud)
+{
+ return pfn_to_page(pud_val(pud) >> _PAGE_PFN_SHIFT);
+}
+
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
+{
+ return __pmd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pmd_pfn(pmd_t pmd)
+{
+ return pmd_val(pmd) >> _PAGE_PFN_SHIFT;
+}
+
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+
+#endif /* _ASM_RISCV_PGTABLE_64_H */
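
pfn_pmd() and _pmd_pfn() above are exact inverses: the PFN lives at bit 10
and the low 10 bits hold protection flags. A standalone sketch of the Sv39
address split these constants imply, assuming 512 entries per level (which
follows from PAGE_SIZE / sizeof(pmd_t)):

    #include <stdint.h>
    #include <stdio.h>

    #define PMD_SHIFT    21
    #define PGDIR_SHIFT  30
    #define PTRS_PER_PGD 512
    #define PTRS_PER_PMD 512

    int main(void)
    {
    	uint64_t va = 0x4000c02345ULL;   /* arbitrary example address */

    	/* 1 GiB per PGD entry, 2 MiB per PMD entry under Sv39 */
    	printf("pgd index: %llu\n",
    	       (unsigned long long)((va >> PGDIR_SHIFT) & (PTRS_PER_PGD - 1)));
    	printf("pmd index: %llu\n",
    	       (unsigned long long)((va >> PMD_SHIFT) & (PTRS_PER_PMD - 1)));
    	return 0;
    }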
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
new file mode 100644
index 000000000..bbaeb5d35
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_BITS_H
+#define _ASM_RISCV_PGTABLE_BITS_H
+
+/*
+ * PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+
+#define _PAGE_ACCESSED_OFFSET 6
+
+#define _PAGE_PRESENT (1 << 0)
+#define _PAGE_READ (1 << 1) /* Readable */
+#define _PAGE_WRITE (1 << 2) /* Writable */
+#define _PAGE_EXEC (1 << 3) /* Executable */
+#define _PAGE_USER (1 << 4) /* User */
+#define _PAGE_GLOBAL (1 << 5) /* Global */
+#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
+#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
+#define _PAGE_SOFT (1 << 8) /* Reserved for software */
+
+#define _PAGE_SPECIAL _PAGE_SOFT
+#define _PAGE_TABLE _PAGE_PRESENT
+
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_READ
+
+#define _PAGE_PFN_SHIFT 10
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
+
+#endif /* _ASM_RISCV_PGTABLE_BITS_H */
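
One consequence of this layout is that the R/W/X bits double as the leaf
marker: a valid entry with R=W=X=0 points to the next table level, which is
why _PAGE_TABLE is just _PAGE_PRESENT and why pmd_leaf()/pud_leaf() in the
neighbouring headers test R|W|X. A standalone decoder for an example PTE
value (the PFN is made up):

    #include <stdint.h>
    #include <stdio.h>

    #define _PAGE_PRESENT   (1 << 0)
    #define _PAGE_READ      (1 << 1)
    #define _PAGE_WRITE     (1 << 2)
    #define _PAGE_EXEC      (1 << 3)
    #define _PAGE_PFN_SHIFT 10

    int main(void)
    {
    	/* A valid read/write leaf PTE for PFN 0x80201. */
    	uint64_t pte = (0x80201ULL << _PAGE_PFN_SHIFT)
    		     | _PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE;

    	printf("pfn=0x%llx V=%d R=%d W=%d X=%d leaf=%d\n",
    	       (unsigned long long)(pte >> _PAGE_PFN_SHIFT),
    	       !!(pte & _PAGE_PRESENT), !!(pte & _PAGE_READ),
    	       !!(pte & _PAGE_WRITE), !!(pte & _PAGE_EXEC),
    	       !!(pte & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)));
    	return 0;
    }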
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
new file mode 100644
index 000000000..b16304fdf
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable.h
@@ -0,0 +1,486 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_H
+#define _ASM_RISCV_PGTABLE_H
+
+#include <linux/mmzone.h>
+#include <linux/sizes.h>
+
+#include <asm/pgtable-bits.h>
+
+#ifndef __ASSEMBLY__
+
+/* Page Upper Directory not used in RISC-V */
+#include <asm-generic/pgtable-nopud.h>
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+
+#ifdef CONFIG_MMU
+
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END (PAGE_OFFSET - 1)
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
+
+#define BPF_JIT_REGION_SIZE (SZ_128M)
+#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (VMALLOC_END)
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VMEMMAP_SHIFT \
+ (CONFIG_VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END (VMALLOC_START - 1)
+#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE SZ_16M
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP PCI_IO_START
+#ifdef CONFIG_64BIT
+#define FIXADDR_SIZE PMD_SIZE
+#else
+#define FIXADDR_SIZE PGDIR_SIZE
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#endif
+
+#ifdef CONFIG_64BIT
+#include <asm/pgtable-64.h>
+#else
+#include <asm/pgtable-32.h>
+#endif /* CONFIG_64BIT */
+
+#ifdef CONFIG_MMU
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+/* Number of PGD entries that a user-mode program can use */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/* Page protection bits */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
+ _PAGE_EXEC | _PAGE_WRITE)
+
+#define PAGE_COPY PAGE_READ
+#define PAGE_COPY_EXEC PAGE_EXEC
+#define PAGE_COPY_READ_EXEC PAGE_READ_EXEC
+#define PAGE_SHARED PAGE_WRITE
+#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
+
+#define _PAGE_KERNEL (_PAGE_READ \
+ | _PAGE_WRITE \
+ | _PAGE_PRESENT \
+ | _PAGE_ACCESSED \
+ | _PAGE_DIRTY)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
+#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
+ | _PAGE_EXEC)
+
+#define PAGE_TABLE __pgprot(_PAGE_TABLE)
+
+/*
+ * The RISC-V ISA doesn't yet specify how to query or modify PMAs, so we can't
+ * change the properties of memory regions.
+ */
+#define _PAGE_IOREMAP _PAGE_KERNEL
+
+extern pgd_t swapper_pg_dir[];
+
+/* MAP_PRIVATE permissions: xwr (copy-on-write) */
+#define __P000 PAGE_NONE
+#define __P001 PAGE_READ
+#define __P010 PAGE_COPY
+#define __P011 PAGE_COPY
+#define __P100 PAGE_EXEC
+#define __P101 PAGE_READ_EXEC
+#define __P110 PAGE_COPY_EXEC
+#define __P111 PAGE_COPY_READ_EXEC
+
+/* MAP_SHARED permissions: xwr */
+#define __S000 PAGE_NONE
+#define __S001 PAGE_READ
+#define __S010 PAGE_SHARED
+#define __S011 PAGE_SHARED
+#define __S100 PAGE_EXEC
+#define __S101 PAGE_READ_EXEC
+#define __S110 PAGE_SHARED_EXEC
+#define __S111 PAGE_SHARED_EXEC
+
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) == 0);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return !pmd_present(pmd);
+}
+
+#define pmd_leaf pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
+{
+ return pmd_present(pmd) &&
+ (pmd_val(pmd) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+
+static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
+{
+ return __pgd((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pgd_pfn(pgd_t pgd)
+{
+ return pgd_val(pgd) >> _PAGE_PFN_SHIFT;
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ return pfn_to_page(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pfn_to_virt(pmd_val(pmd) >> _PAGE_PFN_SHIFT);
+}
+
+/* Yields the page frame number (PFN) of a page table entry */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ return (pte_val(pte) >> _PAGE_PFN_SHIFT);
+}
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* Constructs a page table entry */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ return __pte((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline int pte_present(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+
+static inline int pte_none(pte_t pte)
+{
+ return (pte_val(pte) == 0);
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_WRITE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_EXEC;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_present(pte)
+ && (pte_val(pte) & (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC));
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SPECIAL;
+}
+
+/* static inline pte_t pte_rdprotect(pte_t pte) */
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_WRITE));
+}
+
+/* static inline pte_t pte_mkread(pte_t pte) */
+
+static inline pte_t pte_mkwrite(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_WRITE);
+}
+
+/* static inline pte_t pte_mkexec(pte_t pte) */
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte;
+}
+
+/* Modify page protection bits */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+}
+
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
+
+
+/* Commit new configuration to MMU hardware */
+static inline void update_mmu_cache(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * The kernel assumes that TLBs don't cache invalid entries, but
+ * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
+ * cache flush; it is necessary even after writing invalid entries.
+ * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ * the extra traps reduce performance. So, eagerly SFENCE.VMA.
+ */
+ local_flush_tlb_page(address);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified. Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void set_pte_at(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pteval)
+{
+ if (pte_present(pteval) && pte_exec(pteval))
+ flush_icache_pte(pteval);
+
+ set_pte(ptep, pteval);
+}
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ set_pte_at(mm, addr, ptep, __pte(0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ set_pte_at(vma->vm_mm, address, ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ return __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * This comment is borrowed from x86, but applies equally to RISC-V:
+ *
+ * Clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+ return ptep_test_and_clear_young(vma, address, ptep);
+}
+
+/*
+ * Encode and decode a swap entry
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1: _PAGE_PROT_NONE (zero)
+ * bits 2 to 6: swap type
+ * bits 7 to XLEN-1: swap offset
+ */
+#define __SWP_TYPE_SHIFT 2
+#define __SWP_TYPE_BITS 5
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) \
+ { ((type) << __SWP_TYPE_SHIFT) | ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
+
+/*
+ * In the RV64 Linux scheme, we give the user the lower half of the
+ * virtual-address space and give the kernel the other (upper) half.
+ */
+#ifdef CONFIG_64BIT
+#define KERN_VIRT_START (-(BIT(CONFIG_VA_BITS)) + TASK_SIZE)
+#else
+#define KERN_VIRT_START FIXADDR_START
+#endif
+
+/*
+ * Task size is 0x4000000000 for RV64 or 0x9fc00000 for RV32.
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_SIZE (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#else
+#define TASK_SIZE FIXADDR_START
+#endif
+
+#else /* CONFIG_MMU */
+
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+#define swapper_pg_dir NULL
+#define TASK_SIZE 0xffffffffUL
+#define VMALLOC_START 0
+#define VMALLOC_END TASK_SIZE
+
+static inline void __kernel_map_pages(struct page *page, int numpages, int enable) {}
+
+#endif /* !CONFIG_MMU */
+
+#define kern_addr_valid(addr) (1) /* FIXME */
+
+extern void *dtb_early_va;
+extern uintptr_t dtb_early_pa;
+void setup_bootmem(void);
+void paging_init(void);
+void misc_mem_init(void);
+
+#define FIRST_USER_ADDRESS 0
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PGTABLE_H */
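
The swap-entry format documented in this file keeps bits 0 and 1 clear, so a
swapped-out PTE reads as neither present nor PROT_NONE; the type and offset
pack into the remaining bits. A standalone round trip of that encoding, with
example type/offset values:

    #include <stdio.h>

    #define __SWP_TYPE_SHIFT   2
    #define __SWP_TYPE_BITS    5
    #define __SWP_TYPE_MASK    ((1UL << __SWP_TYPE_BITS) - 1)
    #define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)

    int main(void)
    {
    	unsigned long type = 3, offset = 0x12345;
    	unsigned long val = (type << __SWP_TYPE_SHIFT)
    			  | (offset << __SWP_OFFSET_SHIFT);

    	/* Bits 0/1 stay clear: not present, not PROT_NONE. */
    	printf("type=%lu offset=0x%lx low2=0x%lx\n",
    	       (val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK,
    	       val >> __SWP_OFFSET_SHIFT, val & 3);
    	return 0;
    }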
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
new file mode 100644
index 000000000..bdddcd5c1
--- /dev/null
+++ b/arch/riscv/include/asm/processor.h
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#include <linux/const.h>
+
+#include <vdso/processor.h>
+
+#include <asm/ptrace.h>
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+
+#define STACK_TOP TASK_SIZE
+#define STACK_TOP_MAX STACK_TOP
+#define STACK_ALIGN 16
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct pt_regs;
+
+/* CPU-specific state of a task */
+struct thread_struct {
+ /* Callee-saved registers */
+ unsigned long ra;
+ unsigned long sp; /* Kernel mode stack */
+ unsigned long s[12]; /* s[0]: frame pointer */
+ struct __riscv_d_ext_state fstate;
+};
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + (long)&init_stack, \
+}
+
+#define task_pt_regs(tsk) \
+ ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
+ - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+/* Do necessary setup to start up a newly executed thread. */
+extern void start_thread(struct pt_regs *regs,
+ unsigned long pc, unsigned long sp);
+
+/* Free all resources held by a thread. */
+static inline void release_thread(struct task_struct *dead_task)
+{
+}
+
+extern unsigned long get_wchan(struct task_struct *p);
+
+
+static inline void wait_for_interrupt(void)
+{
+ __asm__ __volatile__ ("wfi");
+}
+
+struct device_node;
+int riscv_of_processor_hartid(struct device_node *node);
+int riscv_of_parent_hartid(struct device_node *node);
+
+extern void riscv_fill_hwcap(void);
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h
new file mode 100644
index 000000000..3c9ea6dd5
--- /dev/null
+++ b/arch/riscv/include/asm/ptdump.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#ifndef _ASM_RISCV_PTDUMP_H
+#define _ASM_RISCV_PTDUMP_H
+
+void ptdump_check_wx(void);
+
+#ifdef CONFIG_DEBUG_WX
+static inline void debug_checkwx(void)
+{
+ ptdump_check_wx();
+}
+#else
+static inline void debug_checkwx(void)
+{
+}
+#endif
+
+#endif /* _ASM_RISCV_PTDUMP_H */
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
new file mode 100644
index 000000000..ee49f80c9
--- /dev/null
+++ b/arch/riscv/include/asm/ptrace.h
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PTRACE_H
+#define _ASM_RISCV_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/csr.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long epc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ /* Supervisor/Machine CSRs */
+ unsigned long status;
+ unsigned long badaddr;
+ unsigned long cause;
+ /* a0 value before the syscall */
+ unsigned long orig_a0;
+};
+
+#ifdef CONFIG_64BIT
+#define REG_FMT "%016lx"
+#else
+#define REG_FMT "%08lx"
+#endif
+
+#define user_mode(regs) (((regs)->status & SR_PP) == 0)
+
+
+/* Helpers for working with the instruction pointer */
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return regs->epc;
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->epc = val;
+}
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* Helpers for working with the user stack pointer */
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->sp = val;
+}
+
+/* Helpers for working with the frame pointer */
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return regs->s0;
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->s0 = val;
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
new file mode 100644
index 000000000..c0fdb05ff
--- /dev/null
+++ b/arch/riscv/include/asm/sbi.h
@@ -0,0 +1,157 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_SBI_H
+#define _ASM_RISCV_SBI_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_RISCV_SBI
+enum sbi_ext_id {
+#ifdef CONFIG_RISCV_SBI_V01
+ SBI_EXT_0_1_SET_TIMER = 0x0,
+ SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
+ SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
+ SBI_EXT_0_1_CLEAR_IPI = 0x3,
+ SBI_EXT_0_1_SEND_IPI = 0x4,
+ SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
+ SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
+ SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
+ SBI_EXT_0_1_SHUTDOWN = 0x8,
+#endif
+ SBI_EXT_BASE = 0x10,
+ SBI_EXT_TIME = 0x54494D45,
+ SBI_EXT_IPI = 0x735049,
+ SBI_EXT_RFENCE = 0x52464E43,
+ SBI_EXT_HSM = 0x48534D,
+};
+
+enum sbi_ext_base_fid {
+ SBI_EXT_BASE_GET_SPEC_VERSION = 0,
+ SBI_EXT_BASE_GET_IMP_ID,
+ SBI_EXT_BASE_GET_IMP_VERSION,
+ SBI_EXT_BASE_PROBE_EXT,
+ SBI_EXT_BASE_GET_MVENDORID,
+ SBI_EXT_BASE_GET_MARCHID,
+ SBI_EXT_BASE_GET_MIMPID,
+};
+
+enum sbi_ext_time_fid {
+ SBI_EXT_TIME_SET_TIMER = 0,
+};
+
+enum sbi_ext_ipi_fid {
+ SBI_EXT_IPI_SEND_IPI = 0,
+};
+
+enum sbi_ext_rfence_fid {
+ SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
+ SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+};
+
+enum sbi_ext_hsm_fid {
+ SBI_EXT_HSM_HART_START = 0,
+ SBI_EXT_HSM_HART_STOP,
+ SBI_EXT_HSM_HART_STATUS,
+};
+
+enum sbi_hsm_hart_status {
+ SBI_HSM_HART_STATUS_STARTED = 0,
+ SBI_HSM_HART_STATUS_STOPPED,
+ SBI_HSM_HART_STATUS_START_PENDING,
+ SBI_HSM_HART_STATUS_STOP_PENDING,
+};
+
+#define SBI_SPEC_VERSION_DEFAULT 0x1
+#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
+#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
+#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
+
+/* SBI return error codes */
+#define SBI_SUCCESS 0
+#define SBI_ERR_FAILURE -1
+#define SBI_ERR_NOT_SUPPORTED -2
+#define SBI_ERR_INVALID_PARAM -3
+#define SBI_ERR_DENIED -4
+#define SBI_ERR_INVALID_ADDRESS -5
+
+extern unsigned long sbi_spec_version;
+struct sbiret {
+ long error;
+ long value;
+};
+
+int sbi_init(void);
+struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5);
+
+void sbi_console_putchar(int ch);
+int sbi_console_getchar(void);
+void sbi_set_timer(uint64_t stime_value);
+void sbi_shutdown(void);
+void sbi_clear_ipi(void);
+void sbi_send_ipi(const unsigned long *hart_mask);
+void sbi_remote_fence_i(const unsigned long *hart_mask);
+void sbi_remote_sfence_vma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size);
+
+void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid);
+int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size);
+int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long vmid);
+int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size);
+int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid);
+int sbi_probe_extension(int ext);
+
+/* Check whether the current SBI specification version is 0.1 */
+static inline int sbi_spec_is_0_1(void)
+{
+ return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
+}
+
+/* Get the major version of SBI */
+static inline unsigned long sbi_major_version(void)
+{
+ return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
+ SBI_SPEC_VERSION_MAJOR_MASK;
+}
+
+/* Get the minor version of SBI */
+static inline unsigned long sbi_minor_version(void)
+{
+ return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
+}
+
+int sbi_err_map_linux_errno(int err);
+#else /* CONFIG_RISCV_SBI */
+/* stubs for code that is only reachable under IS_ENABLED(CONFIG_RISCV_SBI): */
+void sbi_set_timer(uint64_t stime_value);
+void sbi_clear_ipi(void);
+void sbi_send_ipi(const unsigned long *hart_mask);
+void sbi_remote_fence_i(const unsigned long *hart_mask);
+void sbi_init(void);
+#endif /* CONFIG_RISCV_SBI */
+#endif /* _ASM_RISCV_SBI_H */
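
All of the wrappers declared above funnel into sbi_ecall(), which per the
SBI v0.2 calling convention places the extension ID in a7, the function ID
in a6, and the arguments in a0-a5, returning the error/value pair in struct
sbiret. A hedged sketch of how the TIME extension's set_timer call might be
issued (example_set_timer is invented; on RV32 the 64-bit value would really
be split across two argument registers, which is omitted here):

    static void example_set_timer(u64 stime_value)
    {
    	struct sbiret ret;

    	ret = sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER,
    			stime_value, 0, 0, 0, 0, 0);
    	if (ret.error)
    		pr_warn("SBI set_timer failed: %d\n",
    			sbi_err_map_linux_errno(ret.error));
    }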
diff --git a/arch/riscv/include/asm/seccomp.h b/arch/riscv/include/asm/seccomp.h
new file mode 100644
index 000000000..bf7744ee3
--- /dev/null
+++ b/arch/riscv/include/asm/seccomp.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
new file mode 100644
index 000000000..3a9971b12
--- /dev/null
+++ b/arch/riscv/include/asm/sections.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+
+extern char _start[];
+extern char _start_kernel[];
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
new file mode 100644
index 000000000..4c5bae7ca
--- /dev/null
+++ b/arch/riscv/include/asm/set_memory.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#ifndef _ASM_RISCV_SET_MEMORY_H
+#define _ASM_RISCV_SET_MEMORY_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Functions to change memory attributes.
+ */
+#ifdef CONFIG_MMU
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+#endif
+
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_ARCH_HAS_STRICT_KERNEL_RWX
+#ifdef CONFIG_64BIT
+#define SECTION_ALIGN (1 << 21)
+#else
+#define SECTION_ALIGN (1 << 22)
+#endif
+#else /* !CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+#define SECTION_ALIGN L1_CACHE_BYTES
+#endif /* CONFIG_ARCH_HAS_STRICT_KERNEL_RWX */
+
+#endif /* _ASM_RISCV_SET_MEMORY_H */
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
new file mode 100644
index 000000000..df1f7c4cd
--- /dev/null
+++ b/arch/riscv/include/asm/smp.h
@@ -0,0 +1,114 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SMP_H
+#define _ASM_RISCV_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/thread_info.h>
+
+#define INVALID_HARTID ULONG_MAX
+
+struct seq_file;
+extern unsigned long boot_cpu_hartid;
+
+struct riscv_ipi_ops {
+ void (*ipi_inject)(const struct cpumask *target);
+ void (*ipi_clear)(void);
+};
+
+#ifdef CONFIG_SMP
+/*
+ * Mapping between the Linux logical CPU index and the hartid.
+ */
+extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
+#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
+
+/* print IPI stats */
+void show_ipi_stats(struct seq_file *p, int prec);
+
+/* SMP initialization hook for setup_arch */
+void __init setup_smp(void);
+
+/* Called from C code, this handles an IPI. */
+void handle_IPI(struct pt_regs *regs);
+
+/* Hook for the generic smp_call_function_many() routine. */
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+/* Hook for the generic smp_call_function_single() routine. */
+void arch_send_call_function_single_ipi(int cpu);
+
+int riscv_hartid_to_cpuid(int hartid);
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out);
+
+/* Set custom IPI operations */
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops);
+
+/* Clear IPI for current CPU */
+void riscv_clear_ipi(void);
+
+/* Secondary hart entry */
+asmlinkage void smp_callin(void);
+
+/*
+ * Obtains the logical CPU index of the currently executing task. This relies on
+ * THREAD_INFO_IN_TASK, but we define that unconditionally.
+ */
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+#ifdef CONFIG_HOTPLUG_CPU
+int __cpu_disable(void);
+void __cpu_die(unsigned int cpu);
+void cpu_stop(void);
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#else
+
+static inline void show_ipi_stats(struct seq_file *p, int prec)
+{
+}
+
+static inline int riscv_hartid_to_cpuid(int hartid)
+{
+ if (hartid == boot_cpu_hartid)
+ return 0;
+
+ return -1;
+}
+static inline unsigned long cpuid_to_hartid_map(int cpu)
+{
+ return boot_cpu_hartid;
+}
+
+static inline void riscv_cpuid_to_hartid_mask(const struct cpumask *in,
+ struct cpumask *out)
+{
+ cpumask_clear(out);
+ cpumask_set_cpu(boot_cpu_hartid, out);
+}
+
+static inline void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+}
+
+static inline void riscv_clear_ipi(void)
+{
+}
+
+#endif /* CONFIG_SMP */
+
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_SMP)
+bool cpu_has_hotplug(unsigned int cpu);
+#else
+static inline bool cpu_has_hotplug(unsigned int cpu)
+{
+ return false;
+}
+#endif
+
+#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h
new file mode 100644
index 000000000..6c8363b1f
--- /dev/null
+++ b/arch/riscv/include/asm/soc.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_SOC_H
+#define _ASM_RISCV_SOC_H
+
+#include <linux/of.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#define SOC_EARLY_INIT_DECLARE(name, compat, fn) \
+ static const struct of_device_id __soc_early_init__##name \
+ __used __section("__soc_early_init_table") \
+ = { .compatible = compat, .data = fn }
+
+void soc_early_init(void);
+
+extern unsigned long __soc_early_init_table_start;
+extern unsigned long __soc_early_init_table_end;
+
+/*
+ * Allows Linux to provide a device tree, which is necessary for SOCs that
+ * don't provide a useful one on their own.
+ */
+struct soc_builtin_dtb {
+ unsigned long vendor_id;
+ unsigned long arch_id;
+ unsigned long imp_id;
+ void *(*dtb_func)(void);
+};
+
+/*
+ * The 'name' argument must specify a valid DTS file name, without the
+ * .dts extension.
+ */
+#define SOC_BUILTIN_DTB_DECLARE(name, vendor, arch, impl) \
+ extern void *__dtb_##name##_begin; \
+ \
+ static __init __used \
+ void *__soc_builtin_dtb_f__##name(void) \
+ { \
+ return (void *)&__dtb_##name##_begin; \
+ } \
+ \
+ static const struct soc_builtin_dtb __soc_builtin_dtb__##name \
+ __used __section("__soc_builtin_dtb_table") = \
+ { \
+ .vendor_id = vendor, \
+ .arch_id = arch, \
+ .imp_id = impl, \
+ .dtb_func = __soc_builtin_dtb_f__##name, \
+ }
+
+extern unsigned long __soc_builtin_dtb_table_start;
+extern unsigned long __soc_builtin_dtb_table_end;
+
+void *soc_lookup_builtin_dtb(void);
+
+#endif
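
Putting SOC_EARLY_INIT_DECLARE to use looks roughly like the sketch below.
"vendor,my-soc" and my_soc_early_init are invented names, and the callback
signature (taking the FDT pointer) is an assumption based on how
soc_early_init() dispatches the table entries:

    /* Hypothetical example; runs before regular driver probing. */
    static void __init my_soc_early_init(const void *fdt)
    {
    	/* e.g. ungate an always-on clock before earlycon comes up */
    }

    SOC_EARLY_INIT_DECLARE(my_soc, "vendor,my-soc", my_soc_early_init);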
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
new file mode 100644
index 000000000..45a7018a8
--- /dev/null
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_SPARSEMEM_H
+#define _ASM_RISCV_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+#define MAX_PHYSMEM_BITS CONFIG_PA_BITS
+#define SECTION_SIZE_BITS 27
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_RISCV_SPARSEMEM_H */
diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
new file mode 100644
index 000000000..f4f7fa1b7
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_SPINLOCK_H
+#define _ASM_RISCV_SPINLOCK_H
+
+#include <linux/kernel.h>
+#include <asm/current.h>
+#include <asm/fence.h>
+
+/*
+ * Simple spin lock operations. These provide no fairness guarantees.
+ */
+
+/* FIXME: Replace this with a ticket lock, like MIPS. */
+
+#define arch_spin_is_locked(x) (READ_ONCE((x)->lock) != 0)
+
+static inline void arch_spin_unlock(arch_spinlock_t *lock)
+{
+ smp_store_release(&lock->lock, 0);
+}
+
+static inline int arch_spin_trylock(arch_spinlock_t *lock)
+{
+ int tmp = 1, busy;
+
+ __asm__ __volatile__ (
+ " amoswap.w %0, %2, %1\n"
+ RISCV_ACQUIRE_BARRIER
+ : "=r" (busy), "+A" (lock->lock)
+ : "r" (tmp)
+ : "memory");
+
+ return !busy;
+}
+
+static inline void arch_spin_lock(arch_spinlock_t *lock)
+{
+ while (1) {
+ if (arch_spin_is_locked(lock))
+ continue;
+
+ if (arch_spin_trylock(lock))
+ break;
+ }
+}
+
+/***********************************************************/
+
+static inline void arch_read_lock(arch_rwlock_t *lock)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bltz %1, 1b\n"
+ " addi %1, %1, 1\n"
+ " sc.w %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ RISCV_ACQUIRE_BARRIER
+ : "+A" (lock->lock), "=&r" (tmp)
+ :: "memory");
+}
+
+static inline void arch_write_lock(arch_rwlock_t *lock)
+{
+ int tmp;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bnez %1, 1b\n"
+ " li %1, -1\n"
+ " sc.w %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ RISCV_ACQUIRE_BARRIER
+ : "+A" (lock->lock), "=&r" (tmp)
+ :: "memory");
+}
+
+static inline int arch_read_trylock(arch_rwlock_t *lock)
+{
+ int busy;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bltz %1, 1f\n"
+ " addi %1, %1, 1\n"
+ " sc.w %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ RISCV_ACQUIRE_BARRIER
+ "1:\n"
+ : "+A" (lock->lock), "=&r" (busy)
+ :: "memory");
+
+ return !busy;
+}
+
+static inline int arch_write_trylock(arch_rwlock_t *lock)
+{
+ int busy;
+
+ __asm__ __volatile__(
+ "1: lr.w %1, %0\n"
+ " bnez %1, 1f\n"
+ " li %1, -1\n"
+ " sc.w %1, %1, %0\n"
+ " bnez %1, 1b\n"
+ RISCV_ACQUIRE_BARRIER
+ "1:\n"
+ : "+A" (lock->lock), "=&r" (busy)
+ :: "memory");
+
+ return !busy;
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *lock)
+{
+ __asm__ __volatile__(
+ RISCV_RELEASE_BARRIER
+ " amoadd.w x0, %1, %0\n"
+ : "+A" (lock->lock)
+ : "r" (-1)
+ : "memory");
+}
+
+static inline void arch_write_unlock(arch_rwlock_t *lock)
+{
+ smp_store_release(&lock->lock, 0);
+}
+
+#endif /* _ASM_RISCV_SPINLOCK_H */
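
For readers less fluent in RISC-V assembly, arch_read_lock() above is a
load-reserved/store-conditional loop: spin while the count is negative (a
writer holds the lock), otherwise try to atomically increment the reader
count, retrying if the store-conditional fails. A C-level restatement of the
same shape using a GCC builtin in place of lr/sc (a sketch, not a drop-in
replacement):

    static inline void read_lock_sketch(volatile int *lock)
    {
    	int val;

    	do {
    		do {
    			val = *lock;            /* lr.w */
    		} while (val < 0);              /* bltz: writer active */
    		/* the CAS fails if someone raced us, like a failed sc.w */
    	} while (!__sync_bool_compare_and_swap(lock, val, val + 1));
    }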
diff --git a/arch/riscv/include/asm/spinlock_types.h b/arch/riscv/include/asm/spinlock_types.h
new file mode 100644
index 000000000..f398e7638
--- /dev/null
+++ b/arch/riscv/include/asm/spinlock_types.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SPINLOCK_TYPES_H
+#define _ASM_RISCV_SPINLOCK_TYPES_H
+
+#ifndef __LINUX_SPINLOCK_TYPES_H
+# error "please don't include this file directly"
+#endif
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_spinlock_t;
+
+#define __ARCH_SPIN_LOCK_UNLOCKED { 0 }
+
+typedef struct {
+ volatile unsigned int lock;
+} arch_rwlock_t;
+
+#define __ARCH_RW_LOCK_UNLOCKED { 0 }
+
+#endif /* _ASM_RISCV_SPINLOCK_TYPES_H */
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
new file mode 100644
index 000000000..5962f8891
--- /dev/null
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_STACKPROTECTOR_H
+#define _ASM_RISCV_STACKPROTECTOR_H
+
+#include <linux/random.h>
+#include <linux/version.h>
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary;
+
+ /* Try to get a semi random initial value. */
+ get_random_bytes(&canary, sizeof(canary));
+ canary ^= LINUX_VERSION_CODE;
+ canary &= CANARY_MASK;
+
+ current->stack_canary = canary;
+ __stack_chk_guard = current->stack_canary;
+}
+#endif /* _ASM_RISCV_STACKPROTECTOR_H */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
new file mode 100644
index 000000000..924af13f8
--- /dev/null
+++ b/arch/riscv/include/asm/string.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_STRING_H
+#define _ASM_RISCV_STRING_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+
+#define __HAVE_ARCH_MEMSET
+extern asmlinkage void *memset(void *, int, size_t);
+extern asmlinkage void *__memset(void *, int, size_t);
+
+#define __HAVE_ARCH_MEMCPY
+extern asmlinkage void *memcpy(void *, const void *, size_t);
+extern asmlinkage void *__memcpy(void *, const void *, size_t);
+
+/* For files that do not want their mem*() calls checked by KASAN. */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+
+#endif
+#endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
new file mode 100644
index 000000000..407bcc96a
--- /dev/null
+++ b/arch/riscv/include/asm/switch_to.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SWITCH_TO_H
+#define _ASM_RISCV_SWITCH_TO_H
+
+#include <linux/sched/task_stack.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+#ifdef CONFIG_FPU
+extern void __fstate_save(struct task_struct *save_to);
+extern void __fstate_restore(struct task_struct *restore_from);
+
+static inline void __fstate_clean(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
+}
+
+static inline void fstate_save(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_FS) == SR_FS_DIRTY) {
+ __fstate_save(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void fstate_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_FS) != SR_FS_OFF) {
+ __fstate_restore(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void __switch_to_aux(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(prev);
+ if (unlikely(regs->status & SR_SD))
+ fstate_save(prev, regs);
+ fstate_restore(next, task_pt_regs(next));
+}
+
+extern bool has_fpu;
+#else
+#define has_fpu false
+#define fstate_save(task, regs) do { } while (0)
+#define fstate_restore(task, regs) do { } while (0)
+#define __switch_to_aux(__prev, __next) do { } while (0)
+#endif
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ if (has_fpu) \
+ __switch_to_aux(__prev, __next); \
+ ((last) = __switch_to(__prev, __next)); \
+} while (0)
+
+#endif /* _ASM_RISCV_SWITCH_TO_H */
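
The FPU handling here is lazy: __switch_to_aux() only saves the outgoing
task's registers when status.SD indicates dirty state, and fstate_save()
double-checks the FS field. The FS encoding (bits 14:13: Off, Initial,
Clean, Dirty) drives that decision; a standalone illustration of the bit
test, using the standard privileged-spec field values:

    #include <stdio.h>

    #define SR_FS       0x6000UL   /* status.FS, bits 14:13 */
    #define SR_FS_CLEAN 0x4000UL
    #define SR_FS_DIRTY 0x6000UL

    int main(void)
    {
    	unsigned long status = SR_FS_DIRTY;   /* task touched the FPU */

    	if ((status & SR_FS) == SR_FS_DIRTY) {
    		puts("save fstate, then mark clean");
    		status = (status & ~SR_FS) | SR_FS_CLEAN;
    	}
    	printf("status.FS now 0x%lx\n", status & SR_FS);
    	return 0;
    }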
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
new file mode 100644
index 000000000..49350c8bd
--- /dev/null
+++ b/arch/riscv/include/asm/syscall.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California, Berkeley
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_RISCV_SYSCALL_H
+#define _ASM_RISCV_SYSCALL_H
+
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+/* The array of function pointers for syscalls. */
+extern void *sys_call_table[];
+
+/*
+ * Only the low 32 bits of the syscall number (a7) are meaningful, so we
+ * return int. This importantly ignores the high bits on 64-bit, so
+ * comparisons sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a7;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+}
+
+static inline void syscall_set_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ const unsigned long *args)
+{
+ regs->orig_a0 = args[0];
+ args++;
+ memcpy(&regs->a1, args, 5 * sizeof(regs->a1));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+ return AUDIT_ARCH_RISCV64;
+#else
+ return AUDIT_ARCH_RISCV32;
+#endif
+}
+
+#endif /* _ASM_RISCV_SYSCALL_H */
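
A hedged sketch of how a tracing hook might consume these accessors;
trace_sys_enter_example() is invented for illustration. Note that args[0]
comes from orig_a0, not a0, so the first argument survives even after the
return value overwrites a0:

    static void trace_sys_enter_example(struct task_struct *task,
    				    struct pt_regs *regs)
    {
    	unsigned long args[6];
    	int nr = syscall_get_nr(task, regs);

    	syscall_get_arguments(task, regs, args);   /* orig_a0, a1..a5 */
    	pr_debug("syscall %d(%lx, %lx, ...)\n", nr, args[0], args[1]);
    }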
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
new file mode 100644
index 000000000..c55e0a1f0
--- /dev/null
+++ b/arch/riscv/include/asm/thread_info.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#include <asm/page.h>
+#include <linux/const.h>
+
+#ifdef CONFIG_KASAN
+#define KASAN_STACK_ORDER 1
+#else
+#define KASAN_STACK_ORDER 0
+#endif
+
+/* thread information allocation */
+#ifdef CONFIG_64BIT
+#define THREAD_SIZE_ORDER (2 + KASAN_STACK_ORDER)
+#else
+#define THREAD_SIZE_ORDER (1 + KASAN_STACK_ORDER)
+#endif
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct change, the assembly constants
+ * in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0. This means that
+ * tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0=>preemptible, <0=>BUG */
+ /*
+ * These stack pointers are overwritten on every system call or
+ * exception. SP is also saved to the stack so it can be recovered
+ * when overwritten.
+ */
+ long kernel_sp; /* Kernel stack pointer */
+ long user_sp; /* User stack pointer */
+ int cpu;
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_SYSCALL_TRACE 0 /* syscall trace active */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_SYSCALL_TRACEPOINT 6 /* syscall tracepoint instrumentation */
+#define TIF_SYSCALL_AUDIT 7 /* syscall auditing */
+#define TIF_SECCOMP 8 /* syscall secure computing */
+#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
+
+#define _TIF_SYSCALL_TRACE (1 << TIF_SYSCALL_TRACE)
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_SYSCALL_TRACEPOINT (1 << TIF_SYSCALL_TRACEPOINT)
+#define _TIF_SYSCALL_AUDIT (1 << TIF_SYSCALL_AUDIT)
+#define _TIF_SECCOMP (1 << TIF_SECCOMP)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+
+#define _TIF_WORK_MASK \
+ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_SIGNAL)
+
+#define _TIF_SYSCALL_WORK \
+ (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_TRACEPOINT | _TIF_SYSCALL_AUDIT | \
+ _TIF_SECCOMP)
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
new file mode 100644
index 000000000..a06697846
--- /dev/null
+++ b/arch/riscv/include/asm/timex.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TIMEX_H
+#define _ASM_RISCV_TIMEX_H
+
+#include <asm/csr.h>
+
+typedef unsigned long cycles_t;
+
+#ifdef CONFIG_RISCV_M_MODE
+
+#include <asm/clint.h>
+
+#ifdef CONFIG_64BIT
+static inline cycles_t get_cycles(void)
+{
+ return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val));
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+/*
+ * Much like MIPS, we may not have a viable counter to use at an early point
+ * in the boot process. Fall back to random_get_entropy_fallback() until
+ * the CLINT time register has been mapped.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ if (unlikely(clint_time_val == NULL))
+ return random_get_entropy_fallback();
+ return get_cycles();
+}
+#define random_get_entropy() random_get_entropy()
+
+#else /* CONFIG_RISCV_M_MODE */
+
+static inline cycles_t get_cycles(void)
+{
+ return csr_read(CSR_TIME);
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return csr_read(CSR_TIMEH);
+}
+#define get_cycles_hi get_cycles_hi
+
+#endif /* !CONFIG_RISCV_M_MODE */
+
+#ifdef CONFIG_64BIT
+static inline u64 get_cycles64(void)
+{
+ return get_cycles();
+}
+#else /* CONFIG_64BIT */
+static inline u64 get_cycles64(void)
+{
+ u32 hi, lo;
+
+ do {
+ hi = get_cycles_hi();
+ lo = get_cycles();
+ } while (hi != get_cycles_hi());
+
+ return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+static inline int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = get_cycles();
+ return 0;
+}
+
+#endif /* _ASM_RISCV_TIMEX_H */
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
new file mode 100644
index 000000000..120bcf2ed
--- /dev/null
+++ b/arch/riscv/include/asm/tlb.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TLB_H
+#define _ASM_RISCV_TLB_H
+
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_flush tlb_flush
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ flush_tlb_mm(tlb->mm);
+}
+
+#endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
new file mode 100644
index 000000000..394cfbccd
--- /dev/null
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -0,0 +1,56 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TLBFLUSH_H
+#define _ASM_RISCV_TLBFLUSH_H
+
+#include <linux/mm_types.h>
+#include <asm/smp.h>
+
+#ifdef CONFIG_MMU
+static inline void local_flush_tlb_all(void)
+{
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+}
+
+/* Flush one page from local TLB */
+static inline void local_flush_tlb_page(unsigned long addr)
+{
+ __asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory");
+}
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#define local_flush_tlb_page(addr) do { } while (0)
+#endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#else /* CONFIG_SMP && CONFIG_MMU */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ local_flush_tlb_all();
+}
+
+#define flush_tlb_mm(mm) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
+/* Flush a range of kernel pages */
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
new file mode 100644
index 000000000..66af6abfe
--- /dev/null
+++ b/arch/riscv/include/asm/uaccess.h
@@ -0,0 +1,490 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This file was copied from include/asm-generic/uaccess.h
+ */
+
+#ifndef _ASM_RISCV_UACCESS_H
+#define _ASM_RISCV_UACCESS_H
+
+#include <asm/pgtable.h> /* for TASK_SIZE */
+
+/*
+ * User space memory access functions
+ */
+#ifdef CONFIG_MMU
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/byteorder.h>
+#include <asm/extable.h>
+#include <asm/asm.h>
+
+#define __enable_user_access() \
+ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
+#define __disable_user_access() \
+ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
+
+/**
+ * access_ok: - Checks if a user space pointer is valid
+ * @addr: User space pointer to start of block to check
+ * @size: Size of block to check
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * Checks if a pointer to a block of memory in user space is valid.
+ *
+ * Returns true (nonzero) if the memory block may be valid, false (zero)
+ * if it is definitely invalid.
+ *
+ * Note that, depending on architecture, this function probably just
+ * checks that the pointer is in the user space range - after calling
+ * this function, memory access functions may still return -EFAULT.
+ */
+#define access_ok(addr, size) ({ \
+ __chk_user_ptr(addr); \
+ likely(__access_ok((unsigned long __force)(addr), (size))); \
+})
+
+/*
+ * Ensure that the range [addr, addr+size) is within the process's
+ * address space
+ */
+static inline int __access_ok(unsigned long addr, unsigned long size)
+{
+ return size <= TASK_SIZE && addr <= TASK_SIZE - size;
+}
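
The two-subtraction form above is deliberate: the obvious
"addr + size <= TASK_SIZE" can wrap around for a huge size and wrongly
succeed. A standalone demonstration, with an illustrative 64-bit TASK_SIZE:

    #include <stdio.h>

    #define TASK_SIZE 0x4000000000UL

    static int naive(unsigned long addr, unsigned long size)
    {
    	return addr + size <= TASK_SIZE;   /* wraps for huge size */
    }

    static int safe(unsigned long addr, unsigned long size)
    {
    	return size <= TASK_SIZE && addr <= TASK_SIZE - size;
    }

    int main(void)
    {
    	unsigned long addr = 0x3fffffffffUL, size = ~0UL;

    	/* naive wrongly accepts; safe rejects */
    	printf("naive=%d safe=%d\n", naive(addr, size), safe(addr, size));
    	return 0;
    }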
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+#define __LSW 0
+#define __MSW 1
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the address
+ * space - it must have been done previously with a separate "access_ok()"
+ * call.
+ */
+
+#define __get_user_asm(insn, x, ptr, err) \
+do { \
+ uintptr_t __tmp; \
+ __typeof__(x) __x; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %1, %3\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "3:\n" \
+ " li %0, %4\n" \
+ " li %1, 0\n" \
+ " jump 2b, %2\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err), "=&r" (__x), "=r" (__tmp) \
+ : "m" (*(ptr)), "i" (-EFAULT)); \
+ (x) = __x; \
+} while (0)
+
+#ifdef CONFIG_64BIT
+#define __get_user_8(x, ptr, err) \
+ __get_user_asm("ld", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#define __get_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u32 __lo, __hi; \
+ uintptr_t __tmp; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " lw %1, %4\n" \
+ "2:\n" \
+ " lw %2, %5\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "4:\n" \
+ " li %0, %6\n" \
+ " li %1, 0\n" \
+ " li %2, 0\n" \
+ " jump 3b, %3\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 4b\n" \
+ " " RISCV_PTR " 2b, 4b\n" \
+ " .previous" \
+ : "+r" (err), "=&r" (__lo), "=r" (__hi), \
+ "=r" (__tmp) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW]), \
+ "i" (-EFAULT)); \
+ (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
+ (((u64)__hi << 32) | __lo))); \
+} while (0)
+#endif /* CONFIG_64BIT */
+
+#define __get_user_nocheck(x, __gu_ptr, __gu_err) \
+do { \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __get_user_asm("lb", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("lh", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("lw", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 8: \
+ __get_user_8((x), __gu_ptr, __gu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+} while (0)
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ long __gu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+ __get_user_nocheck(x, __gu_ptr, __gu_err); \
+ __disable_user_access(); \
+ \
+ __gu_err; \
+})
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __get_user((x), __p) : \
+ ((x) = (__force __typeof__(x))0, -EFAULT); \
+})
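
A typical call site, sketched with a hypothetical function: get_user() both validates the pointer and performs the load, so only the return value needs checking:

    /* Hypothetical example, not part of this patch. */
    long example_read_flag(int __user *uptr)
    {
        int val;

        if (get_user(val, uptr))  /* -EFAULT on a bad pointer */
            return -EFAULT;
        return val ? 1 : 0;
    }
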
+
+#define __put_user_asm(insn, x, ptr, err) \
+do { \
+ uintptr_t __tmp; \
+ __typeof__(*(ptr)) __x = x; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %z3, %2\n" \
+ "2:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "3:\n" \
+ " li %0, %4\n" \
+ " jump 2b, %1\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 3b\n" \
+ " .previous" \
+ : "+r" (err), "=r" (__tmp), "=m" (*(ptr)) \
+ : "rJ" (__x), "i" (-EFAULT)); \
+} while (0)
+
+#ifdef CONFIG_64BIT
+#define __put_user_8(x, ptr, err) \
+ __put_user_asm("sd", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#define __put_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u64 __x = (__typeof__((x)-(x)))(x); \
+ uintptr_t __tmp; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " sw %z4, %2\n" \
+ "2:\n" \
+ " sw %z5, %3\n" \
+ "3:\n" \
+ " .section .fixup,\"ax\"\n" \
+ " .balign 4\n" \
+ "4:\n" \
+ " li %0, %6\n" \
+ " jump 3b, %1\n" \
+ " .previous\n" \
+ " .section __ex_table,\"a\"\n" \
+ " .balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 4b\n" \
+ " " RISCV_PTR " 2b, 4b\n" \
+ " .previous" \
+ : "+r" (err), "=r" (__tmp), \
+ "=m" (__ptr[__LSW]), \
+ "=m" (__ptr[__MSW]) \
+ : "rJ" (__x), "rJ" (__x >> 32), "i" (-EFAULT)); \
+} while (0)
+#endif /* CONFIG_64BIT */
+
+#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
+do { \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 8: \
+ __put_user_8((x), __gu_ptr, __pu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+} while (0)
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*__gu_ptr) __val = (x); \
+ long __pu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+ __put_user_nocheck(__val, __gu_ptr, __pu_err); \
+ __disable_user_access(); \
+ \
+ __pu_err; \
+})
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __put_user((x), __p) : \
+ -EFAULT; \
+})
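
The write path mirrors the read path; a hypothetical sketch of the usual error propagation:

    /* Hypothetical example, not part of this patch. */
    long example_report_count(unsigned int __user *uptr, unsigned int count)
    {
        return put_user(count, uptr);  /* 0 on success, -EFAULT on fault */
    }
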
+
+
+unsigned long __must_check __asm_copy_to_user(void __user *to,
+ const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user(void *to,
+ const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __asm_copy_from_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __asm_copy_to_user(to, from, n);
+}
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern long __must_check strlen_user(const char __user *str);
+extern long __must_check strnlen_user(const char __user *str, long n);
+
+extern
+unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+
+static inline
+unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ return access_ok(to, n) ?
+ __clear_user(to, n) : n;
+}
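
Note the return convention: like raw_copy_{to,from}_user(), __clear_user() reports the number of bytes it could not process rather than an errno, so callers typically convert. A hedged sketch:

    /* Hypothetical wrapper illustrating the return convention. */
    static long zero_user_buffer(void __user *to, unsigned long n)
    {
        unsigned long left = clear_user(to, n);  /* bytes NOT cleared */

        return left ? -EFAULT : 0;
    }
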
+
+/*
+ * Atomic compare-and-exchange, but with a fixup for userspace faults. Faults
+ * will set "err" to -EFAULT, while successful accesses return the previous
+ * value.
+ */
+#define __cmpxchg_user(ptr, old, new, err, size, lrb, scb) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ __typeof__(err) __err = 0; \
+ register unsigned int __rc; \
+ __enable_user_access(); \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0:\n" \
+ " lr.w" #scb " %[ret], %[ptr]\n" \
+ " bne %[ret], %z[old], 1f\n" \
+ " sc.w" #lrb " %[rc], %z[new], %[ptr]\n" \
+ " bnez %[rc], 0b\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ ".balign 4\n" \
+ "2:\n" \
+ " li %[err], %[efault]\n" \
+ " jump 1b, %[rc]\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 2b\n" \
+ ".previous\n" \
+ : [ret] "=&r" (__ret), \
+ [rc] "=&r" (__rc), \
+ [ptr] "+A" (*__ptr), \
+ [err] "=&r" (__err) \
+ : [old] "rJ" (__old), \
+ [new] "rJ" (__new), \
+ [efault] "i" (-EFAULT)); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0:\n" \
+ " lr.d" #scb " %[ret], %[ptr]\n" \
+ " bne %[ret], %z[old], 1f\n" \
+ " sc.d" #lrb " %[rc], %z[new], %[ptr]\n" \
+ " bnez %[rc], 0b\n" \
+ "1:\n" \
+ ".section .fixup,\"ax\"\n" \
+ ".balign 4\n" \
+ "2:\n" \
+ " li %[err], %[efault]\n" \
+ " jump 1b, %[rc]\n" \
+ ".previous\n" \
+ ".section __ex_table,\"a\"\n" \
+ ".balign " RISCV_SZPTR "\n" \
+ " " RISCV_PTR " 1b, 2b\n" \
+ ".previous\n" \
+ : [ret] "=&r" (__ret), \
+ [rc] "=&r" (__rc), \
+ [ptr] "+A" (*__ptr), \
+ [err] "=&r" (__err) \
+ : [old] "rJ" (__old), \
+ [new] "rJ" (__new), \
+ [efault] "i" (-EFAULT)); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __disable_user_access(); \
+ (err) = __err; \
+ __ret; \
+})
+
+#define HAVE_GET_KERNEL_NOFAULT
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ long __kr_err; \
+ \
+ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ long __kr_err; \
+ \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
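
Unlike get_user()/put_user(), these helpers branch to a caller-supplied label on a fault instead of returning an error code. A sketch of the calling pattern, with illustrative names:

    /* Sketch: read a long from a possibly-bogus kernel address. */
    static long probe_kernel_long(const long *src, long *dst)
    {
        __get_kernel_nofault(dst, src, long, fault);
        return 0;
    fault:
        return -EFAULT;
    }
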
+
+#else /* CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
+#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
new file mode 100644
index 000000000..977ee6181
--- /dev/null
+++ b/arch/riscv/include/asm/unistd.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+/*
+ * There is explicitly no include guard here because this file is expected to
+ * be included multiple times.
+ */
+
+#define __ARCH_WANT_SYS_CLONE
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
new file mode 100644
index 000000000..1453a2f56
--- /dev/null
+++ b/arch/riscv/include/asm/vdso.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_VDSO_H
+#define _ASM_RISCV_VDSO_H
+
+#include <linux/types.h>
+
+#ifndef CONFIG_GENERIC_TIME_VSYSCALL
+struct vdso_data {
+};
+#endif
+
+/*
+ * The VDSO symbols are mapped into Linux so we can just use regular symbol
+ * addressing to get their offsets in userspace. The symbols are mapped at an
+ * offset of 0, but since the linker must support setting weak undefined
+ * symbols to the absolute address 0 it also happens to support other low
+ * addresses even when the code model suggests those low addresses would not
+ * otherwise be available.
+ */
+#define VDSO_SYMBOL(base, name) \
+({ \
+ extern const char __vdso_##name[]; \
+ (void __user *)((unsigned long)(base) + __vdso_##name); \
+})
+
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+
+#endif /* _ASM_RISCV_VDSO_H */
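
Since the symbols link at offset 0, each symbol's link-time "address" is simply its offset into the mapping, and VDSO_SYMBOL() reduces to base-plus-offset arithmetic. With invented numbers:

    /* Illustration only: values are made up. */
    static void __user *example_vdso_symbol(void)
    {
        unsigned long base    = 0x3ff7ffe000UL;  /* where the vDSO is mapped */
        unsigned long sym_off = 0x800;           /* link address == offset */

        return (void __user *)(base + sym_off);  /* what VDSO_SYMBOL() yields */
    }
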
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 000000000..df6ea65c1
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 000000000..f839f16e0
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,81 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+	 * M-mode to obtain the value of CSR_TIME. Hence, unlike on other
+	 * architectures, no fence instructions surround the csr_read().

+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644
index 000000000..134388cba
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
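
cpu_relax() is meant for busy-wait loops, where the long-latency divide keeps the hart from hammering the memory system while it spins. A sketch of the canonical call site, with an illustrative flag:

    /* Sketch: poll a flag politely; 'ready' is illustrative. */
    static void wait_for_flag(volatile int *ready)
    {
        while (!*ready)
            cpu_relax();  /* stall + compiler barrier per iteration */
    }
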
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 000000000..82fd5d83b
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Return the vDSO data page the kernel updates to keep user space in sync
+ * with kernel timekeeping.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
diff --git a/arch/riscv/include/asm/vermagic.h b/arch/riscv/include/asm/vermagic.h
new file mode 100644
index 000000000..7b9441a57
--- /dev/null
+++ b/arch/riscv/include/asm/vermagic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC "riscv"
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
new file mode 100644
index 000000000..ff9abc00d
--- /dev/null
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -0,0 +1,4 @@
+#ifndef _ASM_RISCV_VMALLOC_H
+#define _ASM_RISCV_VMALLOC_H
+
+#endif /* _ASM_RISCV_VMALLOC_H */
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 000000000..7c086ac6e
--- /dev/null
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * Derived from arch/x86/include/asm/word-at-a-time.h
+ */
+
+#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
+#define _ASM_RISCV_WORD_AT_A_TIME_H
+
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long val,
+ unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val,
+ unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
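
The trick behind has_zero() is that subtracting 0x01 from every byte borrows through a zero byte and sets its high bit, while `& ~val` filters bytes whose high bit was already set; bits above the first zero byte may be spurious, which is why create_zero_mask() discards them. A worked example, buildable in user space (fls64() approximated with a builtin):

    #include <stdio.h>
    #include <stdint.h>

    #define ONES  0x0101010101010101UL
    #define HIGHS 0x8080808080808080UL

    static int fls64(uint64_t x) { return 64 - __builtin_clzll(x); }

    int main(void)
    {
        /* Little-endian packing of "ab\0c....": byte 2 is the NUL. */
        uint64_t v = 0x2e2e2e2e63006261UL;

        uint64_t bits  = (v - ONES) & ~v & HIGHS;    /* has_zero(): 0x800000 */
        uint64_t zmask = ((bits - 1) & ~bits) >> 7;  /* create_zero_mask(): 0xffff */
        int idx = fls64(zmask) >> 3;                 /* find_zero(): 2 */

        printf("bits=%#lx zmask=%#lx idx=%d\n", bits, zmask, idx);
        return 0;
    }
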
diff --git a/arch/riscv/include/uapi/asm/Kbuild b/arch/riscv/include/uapi/asm/Kbuild
new file mode 100644
index 000000000..f66554cd5
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/Kbuild
@@ -0,0 +1 @@
+# SPDX-License-Identifier: GPL-2.0
diff --git a/arch/riscv/include/uapi/asm/auxvec.h b/arch/riscv/include/uapi/asm/auxvec.h
new file mode 100644
index 000000000..32c73ba1d
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/auxvec.h
@@ -0,0 +1,37 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_AUXVEC_H
+#define _UAPI_ASM_RISCV_AUXVEC_H
+
+/* vDSO location */
+#define AT_SYSINFO_EHDR 33
+
+/*
+ * The set of entries below represent more extensive information
+ * about the caches, in the form of two entries per cache type,
+ * one entry containing the cache size in bytes, and the other
+ * containing the cache line size in bytes in the bottom 16 bits
+ * and the cache associativity in the next 16 bits.
+ *
+ * The associativity is such that if N is the 16-bit value, the
+ * cache is N-way set associative. A value of 0xffff means fully
+ * associative, and a value of 1 means direct-mapped.
+ *
+ * For all these fields, a value of 0 means that the information
+ * is not known.
+ */
+#define AT_L1I_CACHESIZE 40
+#define AT_L1I_CACHEGEOMETRY 41
+#define AT_L1D_CACHESIZE 42
+#define AT_L1D_CACHEGEOMETRY 43
+#define AT_L2_CACHESIZE 44
+#define AT_L2_CACHEGEOMETRY 45
+
+/* entries in ARCH_DLINFO */
+#define AT_VECTOR_SIZE_ARCH 7
+
+#endif /* _UAPI_ASM_RISCV_AUXVEC_H */
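
User space reads these entries with getauxval(); per the layout described above, the low 16 bits carry the line size and the next 16 bits the associativity. A hedged sketch:

    #include <stdio.h>
    #include <sys/auxv.h>

    #ifndef AT_L1D_CACHEGEOMETRY
    #define AT_L1D_CACHEGEOMETRY 43   /* matches the definition above */
    #endif

    int main(void)
    {
        unsigned long geo = getauxval(AT_L1D_CACHEGEOMETRY);

        if (!geo)
            return 1;  /* 0 means the information is not known */
        printf("L1D: line %lu bytes, %lu-way\n",
               geo & 0xffff, (geo >> 16) & 0xffff);
        return 0;
    }
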
diff --git a/arch/riscv/include/uapi/asm/bitsperlong.h b/arch/riscv/include/uapi/asm/bitsperlong.h
new file mode 100644
index 000000000..7d0b32e3b
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/bitsperlong.h
@@ -0,0 +1,14 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_BITSPERLONG_H
+#define _UAPI_ASM_RISCV_BITSPERLONG_H
+
+#define __BITS_PER_LONG (__SIZEOF_POINTER__ * 8)
+
+#include <asm-generic/bitsperlong.h>
+
+#endif /* _UAPI_ASM_RISCV_BITSPERLONG_H */
diff --git a/arch/riscv/include/uapi/asm/bpf_perf_event.h b/arch/riscv/include/uapi/asm/bpf_perf_event.h
new file mode 100644
index 000000000..6cb1c2823
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/bpf_perf_event.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef _UAPI__ASM_BPF_PERF_EVENT_H__
+#define _UAPI__ASM_BPF_PERF_EVENT_H__
+
+#include <asm/ptrace.h>
+
+typedef struct user_regs_struct bpf_user_pt_regs_t;
+
+#endif /* _UAPI__ASM_BPF_PERF_EVENT_H__ */
diff --git a/arch/riscv/include/uapi/asm/byteorder.h b/arch/riscv/include/uapi/asm/byteorder.h
new file mode 100644
index 000000000..f671e16bf
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/byteorder.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_BYTEORDER_H
+#define _UAPI_ASM_RISCV_BYTEORDER_H
+
+#include <linux/byteorder/little_endian.h>
+
+#endif /* _UAPI_ASM_RISCV_BYTEORDER_H */
diff --git a/arch/riscv/include/uapi/asm/elf.h b/arch/riscv/include/uapi/asm/elf.h
new file mode 100644
index 000000000..d696d6610
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/elf.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+
+#ifndef _UAPI_ASM_RISCV_ELF_H
+#define _UAPI_ASM_RISCV_ELF_H
+
+#include <asm/ptrace.h>
+
+/* ELF register definitions */
+typedef unsigned long elf_greg_t;
+typedef struct user_regs_struct elf_gregset_t;
+#define ELF_NGREG (sizeof(elf_gregset_t) / sizeof(elf_greg_t))
+
+/* We don't support f without d, or q. */
+typedef __u64 elf_fpreg_t;
+typedef union __riscv_fp_state elf_fpregset_t;
+#define ELF_NFPREG (sizeof(struct __riscv_d_ext_state) / sizeof(elf_fpreg_t))
+
+#if __riscv_xlen == 64
+#define ELF_RISCV_R_SYM(r_info) ELF64_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF64_R_TYPE(r_info)
+#else
+#define ELF_RISCV_R_SYM(r_info) ELF32_R_SYM(r_info)
+#define ELF_RISCV_R_TYPE(r_info) ELF32_R_TYPE(r_info)
+#endif
+
+/*
+ * RISC-V relocation types
+ */
+
+/* Relocation types used by the dynamic linker */
+#define R_RISCV_NONE 0
+#define R_RISCV_32 1
+#define R_RISCV_64 2
+#define R_RISCV_RELATIVE 3
+#define R_RISCV_COPY 4
+#define R_RISCV_JUMP_SLOT 5
+#define R_RISCV_TLS_DTPMOD32 6
+#define R_RISCV_TLS_DTPMOD64 7
+#define R_RISCV_TLS_DTPREL32 8
+#define R_RISCV_TLS_DTPREL64 9
+#define R_RISCV_TLS_TPREL32 10
+#define R_RISCV_TLS_TPREL64 11
+
+/* Relocation types not used by the dynamic linker */
+#define R_RISCV_BRANCH 16
+#define R_RISCV_JAL 17
+#define R_RISCV_CALL 18
+#define R_RISCV_CALL_PLT 19
+#define R_RISCV_GOT_HI20 20
+#define R_RISCV_TLS_GOT_HI20 21
+#define R_RISCV_TLS_GD_HI20 22
+#define R_RISCV_PCREL_HI20 23
+#define R_RISCV_PCREL_LO12_I 24
+#define R_RISCV_PCREL_LO12_S 25
+#define R_RISCV_HI20 26
+#define R_RISCV_LO12_I 27
+#define R_RISCV_LO12_S 28
+#define R_RISCV_TPREL_HI20 29
+#define R_RISCV_TPREL_LO12_I 30
+#define R_RISCV_TPREL_LO12_S 31
+#define R_RISCV_TPREL_ADD 32
+#define R_RISCV_ADD8 33
+#define R_RISCV_ADD16 34
+#define R_RISCV_ADD32 35
+#define R_RISCV_ADD64 36
+#define R_RISCV_SUB8 37
+#define R_RISCV_SUB16 38
+#define R_RISCV_SUB32 39
+#define R_RISCV_SUB64 40
+#define R_RISCV_GNU_VTINHERIT 41
+#define R_RISCV_GNU_VTENTRY 42
+#define R_RISCV_ALIGN 43
+#define R_RISCV_RVC_BRANCH 44
+#define R_RISCV_RVC_JUMP 45
+#define R_RISCV_LUI 46
+#define R_RISCV_GPREL_I 47
+#define R_RISCV_GPREL_S 48
+#define R_RISCV_TPREL_I 49
+#define R_RISCV_TPREL_S 50
+#define R_RISCV_RELAX 51
+#define R_RISCV_SUB6 52
+#define R_RISCV_SET6 53
+#define R_RISCV_SET8 54
+#define R_RISCV_SET16 55
+#define R_RISCV_SET32 56
+#define R_RISCV_32_PCREL 57
+
+
+#endif /* _UAPI_ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/uapi/asm/hwcap.h b/arch/riscv/include/uapi/asm/hwcap.h
new file mode 100644
index 000000000..46dc3f5ee
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/hwcap.h
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+#ifndef _UAPI_ASM_RISCV_HWCAP_H
+#define _UAPI_ASM_RISCV_HWCAP_H
+
+/*
+ * Linux saves the floating-point registers according to the ISA Linux is
+ * executing on, as opposed to the ISA the user program is compiled for. This
+ * is necessary for a handful of esoteric use cases: for example, userspace
+ * threading libraries must be able to examine the actual machine state in
+ * order to fully reconstruct the state of a thread.
+ */
+#define COMPAT_HWCAP_ISA_I (1 << ('I' - 'A'))
+#define COMPAT_HWCAP_ISA_M (1 << ('M' - 'A'))
+#define COMPAT_HWCAP_ISA_A (1 << ('A' - 'A'))
+#define COMPAT_HWCAP_ISA_F (1 << ('F' - 'A'))
+#define COMPAT_HWCAP_ISA_D (1 << ('D' - 'A'))
+#define COMPAT_HWCAP_ISA_C (1 << ('C' - 'A'))
+
+#endif /* _UAPI_ASM_RISCV_HWCAP_H */
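
These bits are exported through AT_HWCAP, so user space can probe the ISA the kernel is running on; a sketch:

    #include <stdio.h>
    #include <elf.h>        /* AT_HWCAP */
    #include <sys/auxv.h>

    #define ISA_BIT(c) (1UL << ((c) - 'A'))  /* same encoding as above */

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

        printf("compressed (C): %s\n", (hwcap & ISA_BIT('C')) ? "yes" : "no");
        printf("double FP  (D): %s\n", (hwcap & ISA_BIT('D')) ? "yes" : "no");
        return 0;
    }
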
diff --git a/arch/riscv/include/uapi/asm/perf_regs.h b/arch/riscv/include/uapi/asm/perf_regs.h
new file mode 100644
index 000000000..196f964bf
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/perf_regs.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#ifndef _ASM_RISCV_PERF_REGS_H
+#define _ASM_RISCV_PERF_REGS_H
+
+enum perf_event_riscv_regs {
+ PERF_REG_RISCV_PC,
+ PERF_REG_RISCV_RA,
+ PERF_REG_RISCV_SP,
+ PERF_REG_RISCV_GP,
+ PERF_REG_RISCV_TP,
+ PERF_REG_RISCV_T0,
+ PERF_REG_RISCV_T1,
+ PERF_REG_RISCV_T2,
+ PERF_REG_RISCV_S0,
+ PERF_REG_RISCV_S1,
+ PERF_REG_RISCV_A0,
+ PERF_REG_RISCV_A1,
+ PERF_REG_RISCV_A2,
+ PERF_REG_RISCV_A3,
+ PERF_REG_RISCV_A4,
+ PERF_REG_RISCV_A5,
+ PERF_REG_RISCV_A6,
+ PERF_REG_RISCV_A7,
+ PERF_REG_RISCV_S2,
+ PERF_REG_RISCV_S3,
+ PERF_REG_RISCV_S4,
+ PERF_REG_RISCV_S5,
+ PERF_REG_RISCV_S6,
+ PERF_REG_RISCV_S7,
+ PERF_REG_RISCV_S8,
+ PERF_REG_RISCV_S9,
+ PERF_REG_RISCV_S10,
+ PERF_REG_RISCV_S11,
+ PERF_REG_RISCV_T3,
+ PERF_REG_RISCV_T4,
+ PERF_REG_RISCV_T5,
+ PERF_REG_RISCV_T6,
+ PERF_REG_RISCV_MAX,
+};
+#endif /* _ASM_RISCV_PERF_REGS_H */
diff --git a/arch/riscv/include/uapi/asm/ptrace.h b/arch/riscv/include/uapi/asm/ptrace.h
new file mode 100644
index 000000000..882547f6b
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/ptrace.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_PTRACE_H
+#define _UAPI_ASM_RISCV_PTRACE_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+
+/*
+ * User-mode register state for core dumps, ptrace, sigcontext
+ *
+ * This decouples struct pt_regs from the userspace ABI.
+ * struct user_regs_struct must form a prefix of struct pt_regs.
+ */
+struct user_regs_struct {
+ unsigned long pc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+};
+
+struct __riscv_f_ext_state {
+ __u32 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_d_ext_state {
+ __u64 f[32];
+ __u32 fcsr;
+};
+
+struct __riscv_q_ext_state {
+ __u64 f[64] __attribute__((aligned(16)));
+ __u32 fcsr;
+ /*
+ * Reserved for expansion of sigcontext structure. Currently zeroed
+ * upon signal, and must be zero upon sigreturn.
+ */
+ __u32 reserved[3];
+};
+
+union __riscv_fp_state {
+ struct __riscv_f_ext_state f;
+ struct __riscv_d_ext_state d;
+ struct __riscv_q_ext_state q;
+};
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _UAPI_ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/uapi/asm/setup.h b/arch/riscv/include/uapi/asm/setup.h
new file mode 100644
index 000000000..66b13a522
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/setup.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+
+#ifndef _UAPI_ASM_RISCV_SETUP_H
+#define _UAPI_ASM_RISCV_SETUP_H
+
+#define COMMAND_LINE_SIZE 1024
+
+#endif /* _UAPI_ASM_RISCV_SETUP_H */
diff --git a/arch/riscv/include/uapi/asm/sigcontext.h b/arch/riscv/include/uapi/asm/sigcontext.h
new file mode 100644
index 000000000..84f2dfcfd
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/sigcontext.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _UAPI_ASM_RISCV_SIGCONTEXT_H
+#define _UAPI_ASM_RISCV_SIGCONTEXT_H
+
+#include <asm/ptrace.h>
+
+/*
+ * Signal context structure
+ *
+ * This contains the context saved before a signal handler is invoked;
+ * it is restored by sys_sigreturn / sys_rt_sigreturn.
+ */
+struct sigcontext {
+ struct user_regs_struct sc_regs;
+ union __riscv_fp_state sc_fpregs;
+};
+
+#endif /* _UAPI_ASM_RISCV_SIGCONTEXT_H */
diff --git a/arch/riscv/include/uapi/asm/ucontext.h b/arch/riscv/include/uapi/asm/ucontext.h
new file mode 100644
index 000000000..44eb99395
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/ucontext.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive, Inc.
+ *
+ * This file was copied from arch/arm64/include/uapi/asm/ucontext.h
+ */
+#ifndef _UAPI_ASM_RISCV_UCONTEXT_H
+#define _UAPI_ASM_RISCV_UCONTEXT_H
+
+#include <linux/types.h>
+
+struct ucontext {
+ unsigned long uc_flags;
+ struct ucontext *uc_link;
+ stack_t uc_stack;
+ sigset_t uc_sigmask;
+ /* There's some padding here to allow sigset_t to be expanded in the
+ * future. Though this is unlikely, other architectures put uc_sigmask
+ * at the end of this structure and explicitly state it can be
+ * expanded, so we didn't want to box ourselves in here. */
+ __u8 __unused[1024 / 8 - sizeof(sigset_t)];
+ /* We can't put uc_sigmask at the end of this structure because we need
+ * to be able to expand sigcontext in the future. For example, the
+ * vector ISA extension will almost certainly add ISA state. We want
+ * to ensure all user-visible ISA state can be saved and restored via a
+ * ucontext, so we're putting this at the end in order to allow for
+ * infinite extensibility. Since we know this will be extended and we
+ * assume sigset_t won't be extended an extreme amount, we're
+ * prioritizing this. */
+ struct sigcontext uc_mcontext;
+};
+
+#endif /* _UAPI_ASM_RISCV_UCONTEXT_H */
diff --git a/arch/riscv/include/uapi/asm/unistd.h b/arch/riscv/include/uapi/asm/unistd.h
new file mode 100644
index 000000000..8062996c2
--- /dev/null
+++ b/arch/riscv/include/uapi/asm/unistd.h
@@ -0,0 +1,44 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright (C) 2018 David Abdurachmanov <david.abdurachmanov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <https://www.gnu.org/licenses/>.
+ */
+
+#ifdef __LP64__
+#define __ARCH_WANT_NEW_STAT
+#define __ARCH_WANT_SET_GET_RLIMIT
+#endif /* __LP64__ */
+
+#define __ARCH_WANT_SYS_CLONE3
+
+#include <asm-generic/unistd.h>
+
+/*
+ * Allows the instruction cache to be flushed from userspace. Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart. There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * __NR_riscv_flush_icache is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller. We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+#ifndef __NR_riscv_flush_icache
+#define __NR_riscv_flush_icache (__NR_arch_specific_syscall + 15)
+#endif
+__SYSCALL(__NR_riscv_flush_icache, sys_riscv_flush_icache)
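
From user space the call is made through syscall(2); the sketch below assumes a flags value of 0 requests a flush visible to all threads, per the comment above (the uapi flag definitions live in a header outside this hunk):

    #include <unistd.h>
    #include <sys/syscall.h>

    /* Sketch: make freshly written JIT code in [start, end) fetchable.
     * Assumes flags == 0 means "flush for all threads". */
    static void flush_jit_region(void *start, void *end)
    {
        syscall(__NR_riscv_flush_icache, start, end, 0UL);
    }
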
diff --git a/arch/riscv/kernel/.gitignore b/arch/riscv/kernel/.gitignore
new file mode 100644
index 000000000..e052ed331
--- /dev/null
+++ b/arch/riscv/kernel/.gitignore
@@ -0,0 +1,2 @@
+# SPDX-License-Identifier: GPL-2.0-only
+/vmlinux.lds
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
new file mode 100644
index 000000000..bc49d5f23
--- /dev/null
+++ b/arch/riscv/kernel/Makefile
@@ -0,0 +1,63 @@
+# SPDX-License-Identifier: GPL-2.0-only
+#
+# Makefile for the RISC-V Linux kernel
+#
+
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_ftrace.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_patch.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_sbi.o = $(CC_FLAGS_FTRACE)
+endif
+
+extra-y += head.o
+extra-y += vmlinux.lds
+
+obj-y += soc.o
+obj-y += cpu.o
+obj-y += cpufeature.o
+obj-y += entry.o
+obj-y += irq.o
+obj-y += process.o
+obj-y += ptrace.o
+obj-y += reset.o
+obj-y += setup.o
+obj-y += signal.o
+obj-y += syscall_table.o
+obj-y += sys_riscv.o
+obj-y += time.o
+obj-y += traps.o
+obj-y += riscv_ksyms.o
+obj-y += stacktrace.o
+obj-y += cacheinfo.o
+obj-y += patch.o
+obj-$(CONFIG_MMU) += vdso.o vdso/
+
+obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
+obj-$(CONFIG_FPU) += fpu.o
+obj-$(CONFIG_SMP) += smpboot.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_SMP) += cpu_ops.o
+obj-$(CONFIG_SMP) += cpu_ops_spinwait.o
+obj-$(CONFIG_MODULES) += module.o
+obj-$(CONFIG_MODULE_SECTIONS) += module-sections.o
+
+obj-$(CONFIG_FUNCTION_TRACER) += mcount.o ftrace.o
+obj-$(CONFIG_DYNAMIC_FTRACE) += mcount-dyn.o
+
+obj-$(CONFIG_TRACE_IRQFLAGS) += trace_irq.o
+
+obj-$(CONFIG_RISCV_BASE_PMU) += perf_event.o
+obj-$(CONFIG_PERF_EVENTS) += perf_callchain.o
+obj-$(CONFIG_HAVE_PERF_REGS) += perf_regs.o
+obj-$(CONFIG_RISCV_SBI) += sbi.o
+ifeq ($(CONFIG_RISCV_SBI), y)
+obj-$(CONFIG_SMP) += cpu_ops_sbi.o
+endif
+obj-$(CONFIG_HOTPLUG_CPU) += cpu-hotplug.o
+obj-$(CONFIG_KGDB) += kgdb.o
+
+obj-$(CONFIG_JUMP_LABEL) += jump_label.o
+
+obj-$(CONFIG_EFI) += efi.o
+
+clean:
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
new file mode 100644
index 000000000..db203442c
--- /dev/null
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -0,0 +1,309 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#define GENERATING_ASM_OFFSETS
+
+#include <linux/kbuild.h>
+#include <linux/sched.h>
+#include <asm/thread_info.h>
+#include <asm/ptrace.h>
+
+void asm_offsets(void)
+{
+ OFFSET(TASK_THREAD_RA, task_struct, thread.ra);
+ OFFSET(TASK_THREAD_SP, task_struct, thread.sp);
+ OFFSET(TASK_THREAD_S0, task_struct, thread.s[0]);
+ OFFSET(TASK_THREAD_S1, task_struct, thread.s[1]);
+ OFFSET(TASK_THREAD_S2, task_struct, thread.s[2]);
+ OFFSET(TASK_THREAD_S3, task_struct, thread.s[3]);
+ OFFSET(TASK_THREAD_S4, task_struct, thread.s[4]);
+ OFFSET(TASK_THREAD_S5, task_struct, thread.s[5]);
+ OFFSET(TASK_THREAD_S6, task_struct, thread.s[6]);
+ OFFSET(TASK_THREAD_S7, task_struct, thread.s[7]);
+ OFFSET(TASK_THREAD_S8, task_struct, thread.s[8]);
+ OFFSET(TASK_THREAD_S9, task_struct, thread.s[9]);
+ OFFSET(TASK_THREAD_S10, task_struct, thread.s[10]);
+ OFFSET(TASK_THREAD_S11, task_struct, thread.s[11]);
+ OFFSET(TASK_TI_FLAGS, task_struct, thread_info.flags);
+ OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
+ OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
+ OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+ OFFSET(TASK_TI_CPU, task_struct, thread_info.cpu);
+
+ OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
+ OFFSET(TASK_THREAD_F1, task_struct, thread.fstate.f[1]);
+ OFFSET(TASK_THREAD_F2, task_struct, thread.fstate.f[2]);
+ OFFSET(TASK_THREAD_F3, task_struct, thread.fstate.f[3]);
+ OFFSET(TASK_THREAD_F4, task_struct, thread.fstate.f[4]);
+ OFFSET(TASK_THREAD_F5, task_struct, thread.fstate.f[5]);
+ OFFSET(TASK_THREAD_F6, task_struct, thread.fstate.f[6]);
+ OFFSET(TASK_THREAD_F7, task_struct, thread.fstate.f[7]);
+ OFFSET(TASK_THREAD_F8, task_struct, thread.fstate.f[8]);
+ OFFSET(TASK_THREAD_F9, task_struct, thread.fstate.f[9]);
+ OFFSET(TASK_THREAD_F10, task_struct, thread.fstate.f[10]);
+ OFFSET(TASK_THREAD_F11, task_struct, thread.fstate.f[11]);
+ OFFSET(TASK_THREAD_F12, task_struct, thread.fstate.f[12]);
+ OFFSET(TASK_THREAD_F13, task_struct, thread.fstate.f[13]);
+ OFFSET(TASK_THREAD_F14, task_struct, thread.fstate.f[14]);
+ OFFSET(TASK_THREAD_F15, task_struct, thread.fstate.f[15]);
+ OFFSET(TASK_THREAD_F16, task_struct, thread.fstate.f[16]);
+ OFFSET(TASK_THREAD_F17, task_struct, thread.fstate.f[17]);
+ OFFSET(TASK_THREAD_F18, task_struct, thread.fstate.f[18]);
+ OFFSET(TASK_THREAD_F19, task_struct, thread.fstate.f[19]);
+ OFFSET(TASK_THREAD_F20, task_struct, thread.fstate.f[20]);
+ OFFSET(TASK_THREAD_F21, task_struct, thread.fstate.f[21]);
+ OFFSET(TASK_THREAD_F22, task_struct, thread.fstate.f[22]);
+ OFFSET(TASK_THREAD_F23, task_struct, thread.fstate.f[23]);
+ OFFSET(TASK_THREAD_F24, task_struct, thread.fstate.f[24]);
+ OFFSET(TASK_THREAD_F25, task_struct, thread.fstate.f[25]);
+ OFFSET(TASK_THREAD_F26, task_struct, thread.fstate.f[26]);
+ OFFSET(TASK_THREAD_F27, task_struct, thread.fstate.f[27]);
+ OFFSET(TASK_THREAD_F28, task_struct, thread.fstate.f[28]);
+ OFFSET(TASK_THREAD_F29, task_struct, thread.fstate.f[29]);
+ OFFSET(TASK_THREAD_F30, task_struct, thread.fstate.f[30]);
+ OFFSET(TASK_THREAD_F31, task_struct, thread.fstate.f[31]);
+ OFFSET(TASK_THREAD_FCSR, task_struct, thread.fstate.fcsr);
+
+ DEFINE(PT_SIZE, sizeof(struct pt_regs));
+ OFFSET(PT_EPC, pt_regs, epc);
+ OFFSET(PT_RA, pt_regs, ra);
+ OFFSET(PT_FP, pt_regs, s0);
+ OFFSET(PT_S0, pt_regs, s0);
+ OFFSET(PT_S1, pt_regs, s1);
+ OFFSET(PT_S2, pt_regs, s2);
+ OFFSET(PT_S3, pt_regs, s3);
+ OFFSET(PT_S4, pt_regs, s4);
+ OFFSET(PT_S5, pt_regs, s5);
+ OFFSET(PT_S6, pt_regs, s6);
+ OFFSET(PT_S7, pt_regs, s7);
+ OFFSET(PT_S8, pt_regs, s8);
+ OFFSET(PT_S9, pt_regs, s9);
+ OFFSET(PT_S10, pt_regs, s10);
+ OFFSET(PT_S11, pt_regs, s11);
+ OFFSET(PT_SP, pt_regs, sp);
+ OFFSET(PT_TP, pt_regs, tp);
+ OFFSET(PT_A0, pt_regs, a0);
+ OFFSET(PT_A1, pt_regs, a1);
+ OFFSET(PT_A2, pt_regs, a2);
+ OFFSET(PT_A3, pt_regs, a3);
+ OFFSET(PT_A4, pt_regs, a4);
+ OFFSET(PT_A5, pt_regs, a5);
+ OFFSET(PT_A6, pt_regs, a6);
+ OFFSET(PT_A7, pt_regs, a7);
+ OFFSET(PT_T0, pt_regs, t0);
+ OFFSET(PT_T1, pt_regs, t1);
+ OFFSET(PT_T2, pt_regs, t2);
+ OFFSET(PT_T3, pt_regs, t3);
+ OFFSET(PT_T4, pt_regs, t4);
+ OFFSET(PT_T5, pt_regs, t5);
+ OFFSET(PT_T6, pt_regs, t6);
+ OFFSET(PT_GP, pt_regs, gp);
+ OFFSET(PT_ORIG_A0, pt_regs, orig_a0);
+ OFFSET(PT_STATUS, pt_regs, status);
+ OFFSET(PT_BADADDR, pt_regs, badaddr);
+ OFFSET(PT_CAUSE, pt_regs, cause);
+
+ /*
+	 * THREAD_{F,X}* might be larger than an S-type offset can handle, but
+ * these are used in performance-sensitive assembly so we can't resort
+ * to loading the long immediate every time.
+ */
+ DEFINE(TASK_THREAD_RA_RA,
+ offsetof(struct task_struct, thread.ra)
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_SP_RA,
+ offsetof(struct task_struct, thread.sp)
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S0_RA,
+ offsetof(struct task_struct, thread.s[0])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S1_RA,
+ offsetof(struct task_struct, thread.s[1])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S2_RA,
+ offsetof(struct task_struct, thread.s[2])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S3_RA,
+ offsetof(struct task_struct, thread.s[3])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S4_RA,
+ offsetof(struct task_struct, thread.s[4])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S5_RA,
+ offsetof(struct task_struct, thread.s[5])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S6_RA,
+ offsetof(struct task_struct, thread.s[6])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S7_RA,
+ offsetof(struct task_struct, thread.s[7])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S8_RA,
+ offsetof(struct task_struct, thread.s[8])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S9_RA,
+ offsetof(struct task_struct, thread.s[9])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S10_RA,
+ offsetof(struct task_struct, thread.s[10])
+ - offsetof(struct task_struct, thread.ra)
+ );
+ DEFINE(TASK_THREAD_S11_RA,
+ offsetof(struct task_struct, thread.s[11])
+ - offsetof(struct task_struct, thread.ra)
+ );
+
+ DEFINE(TASK_THREAD_F0_F0,
+ offsetof(struct task_struct, thread.fstate.f[0])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F1_F0,
+ offsetof(struct task_struct, thread.fstate.f[1])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F2_F0,
+ offsetof(struct task_struct, thread.fstate.f[2])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F3_F0,
+ offsetof(struct task_struct, thread.fstate.f[3])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F4_F0,
+ offsetof(struct task_struct, thread.fstate.f[4])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F5_F0,
+ offsetof(struct task_struct, thread.fstate.f[5])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F6_F0,
+ offsetof(struct task_struct, thread.fstate.f[6])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F7_F0,
+ offsetof(struct task_struct, thread.fstate.f[7])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F8_F0,
+ offsetof(struct task_struct, thread.fstate.f[8])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F9_F0,
+ offsetof(struct task_struct, thread.fstate.f[9])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F10_F0,
+ offsetof(struct task_struct, thread.fstate.f[10])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F11_F0,
+ offsetof(struct task_struct, thread.fstate.f[11])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F12_F0,
+ offsetof(struct task_struct, thread.fstate.f[12])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F13_F0,
+ offsetof(struct task_struct, thread.fstate.f[13])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F14_F0,
+ offsetof(struct task_struct, thread.fstate.f[14])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F15_F0,
+ offsetof(struct task_struct, thread.fstate.f[15])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F16_F0,
+ offsetof(struct task_struct, thread.fstate.f[16])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F17_F0,
+ offsetof(struct task_struct, thread.fstate.f[17])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F18_F0,
+ offsetof(struct task_struct, thread.fstate.f[18])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F19_F0,
+ offsetof(struct task_struct, thread.fstate.f[19])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F20_F0,
+ offsetof(struct task_struct, thread.fstate.f[20])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F21_F0,
+ offsetof(struct task_struct, thread.fstate.f[21])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F22_F0,
+ offsetof(struct task_struct, thread.fstate.f[22])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F23_F0,
+ offsetof(struct task_struct, thread.fstate.f[23])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F24_F0,
+ offsetof(struct task_struct, thread.fstate.f[24])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F25_F0,
+ offsetof(struct task_struct, thread.fstate.f[25])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F26_F0,
+ offsetof(struct task_struct, thread.fstate.f[26])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F27_F0,
+ offsetof(struct task_struct, thread.fstate.f[27])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F28_F0,
+ offsetof(struct task_struct, thread.fstate.f[28])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F29_F0,
+ offsetof(struct task_struct, thread.fstate.f[29])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F30_F0,
+ offsetof(struct task_struct, thread.fstate.f[30])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_F31_F0,
+ offsetof(struct task_struct, thread.fstate.f[31])
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+ DEFINE(TASK_THREAD_FCSR_F0,
+ offsetof(struct task_struct, thread.fstate.fcsr)
+ - offsetof(struct task_struct, thread.fstate.f[0])
+ );
+
+ /*
+ * We allocate a pt_regs on the stack when entering the kernel. This
+ * ensures the alignment is sane.
+ */
+ DEFINE(PT_SIZE_ON_STACK, ALIGN(sizeof(struct pt_regs), STACK_ALIGN));
+}
diff --git a/arch/riscv/kernel/cacheinfo.c b/arch/riscv/kernel/cacheinfo.c
new file mode 100644
index 000000000..90deabfe6
--- /dev/null
+++ b/arch/riscv/kernel/cacheinfo.c
@@ -0,0 +1,189 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/cpu.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <asm/cacheinfo.h>
+
+static struct riscv_cacheinfo_ops *rv_cache_ops;
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops)
+{
+ rv_cache_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_cacheinfo_ops);
+
+const struct attribute_group *
+cache_get_priv_group(struct cacheinfo *this_leaf)
+{
+ if (rv_cache_ops && rv_cache_ops->get_priv_group)
+ return rv_cache_ops->get_priv_group(this_leaf);
+ return NULL;
+}
+
+static struct cacheinfo *get_cacheinfo(u32 level, enum cache_type type)
+{
+ /*
+ * Using raw_smp_processor_id() elides a preemptability check, but this
+ * is really indicative of a larger problem: the cacheinfo UABI assumes
+	 * that cores have a homogeneous view of the cache hierarchy. That
+ * happens to be the case for the current set of RISC-V systems, but
+ * likely won't be true in general. Since there's no way to provide
+ * correct information for these systems via the current UABI we're
+ * just eliding the check for now.
+ */
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(raw_smp_processor_id());
+ struct cacheinfo *this_leaf;
+ int index;
+
+ for (index = 0; index < this_cpu_ci->num_leaves; index++) {
+ this_leaf = this_cpu_ci->info_list + index;
+ if (this_leaf->level == level && this_leaf->type == type)
+ return this_leaf;
+ }
+
+ return NULL;
+}
+
+uintptr_t get_cache_size(u32 level, enum cache_type type)
+{
+ struct cacheinfo *this_leaf = get_cacheinfo(level, type);
+
+ return this_leaf ? this_leaf->size : 0;
+}
+
+uintptr_t get_cache_geometry(u32 level, enum cache_type type)
+{
+ struct cacheinfo *this_leaf = get_cacheinfo(level, type);
+
+ return this_leaf ? (this_leaf->ways_of_associativity << 16 |
+ this_leaf->coherency_line_size) :
+ 0;
+}
+
+static void ci_leaf_init(struct cacheinfo *this_leaf, enum cache_type type,
+ unsigned int level, unsigned int size,
+ unsigned int sets, unsigned int line_size)
+{
+ this_leaf->level = level;
+ this_leaf->type = type;
+ this_leaf->size = size;
+ this_leaf->number_of_sets = sets;
+ this_leaf->coherency_line_size = line_size;
+
+ /*
+ * If the cache is fully associative, there is no need to
+ * check the other properties.
+ */
+ if (sets == 1)
+ return;
+
+ /*
+	 * Set the number of ways for an n-way set-associative cache,
+	 * making sure all properties are greater than zero.
+ */
+ if (sets > 0 && size > 0 && line_size > 0)
+ this_leaf->ways_of_associativity = (size / sets) / line_size;
+}
+
+static void fill_cacheinfo(struct cacheinfo **this_leaf,
+ struct device_node *node, unsigned int level)
+{
+ unsigned int size, sets, line_size;
+
+ if (!of_property_read_u32(node, "cache-size", &size) &&
+ !of_property_read_u32(node, "cache-block-size", &line_size) &&
+ !of_property_read_u32(node, "cache-sets", &sets)) {
+ ci_leaf_init((*this_leaf)++, CACHE_TYPE_UNIFIED, level, size, sets, line_size);
+ }
+
+ if (!of_property_read_u32(node, "i-cache-size", &size) &&
+ !of_property_read_u32(node, "i-cache-sets", &sets) &&
+ !of_property_read_u32(node, "i-cache-block-size", &line_size)) {
+ ci_leaf_init((*this_leaf)++, CACHE_TYPE_INST, level, size, sets, line_size);
+ }
+
+ if (!of_property_read_u32(node, "d-cache-size", &size) &&
+ !of_property_read_u32(node, "d-cache-sets", &sets) &&
+ !of_property_read_u32(node, "d-cache-block-size", &line_size)) {
+ ci_leaf_init((*this_leaf)++, CACHE_TYPE_DATA, level, size, sets, line_size);
+ }
+}
+
+int init_cache_level(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
+ int levels = 0, leaves = 0, level;
+
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+ if (leaves > 0)
+ levels = 1;
+
+ prev = np;
+ while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
+ if (!of_device_is_compatible(np, "cache"))
+ break;
+ if (of_property_read_u32(np, "cache-level", &level))
+ break;
+ if (level <= levels)
+ break;
+ if (of_property_read_bool(np, "cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "i-cache-size"))
+ ++leaves;
+ if (of_property_read_bool(np, "d-cache-size"))
+ ++leaves;
+ levels = level;
+ }
+
+ of_node_put(np);
+ this_cpu_ci->num_levels = levels;
+ this_cpu_ci->num_leaves = leaves;
+
+ return 0;
+}
+
+int populate_cache_leaves(unsigned int cpu)
+{
+ struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
+ struct cacheinfo *this_leaf = this_cpu_ci->info_list;
+ struct device_node *np = of_cpu_device_node_get(cpu);
+ struct device_node *prev = NULL;
+ int levels = 1, level = 1;
+
+ /* Level 1 caches in cpu node */
+ fill_cacheinfo(&this_leaf, np, level);
+
+ /* Next level caches in cache nodes */
+ prev = np;
+ while ((np = of_find_next_cache_node(np))) {
+ of_node_put(prev);
+ prev = np;
+
+ if (!of_device_is_compatible(np, "cache"))
+ break;
+ if (of_property_read_u32(np, "cache-level", &level))
+ break;
+ if (level <= levels)
+ break;
+
+ fill_cacheinfo(&this_leaf, np, level);
+
+ levels = level;
+ }
+ of_node_put(np);
+
+ return 0;
+}
diff --git a/arch/riscv/kernel/cpu-hotplug.c b/arch/riscv/kernel/cpu-hotplug.c
new file mode 100644
index 000000000..df84e0c13
--- /dev/null
+++ b/arch/riscv/kernel/cpu-hotplug.c
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/cpu.h>
+#include <linux/sched/hotplug.h>
+#include <asm/irq.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+
+void cpu_stop(void);
+void arch_cpu_idle_dead(void)
+{
+ cpu_stop();
+}
+
+bool cpu_has_hotplug(unsigned int cpu)
+{
+ if (cpu_ops[cpu]->cpu_stop)
+ return true;
+
+ return false;
+}
+
+/*
+ * __cpu_disable runs on the processor to be shutdown.
+ */
+int __cpu_disable(void)
+{
+ int ret = 0;
+ unsigned int cpu = smp_processor_id();
+
+ if (!cpu_ops[cpu] || !cpu_ops[cpu]->cpu_stop)
+ return -EOPNOTSUPP;
+
+ if (cpu_ops[cpu]->cpu_disable)
+ ret = cpu_ops[cpu]->cpu_disable(cpu);
+
+ if (ret)
+ return ret;
+
+ remove_cpu_topology(cpu);
+ set_cpu_online(cpu, false);
+ irq_migrate_all_off_this_cpu();
+
+ return ret;
+}
+
+/*
+ * Called on the thread which is asking for a CPU to be shutdown.
+ */
+void __cpu_die(unsigned int cpu)
+{
+ int ret = 0;
+
+ if (!cpu_wait_death(cpu, 5)) {
+ pr_err("CPU %u: didn't die\n", cpu);
+ return;
+ }
+ pr_notice("CPU%u: off\n", cpu);
+
+	/* Verify with the firmware that the CPU is really stopped */
+ if (cpu_ops[cpu]->cpu_is_stopped)
+ ret = cpu_ops[cpu]->cpu_is_stopped(cpu);
+ if (ret)
+ pr_warn("CPU%d may not have stopped: %d\n", cpu, ret);
+}
+
+/*
+ * Called from the idle thread for the CPU which has been shutdown.
+ */
+void cpu_stop(void)
+{
+ idle_task_exit();
+
+ (void)cpu_report_death();
+
+ cpu_ops[smp_processor_id()]->cpu_stop();
+ /* It should never reach here */
+ BUG();
+}
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
new file mode 100644
index 000000000..6d59e6906
--- /dev/null
+++ b/arch/riscv/kernel/cpu.c
@@ -0,0 +1,133 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/seq_file.h>
+#include <linux/of.h>
+#include <asm/smp.h>
+
+/*
+ * Returns the hart ID of the given device tree node, or -ENODEV if the node
+ * isn't an enabled and valid RISC-V hart node.
+ */
+int riscv_of_processor_hartid(struct device_node *node)
+{
+ const char *isa;
+ u32 hart;
+
+ if (!of_device_is_compatible(node, "riscv")) {
+ pr_warn("Found incompatible CPU\n");
+ return -ENODEV;
+ }
+
+ if (of_property_read_u32(node, "reg", &hart)) {
+ pr_warn("Found CPU without hart ID\n");
+ return -ENODEV;
+ }
+
+ if (!of_device_is_available(node)) {
+ pr_info("CPU with hartid=%d is not available\n", hart);
+ return -ENODEV;
+ }
+
+ if (of_property_read_string(node, "riscv,isa", &isa)) {
+ pr_warn("CPU with hartid=%d has no \"riscv,isa\" property\n", hart);
+ return -ENODEV;
+ }
+ if (isa[0] != 'r' || isa[1] != 'v') {
+ pr_warn("CPU with hartid=%d has an invalid ISA of \"%s\"\n", hart, isa);
+ return -ENODEV;
+ }
+
+ return hart;
+}
+
+/*
+ * Find hart ID of the CPU DT node under which given DT node falls.
+ *
+ * To achieve this, we walk up the DT tree until we find an active
+ * RISC-V core (HART) node and extract the hart ID from it.
+ */
+int riscv_of_parent_hartid(struct device_node *node)
+{
+ for (; node; node = node->parent) {
+ if (of_device_is_compatible(node, "riscv"))
+ return riscv_of_processor_hartid(node);
+ }
+
+ return -1;
+}
+
+#ifdef CONFIG_PROC_FS
+
+static void print_isa(struct seq_file *f, const char *isa)
+{
+ /* Print the entire ISA as it is */
+ seq_puts(f, "isa\t\t: ");
+ seq_write(f, isa, strlen(isa));
+ seq_puts(f, "\n");
+}
+
+static void print_mmu(struct seq_file *f, const char *mmu_type)
+{
+#if defined(CONFIG_32BIT)
+ if (strcmp(mmu_type, "riscv,sv32") != 0)
+ return;
+#elif defined(CONFIG_64BIT)
+ if (strcmp(mmu_type, "riscv,sv39") != 0 &&
+ strcmp(mmu_type, "riscv,sv48") != 0)
+ return;
+#endif
+
+ seq_printf(f, "mmu\t\t: %s\n", mmu_type+6);
+}
+
+static void *c_start(struct seq_file *m, loff_t *pos)
+{
+ *pos = cpumask_next(*pos - 1, cpu_online_mask);
+ if ((*pos) < nr_cpu_ids)
+ return (void *)(uintptr_t)(1 + *pos);
+ return NULL;
+}
+
+static void *c_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return c_start(m, pos);
+}
+
+static void c_stop(struct seq_file *m, void *v)
+{
+}
+
+static int c_show(struct seq_file *m, void *v)
+{
+ unsigned long cpu_id = (unsigned long)v - 1;
+ struct device_node *node = of_get_cpu_node(cpu_id, NULL);
+ const char *compat, *isa, *mmu;
+
+ seq_printf(m, "processor\t: %lu\n", cpu_id);
+ seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
+ if (!of_property_read_string(node, "riscv,isa", &isa))
+ print_isa(m, isa);
+ if (!of_property_read_string(node, "mmu-type", &mmu))
+ print_mmu(m, mmu);
+ if (!of_property_read_string(node, "compatible", &compat)
+ && strcmp(compat, "riscv"))
+ seq_printf(m, "uarch\t\t: %s\n", compat);
+ seq_puts(m, "\n");
+ of_node_put(node);
+
+ return 0;
+}
+
+const struct seq_operations cpuinfo_op = {
+ .start = c_start,
+ .next = c_next,
+ .stop = c_stop,
+ .show = c_show
+};
+
+#endif /* CONFIG_PROC_FS */
diff --git a/arch/riscv/kernel/cpu_ops.c b/arch/riscv/kernel/cpu_ops.c
new file mode 100644
index 000000000..1985884fe
--- /dev/null
+++ b/arch/riscv/kernel/cpu_ops.c
@@ -0,0 +1,46 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/errno.h>
+#include <linux/mm.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+const struct cpu_operations *cpu_ops[NR_CPUS] __ro_after_init;
+
+void *__cpu_up_stack_pointer[NR_CPUS] __section(".data");
+void *__cpu_up_task_pointer[NR_CPUS] __section(".data");
+
+extern const struct cpu_operations cpu_ops_sbi;
+extern const struct cpu_operations cpu_ops_spinwait;
+
+void cpu_update_secondary_bootdata(unsigned int cpuid,
+ struct task_struct *tidle)
+{
+ int hartid = cpuid_to_hartid_map(cpuid);
+
+ /* Make sure tidle is updated */
+ smp_mb();
+ WRITE_ONCE(__cpu_up_stack_pointer[hartid],
+ task_stack_page(tidle) + THREAD_SIZE);
+ WRITE_ONCE(__cpu_up_task_pointer[hartid], tidle);
+}
+
+void __init cpu_set_ops(int cpuid)
+{
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+ if (sbi_probe_extension(SBI_EXT_HSM) > 0) {
+ if (!cpuid)
+ pr_info("SBI v0.2 HSM extension detected\n");
+ cpu_ops[cpuid] = &cpu_ops_sbi;
+ } else
+#endif
+ cpu_ops[cpuid] = &cpu_ops_spinwait;
+}
diff --git a/arch/riscv/kernel/cpu_ops_sbi.c b/arch/riscv/kernel/cpu_ops_sbi.c
new file mode 100644
index 000000000..685fae72b
--- /dev/null
+++ b/arch/riscv/kernel/cpu_ops_sbi.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * HSM extension and cpu_ops implementation.
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+extern char secondary_start_sbi[];
+const struct cpu_operations cpu_ops_sbi;
+
+static int sbi_hsm_hart_start(unsigned long hartid, unsigned long saddr,
+ unsigned long priv)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_START,
+ hartid, saddr, priv, 0, 0, 0);
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int sbi_hsm_hart_stop(void)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STOP, 0, 0, 0, 0, 0, 0);
+
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return 0;
+}
+
+static int sbi_hsm_hart_get_status(unsigned long hartid)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_HSM, SBI_EXT_HSM_HART_STATUS,
+ hartid, 0, 0, 0, 0, 0);
+ if (ret.error)
+ return sbi_err_map_linux_errno(ret.error);
+ else
+ return ret.value;
+}
+#endif
+
+static int sbi_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+{
+ int rc;
+ unsigned long boot_addr = __pa_symbol(secondary_start_sbi);
+ int hartid = cpuid_to_hartid_map(cpuid);
+
+ cpu_update_secondary_bootdata(cpuid, tidle);
+ rc = sbi_hsm_hart_start(hartid, boot_addr, 0);
+
+ return rc;
+}
+
+static int sbi_cpu_prepare(unsigned int cpuid)
+{
+ if (!cpu_ops_sbi.cpu_start) {
+ pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+#ifdef CONFIG_HOTPLUG_CPU
+static int sbi_cpu_disable(unsigned int cpuid)
+{
+ if (!cpu_ops_sbi.cpu_stop)
+ return -EOPNOTSUPP;
+ return 0;
+}
+
+static void sbi_cpu_stop(void)
+{
+ int ret;
+
+ ret = sbi_hsm_hart_stop();
+ pr_crit("Unable to stop the cpu %u (%d)\n", smp_processor_id(), ret);
+}
+
+static int sbi_cpu_is_stopped(unsigned int cpuid)
+{
+ int rc;
+ int hartid = cpuid_to_hartid_map(cpuid);
+
+ rc = sbi_hsm_hart_get_status(hartid);
+
+ if (rc == SBI_HSM_HART_STATUS_STOPPED)
+ return 0;
+ return rc;
+}
+#endif
+
+const struct cpu_operations cpu_ops_sbi = {
+ .name = "sbi",
+ .cpu_prepare = sbi_cpu_prepare,
+ .cpu_start = sbi_cpu_start,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = sbi_cpu_disable,
+ .cpu_stop = sbi_cpu_stop,
+ .cpu_is_stopped = sbi_cpu_is_stopped,
+#endif
+};
diff --git a/arch/riscv/kernel/cpu_ops_spinwait.c b/arch/riscv/kernel/cpu_ops_spinwait.c
new file mode 100644
index 000000000..b2c957bb6
--- /dev/null
+++ b/arch/riscv/kernel/cpu_ops_spinwait.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/errno.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <asm/cpu_ops.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+const struct cpu_operations cpu_ops_spinwait;
+
+static int spinwait_cpu_prepare(unsigned int cpuid)
+{
+ if (!cpu_ops_spinwait.cpu_start) {
+ pr_err("cpu start method not defined for CPU [%d]\n", cpuid);
+ return -ENODEV;
+ }
+ return 0;
+}
+
+static int spinwait_cpu_start(unsigned int cpuid, struct task_struct *tidle)
+{
+ /*
+ * In this protocol, all cpus boot of their own accord. _start
+ * selects the first cpu to boot the kernel and causes the remainder
+ * of the cpus to spin in a loop, waiting for their stack pointer to
+ * be set up by that main cpu. Writing to the bootdata
+ * (i.e. __cpu_up_stack_pointer) signals to the spinning cpus that
+ * they can continue the boot process (a sketch of the waiting side
+ * follows this function).
+ */
+ cpu_update_secondary_bootdata(cpuid, tidle);
+
+ return 0;
+}
+
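+/*
+ * A minimal sketch (assumed, C-like pseudocode) of the waiting side of
+ * this handshake, which head.S implements in assembly in
+ * .Lwait_for_cpu_up:
+ *
+ *	while (!__cpu_up_stack_pointer[hartid] ||
+ *	       !__cpu_up_task_pointer[hartid])
+ *		;	// spin until the boot cpu publishes our sp/tp
+ *	sp = __cpu_up_stack_pointer[hartid];
+ *	tp = __cpu_up_task_pointer[hartid];
+ */
+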
+const struct cpu_operations cpu_ops_spinwait = {
+ .name = "spinwait",
+ .cpu_prepare = spinwait_cpu_prepare,
+ .cpu_start = spinwait_cpu_start,
+};
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
new file mode 100644
index 000000000..ac202f44a
--- /dev/null
+++ b/arch/riscv/kernel/cpufeature.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copied from arch/arm64/kernel/cpufeature.c
+ *
+ * Copyright (C) 2015 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/bitmap.h>
+#include <linux/of.h>
+#include <asm/processor.h>
+#include <asm/hwcap.h>
+#include <asm/smp.h>
+#include <asm/switch_to.h>
+
+unsigned long elf_hwcap __read_mostly;
+
+/* Host ISA bitmap */
+static DECLARE_BITMAP(riscv_isa, RISCV_ISA_EXT_MAX) __read_mostly;
+
+#ifdef CONFIG_FPU
+bool has_fpu __read_mostly;
+#endif
+
+/**
+ * riscv_isa_extension_base() - Get base extension word
+ *
+ * @isa_bitmap: ISA bitmap to use
+ * Return: base extension word as unsigned long value
+ *
+ * NOTE: If isa_bitmap is NULL, the Host ISA bitmap will be used.
+ */
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap)
+{
+ if (!isa_bitmap)
+ return riscv_isa[0];
+ return isa_bitmap[0];
+}
+EXPORT_SYMBOL_GPL(riscv_isa_extension_base);
+
+/**
+ * __riscv_isa_extension_available() - Check whether given extension
+ * is available or not
+ *
+ * @isa_bitmap: ISA bitmap to use
+ * @bit: bit position of the desired extension
+ * Return: true or false
+ *
+ * NOTE: If isa_bitmap is NULL, the Host ISA bitmap will be used.
+ */
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit)
+{
+ const unsigned long *bmap = (isa_bitmap) ? isa_bitmap : riscv_isa;
+
+ if (bit >= RISCV_ISA_EXT_MAX)
+ return false;
+
+ return test_bit(bit, bmap) ? true : false;
+}
+EXPORT_SYMBOL_GPL(__riscv_isa_extension_available);
+
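+/*
+ * Illustrative use, assuming (as riscv_fill_hwcap() below arranges)
+ * that single-letter extension 'x' maps to bit ('x' - 'a'):
+ *
+ *	if (__riscv_isa_extension_available(NULL, 'c' - 'a'))
+ *		;	// the C (compressed) extension is present on all harts
+ */
+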
+void riscv_fill_hwcap(void)
+{
+ struct device_node *node;
+ const char *isa;
+ char print_str[BITS_PER_LONG + 1];
+ size_t i, j, isa_len;
+ static unsigned long isa2hwcap[256] = {0};
+
+ isa2hwcap['i'] = isa2hwcap['I'] = COMPAT_HWCAP_ISA_I;
+ isa2hwcap['m'] = isa2hwcap['M'] = COMPAT_HWCAP_ISA_M;
+ isa2hwcap['a'] = isa2hwcap['A'] = COMPAT_HWCAP_ISA_A;
+ isa2hwcap['f'] = isa2hwcap['F'] = COMPAT_HWCAP_ISA_F;
+ isa2hwcap['d'] = isa2hwcap['D'] = COMPAT_HWCAP_ISA_D;
+ isa2hwcap['c'] = isa2hwcap['C'] = COMPAT_HWCAP_ISA_C;
+
+ elf_hwcap = 0;
+
+ bitmap_zero(riscv_isa, RISCV_ISA_EXT_MAX);
+
+ for_each_of_cpu_node(node) {
+ unsigned long this_hwcap = 0;
+ unsigned long this_isa = 0;
+
+ if (riscv_of_processor_hartid(node) < 0)
+ continue;
+
+ if (of_property_read_string(node, "riscv,isa", &isa)) {
+ pr_warn("Unable to find \"riscv,isa\" devicetree entry\n");
+ continue;
+ }
+
+ i = 0;
+ isa_len = strlen(isa);
+#if IS_ENABLED(CONFIG_32BIT)
+ if (!strncmp(isa, "rv32", 4))
+ i += 4;
+#elif IS_ENABLED(CONFIG_64BIT)
+ if (!strncmp(isa, "rv64", 4))
+ i += 4;
+#endif
+ for (; i < isa_len; ++i) {
+ this_hwcap |= isa2hwcap[(unsigned char)(isa[i])];
+ /*
+ * TODO: X, Y and Z extension parsing for the Host ISA
+ * bitmap will be added in the future.
+ */
+ if ('a' <= isa[i] && isa[i] < 'x')
+ this_isa |= (1UL << (isa[i] - 'a'));
+ }
+
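+ /*
+ * Example (illustrative): for isa = "rv64imafdc", the loop above
+ * sets this_hwcap = I|M|A|F|D|C and sets bits ('i' - 'a'),
+ * ('m' - 'a'), ('a' - 'a'), ('f' - 'a'), ('d' - 'a') and
+ * ('c' - 'a') in this_isa.
+ */
+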
+ /*
+ * All "okay" hart should have same isa. Set HWCAP based on
+ * common capabilities of every "okay" hart, in case they don't
+ * have.
+ */
+ if (elf_hwcap)
+ elf_hwcap &= this_hwcap;
+ else
+ elf_hwcap = this_hwcap;
+
+ if (riscv_isa[0])
+ riscv_isa[0] &= this_isa;
+ else
+ riscv_isa[0] = this_isa;
+ }
+
+ /*
+ * We don't support systems with F but without D, so mask those out
+ * here.
+ */
+ if ((elf_hwcap & COMPAT_HWCAP_ISA_F) && !(elf_hwcap & COMPAT_HWCAP_ISA_D)) {
+ pr_info("This kernel does not support systems with F but not D\n");
+ elf_hwcap &= ~COMPAT_HWCAP_ISA_F;
+ }
+
+ memset(print_str, 0, sizeof(print_str));
+ for (i = 0, j = 0; i < BITS_PER_LONG; i++)
+ if (riscv_isa[0] & BIT_MASK(i))
+ print_str[j++] = (char)('a' + i);
+ pr_info("riscv: ISA extensions %s\n", print_str);
+
+ memset(print_str, 0, sizeof(print_str));
+ for (i = 0, j = 0; i < BITS_PER_LONG; i++)
+ if (elf_hwcap & BIT_MASK(i))
+ print_str[j++] = (char)('a' + i);
+ pr_info("riscv: ELF capabilities %s\n", print_str);
+
+#ifdef CONFIG_FPU
+ if (elf_hwcap & (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D))
+ has_fpu = true;
+#endif
+}
diff --git a/arch/riscv/kernel/efi-header.S b/arch/riscv/kernel/efi-header.S
new file mode 100644
index 000000000..8e733aa48
--- /dev/null
+++ b/arch/riscv/kernel/efi-header.S
@@ -0,0 +1,111 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Adapted from arch/arm64/kernel/efi-header.S
+ */
+
+#include <linux/pe.h>
+#include <linux/sizes.h>
+
+ .macro __EFI_PE_HEADER
+ .long PE_MAGIC
+coff_header:
+#ifdef CONFIG_64BIT
+ .short IMAGE_FILE_MACHINE_RISCV64 // Machine
+#else
+ .short IMAGE_FILE_MACHINE_RISCV32 // Machine
+#endif
+ .short section_count // NumberOfSections
+ .long 0 // TimeDateStamp
+ .long 0 // PointerToSymbolTable
+ .long 0 // NumberOfSymbols
+ .short section_table - optional_header // SizeOfOptionalHeader
+ .short IMAGE_FILE_DEBUG_STRIPPED | \
+ IMAGE_FILE_EXECUTABLE_IMAGE | \
+ IMAGE_FILE_LINE_NUMS_STRIPPED // Characteristics
+
+optional_header:
+#ifdef CONFIG_64BIT
+ .short PE_OPT_MAGIC_PE32PLUS // PE32+ format
+#else
+ .short PE_OPT_MAGIC_PE32 // PE32 format
+#endif
+ .byte 0x02 // MajorLinkerVersion
+ .byte 0x14 // MinorLinkerVersion
+ .long __pecoff_text_end - efi_header_end // SizeOfCode
+ .long __pecoff_data_virt_size // SizeOfInitializedData
+ .long 0 // SizeOfUninitializedData
+ .long __efistub_efi_pe_entry - _start // AddressOfEntryPoint
+ .long efi_header_end - _start // BaseOfCode
+#ifdef CONFIG_32BIT
+ .long __pecoff_text_end - _start // BaseOfData
+#endif
+
+extra_header_fields:
+ .quad 0 // ImageBase
+ .long PECOFF_SECTION_ALIGNMENT // SectionAlignment
+ .long PECOFF_FILE_ALIGNMENT // FileAlignment
+ .short 0 // MajorOperatingSystemVersion
+ .short 0 // MinorOperatingSystemVersion
+ .short LINUX_EFISTUB_MAJOR_VERSION // MajorImageVersion
+ .short LINUX_EFISTUB_MINOR_VERSION // MinorImageVersion
+ .short 0 // MajorSubsystemVersion
+ .short 0 // MinorSubsystemVersion
+ .long 0 // Win32VersionValue
+
+ .long _end - _start // SizeOfImage
+
+ // Everything before the kernel image is considered part of the header
+ .long efi_header_end - _start // SizeOfHeaders
+ .long 0 // CheckSum
+ .short IMAGE_SUBSYSTEM_EFI_APPLICATION // Subsystem
+ .short 0 // DllCharacteristics
+ .quad 0 // SizeOfStackReserve
+ .quad 0 // SizeOfStackCommit
+ .quad 0 // SizeOfHeapReserve
+ .quad 0 // SizeOfHeapCommit
+ .long 0 // LoaderFlags
+ .long (section_table - .) / 8 // NumberOfRvaAndSizes
+
+ .quad 0 // ExportTable
+ .quad 0 // ImportTable
+ .quad 0 // ResourceTable
+ .quad 0 // ExceptionTable
+ .quad 0 // CertificationTable
+ .quad 0 // BaseRelocationTable
+
+ // Section table
+section_table:
+ .ascii ".text\0\0\0"
+ .long __pecoff_text_end - efi_header_end // VirtualSize
+ .long efi_header_end - _start // VirtualAddress
+ .long __pecoff_text_end - efi_header_end // SizeOfRawData
+ .long efi_header_end - _start // PointerToRawData
+
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_CODE | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_EXECUTE // Characteristics
+
+ .ascii ".data\0\0\0"
+ .long __pecoff_data_virt_size // VirtualSize
+ .long __pecoff_text_end - _start // VirtualAddress
+ .long __pecoff_data_raw_size // SizeOfRawData
+ .long __pecoff_text_end - _start // PointerToRawData
+
+ .long 0 // PointerToRelocations
+ .long 0 // PointerToLineNumbers
+ .short 0 // NumberOfRelocations
+ .short 0 // NumberOfLineNumbers
+ .long IMAGE_SCN_CNT_INITIALIZED_DATA | \
+ IMAGE_SCN_MEM_READ | \
+ IMAGE_SCN_MEM_WRITE // Characteristics
+
+ .set section_count, (. - section_table) / 40
+
+ .balign 0x1000
+efi_header_end:
+ .endm
diff --git a/arch/riscv/kernel/efi.c b/arch/riscv/kernel/efi.c
new file mode 100644
index 000000000..1aa540350
--- /dev/null
+++ b/arch/riscv/kernel/efi.c
@@ -0,0 +1,96 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Adapted from arch/arm64/kernel/efi.c
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+
+#include <asm/efi.h>
+#include <asm/pgtable.h>
+#include <asm/pgtable-bits.h>
+
+/*
+ * Only regions of type EFI_RUNTIME_SERVICES_CODE need to be
+ * executable, everything else can be mapped with the XN bits
+ * set. Also take the new (optional) RO/XP bits into account.
+ */
+static __init pgprot_t efimem_to_pgprot_map(efi_memory_desc_t *md)
+{
+ u64 attr = md->attribute;
+ u32 type = md->type;
+
+ if (type == EFI_MEMORY_MAPPED_IO)
+ return PAGE_KERNEL;
+
+ /* R-- */
+ if ((attr & (EFI_MEMORY_XP | EFI_MEMORY_RO)) ==
+ (EFI_MEMORY_XP | EFI_MEMORY_RO))
+ return PAGE_KERNEL_READ;
+
+ /* R-X */
+ if (attr & EFI_MEMORY_RO)
+ return PAGE_KERNEL_READ_EXEC;
+
+ /* RW- */
+ if (((attr & (EFI_MEMORY_RP | EFI_MEMORY_WP | EFI_MEMORY_XP)) ==
+ EFI_MEMORY_XP) ||
+ type != EFI_RUNTIME_SERVICES_CODE)
+ return PAGE_KERNEL;
+
+ /* RWX */
+ return PAGE_KERNEL_EXEC;
+}
+
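+/*
+ * Summary of the mapping above (restating the code, not new policy):
+ *
+ *	XP|RO attributes             -> PAGE_KERNEL_READ       (R--)
+ *	RO attribute                 -> PAGE_KERNEL_READ_EXEC  (R-X)
+ *	XP or non-code region        -> PAGE_KERNEL            (RW-)
+ *	runtime services code, else  -> PAGE_KERNEL_EXEC       (RWX)
+ */
+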
+int __init efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md)
+{
+ pgprot_t prot = __pgprot(pgprot_val(efimem_to_pgprot_map(md)) &
+ ~(_PAGE_GLOBAL));
+ int i;
+
+ /* RISC-V maps one page at a time */
+ for (i = 0; i < md->num_pages; i++)
+ create_pgd_mapping(mm->pgd, md->virt_addr + i * PAGE_SIZE,
+ md->phys_addr + i * PAGE_SIZE,
+ PAGE_SIZE, prot);
+ return 0;
+}
+
+static int __init set_permissions(pte_t *ptep, unsigned long addr, void *data)
+{
+ efi_memory_desc_t *md = data;
+ pte_t pte = READ_ONCE(*ptep);
+ unsigned long val;
+
+ if (md->attribute & EFI_MEMORY_RO) {
+ val = pte_val(pte) & ~_PAGE_WRITE;
+ val |= _PAGE_READ;
+ pte = __pte(val);
+ }
+ if (md->attribute & EFI_MEMORY_XP) {
+ val = pte_val(pte) & ~_PAGE_EXEC;
+ pte = __pte(val);
+ }
+ set_pte(ptep, pte);
+
+ return 0;
+}
+
+int __init efi_set_mapping_permissions(struct mm_struct *mm,
+ efi_memory_desc_t *md)
+{
+ BUG_ON(md->type != EFI_RUNTIME_SERVICES_CODE &&
+ md->type != EFI_RUNTIME_SERVICES_DATA);
+
+ /*
+ * Calling apply_to_page_range() is only safe on regions that are
+ * guaranteed to be mapped down to pages. Since we are only called
+ * for regions that have been mapped using efi_create_mapping() above
+ * (and this is checked by the generic Memory Attributes table parsing
+ * routines), there is no need to check that again here.
+ */
+ return apply_to_page_range(mm, md->virt_addr,
+ md->num_pages << EFI_PAGE_SHIFT,
+ set_permissions, md);
+}
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
new file mode 100644
index 000000000..5214c578a
--- /dev/null
+++ b/arch/riscv/kernel/entry.S
@@ -0,0 +1,477 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+
+#if !IS_ENABLED(CONFIG_PREEMPTION)
+.set resume_kernel, restore_all
+#endif
+
+ENTRY(handle_exception)
+ /*
+ * If coming from userspace, preserve the user thread pointer and load
+ * the kernel thread pointer. If we came from the kernel, the scratch
+ * register will contain 0, and we should continue on the current TP.
+ */
+ csrrw tp, CSR_SCRATCH, tp
+ bnez tp, _save_context
+
+_restore_kernel_tpsp:
+ csrr tp, CSR_SCRATCH
+ REG_S sp, TASK_TI_KERNEL_SP(tp)
+_save_context:
+ REG_S sp, TASK_TI_USER_SP(tp)
+ REG_L sp, TASK_TI_KERNEL_SP(tp)
+ addi sp, sp, -(PT_SIZE_ON_STACK)
+ REG_S x1, PT_RA(sp)
+ REG_S x3, PT_GP(sp)
+ REG_S x5, PT_T0(sp)
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+
+ /*
+ * Disable user-mode memory access as it should only be set in the
+ * actual user copy routines.
+ *
+ * Disable the FPU to detect illegal usage of floating point in kernel
+ * space.
+ */
+ li t0, SR_SUM | SR_FS
+
+ REG_L s0, TASK_TI_USER_SP(tp)
+ csrrc s1, CSR_STATUS, t0
+ csrr s2, CSR_EPC
+ csrr s3, CSR_TVAL
+ csrr s4, CSR_CAUSE
+ csrr s5, CSR_SCRATCH
+ REG_S s0, PT_SP(sp)
+ REG_S s1, PT_STATUS(sp)
+ REG_S s2, PT_EPC(sp)
+ REG_S s3, PT_BADADDR(sp)
+ REG_S s4, PT_CAUSE(sp)
+ REG_S s5, PT_TP(sp)
+
+ /*
+ * Set the scratch register to 0, so that if a recursive exception
+ * occurs, the exception vector knows it came from the kernel
+ */
+ csrw CSR_SCRATCH, x0
+
+ /* Load the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call __trace_hardirqs_off
+#endif
+
+#ifdef CONFIG_CONTEXT_TRACKING
+ /* If previous state is in user mode, call context_tracking_user_exit. */
+ li a0, SR_PP
+ and a0, s1, a0
+ bnez a0, skip_context_tracking
+ call context_tracking_user_exit
+skip_context_tracking:
+#endif
+
+ /*
+ * MSB of cause differentiates between
+ * interrupts and exceptions
+ */
+ bge s4, zero, 1f
+
+ la ra, ret_from_exception
+
+ /* Handle interrupts */
+ move a0, sp /* pt_regs */
+ la a1, handle_arch_irq
+ REG_L a1, (a1)
+ jr a1
+1:
+ /*
+ * Exceptions run with interrupts enabled or disabled depending on the
+ * state of SR_PIE in m/sstatus.
+ */
+ andi t0, s1, SR_PIE
+ beqz t0, 1f
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call __trace_hardirqs_on
+#endif
+ csrs CSR_STATUS, SR_IE
+
+1:
+ la ra, ret_from_exception
+ /* Handle syscalls */
+ li t0, EXC_SYSCALL
+ beq s4, t0, handle_syscall
+
+ /* Handle other exceptions */
+ slli t0, s4, RISCV_LGPTR
+ la t1, excp_vect_table
+ la t2, excp_vect_table_end
+ move a0, sp /* pt_regs */
+ add t0, t1, t0
+ /* Check if exception code lies within bounds */
+ bgeu t0, t2, 1f
+ REG_L t0, 0(t0)
+ jr t0
+1:
+ tail do_trap_unknown
+
+handle_syscall:
+#ifdef CONFIG_RISCV_M_MODE
+ /*
+ * When running in M-Mode (no MMU config), MPIE does not get set.
+ * As a result, we need to force-enable interrupts here because
+ * handle_exception did not set SR_IE, as it always sees SR_PIE
+ * being cleared.
+ */
+ csrs CSR_STATUS, SR_IE
+#endif
+#if defined(CONFIG_TRACE_IRQFLAGS) || defined(CONFIG_CONTEXT_TRACKING)
+ /* Recover a0 - a7 for system calls */
+ REG_L a0, PT_A0(sp)
+ REG_L a1, PT_A1(sp)
+ REG_L a2, PT_A2(sp)
+ REG_L a3, PT_A3(sp)
+ REG_L a4, PT_A4(sp)
+ REG_L a5, PT_A5(sp)
+ REG_L a6, PT_A6(sp)
+ REG_L a7, PT_A7(sp)
+#endif
+ /* save the initial A0 value (needed in signal handlers) */
+ REG_S a0, PT_ORIG_A0(sp)
+ /*
+ * Advance SEPC to avoid executing the original
+ * scall instruction on sret
+ */
+ addi s2, s2, 0x4
+ REG_S s2, PT_EPC(sp)
+ /* Trace syscalls, but only if requested by the user. */
+ REG_L t0, TASK_TI_FLAGS(tp)
+ andi t0, t0, _TIF_SYSCALL_WORK
+ bnez t0, handle_syscall_trace_enter
+check_syscall_nr:
+ /* Check to make sure we don't jump to a bogus syscall number. */
+ li t0, __NR_syscalls
+ la s0, sys_ni_syscall
+ /*
+ * Syscall number held in a7.
+ * If syscall number is above allowed value, redirect to ni_syscall.
+ */
+ bgeu a7, t0, 1f
+ /* Call syscall */
+ la s0, sys_call_table
+ slli t0, a7, RISCV_LGPTR
+ add s0, s0, t0
+ REG_L s0, 0(s0)
+1:
+ jalr s0
+
+ret_from_syscall:
+ /* Set user a0 to kernel a0 */
+ REG_S a0, PT_A0(sp)
+ /*
+ * If seccomp rejected the syscall (SECCOMP_RET_ERRNO/TRACE), we did
+ * not execute the actual syscall; seccomp has already set the return
+ * value in the current task's pt_regs.
+ */
+ret_from_syscall_rejected:
+ /* Trace syscalls, but only if requested by the user. */
+ REG_L t0, TASK_TI_FLAGS(tp)
+ andi t0, t0, _TIF_SYSCALL_WORK
+ bnez t0, handle_syscall_trace_exit
+
+ret_from_exception:
+ REG_L s0, PT_STATUS(sp)
+ csrc CSR_STATUS, SR_IE
+#ifdef CONFIG_TRACE_IRQFLAGS
+ call __trace_hardirqs_off
+#endif
+#ifdef CONFIG_RISCV_M_MODE
+ /* the MPP value is too large to be used as an immediate arg for addi */
+ li t0, SR_MPP
+ and s0, s0, t0
+#else
+ andi s0, s0, SR_SPP
+#endif
+ bnez s0, resume_kernel
+
+resume_userspace:
+ /* Interrupts must be disabled here so flags are checked atomically */
+ REG_L s0, TASK_TI_FLAGS(tp) /* current_thread_info->flags */
+ andi s1, s0, _TIF_WORK_MASK
+ bnez s1, work_pending
+
+#ifdef CONFIG_CONTEXT_TRACKING
+ call context_tracking_user_enter
+#endif
+
+ /* Save unwound kernel stack pointer in thread_info */
+ addi s0, sp, PT_SIZE_ON_STACK
+ REG_S s0, TASK_TI_KERNEL_SP(tp)
+
+ /*
+ * Save TP into the scratch register, so we can find the kernel data
+ * structures again.
+ */
+ csrw CSR_SCRATCH, tp
+
+restore_all:
+#ifdef CONFIG_TRACE_IRQFLAGS
+ REG_L s1, PT_STATUS(sp)
+ andi t0, s1, SR_PIE
+ beqz t0, 1f
+ call __trace_hardirqs_on
+ j 2f
+1:
+ call __trace_hardirqs_off
+2:
+#endif
+ REG_L a0, PT_STATUS(sp)
+ /*
+ * The current load reservation is effectively part of the processor's
+ * state, in the sense that load reservations cannot be shared between
+ * different hart contexts. We can't actually save and restore a load
+ * reservation, so instead here we clear any existing reservation --
+ * it's always legal for implementations to clear load reservations at
+ * any point (as long as the forward progress guarantee is kept, but
+ * we'll ignore that here).
+ *
+ * Dangling load reservations can be the result of taking a trap in the
+ * middle of an LR/SC sequence, but can also be the result of a taken
+ * forward branch around an SC -- which is how we implement CAS. As a
+ * result we need to clear reservations between the last CAS and the
+ * jump back to the new context. While it is unlikely the store
+ * completes, implementations are allowed to expand reservations to be
+ * arbitrarily large.
+ */
+ REG_L a2, PT_EPC(sp)
+ REG_SC x0, a2, PT_EPC(sp)
+
+ csrw CSR_STATUS, a0
+ csrw CSR_EPC, a2
+
+ REG_L x1, PT_RA(sp)
+ REG_L x3, PT_GP(sp)
+ REG_L x4, PT_TP(sp)
+ REG_L x5, PT_T0(sp)
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+
+ REG_L x2, PT_SP(sp)
+
+#ifdef CONFIG_RISCV_M_MODE
+ mret
+#else
+ sret
+#endif
+
+#if IS_ENABLED(CONFIG_PREEMPTION)
+resume_kernel:
+ REG_L s0, TASK_TI_PREEMPT_COUNT(tp)
+ bnez s0, restore_all
+ REG_L s0, TASK_TI_FLAGS(tp)
+ andi s0, s0, _TIF_NEED_RESCHED
+ beqz s0, restore_all
+ call preempt_schedule_irq
+ j restore_all
+#endif
+
+work_pending:
+ /* Enter slow path for supplementary processing */
+ la ra, ret_from_exception
+ andi s1, s0, _TIF_NEED_RESCHED
+ bnez s1, work_resched
+work_notifysig:
+ /* Handle pending signals and notify-resume requests */
+ csrs CSR_STATUS, SR_IE /* Enable interrupts for do_notify_resume() */
+ move a0, sp /* pt_regs */
+ move a1, s0 /* current_thread_info->flags */
+ tail do_notify_resume
+work_resched:
+ tail schedule
+
+/* Slow paths for ptrace. */
+handle_syscall_trace_enter:
+ move a0, sp
+ call do_syscall_trace_enter
+ move t0, a0
+ REG_L a0, PT_A0(sp)
+ REG_L a1, PT_A1(sp)
+ REG_L a2, PT_A2(sp)
+ REG_L a3, PT_A3(sp)
+ REG_L a4, PT_A4(sp)
+ REG_L a5, PT_A5(sp)
+ REG_L a6, PT_A6(sp)
+ REG_L a7, PT_A7(sp)
+ bnez t0, ret_from_syscall_rejected
+ j check_syscall_nr
+handle_syscall_trace_exit:
+ move a0, sp
+ call do_syscall_trace_exit
+ j ret_from_exception
+
+END(handle_exception)
+
+ENTRY(ret_from_fork)
+ la ra, ret_from_exception
+ tail schedule_tail
+ENDPROC(ret_from_fork)
+
+ENTRY(ret_from_kernel_thread)
+ call schedule_tail
+ /* Call fn(arg) */
+ la ra, ret_from_exception
+ move a0, s1
+ jr s0
+ENDPROC(ret_from_kernel_thread)
+
+
+/*
+ * Integer register context switch
+ * The callee-saved registers must be saved and restored.
+ *
+ * a0: previous task_struct (must be preserved across the switch)
+ * a1: next task_struct
+ *
+ * The value of a0 and a1 must be preserved by this function, as that's how
+ * arguments are passed to schedule_tail.
+ */
+ENTRY(__switch_to)
+ /* Save context into prev->thread */
+ li a4, TASK_THREAD_RA
+ add a3, a0, a4
+ add a4, a1, a4
+ REG_S ra, TASK_THREAD_RA_RA(a3)
+ REG_S sp, TASK_THREAD_SP_RA(a3)
+ REG_S s0, TASK_THREAD_S0_RA(a3)
+ REG_S s1, TASK_THREAD_S1_RA(a3)
+ REG_S s2, TASK_THREAD_S2_RA(a3)
+ REG_S s3, TASK_THREAD_S3_RA(a3)
+ REG_S s4, TASK_THREAD_S4_RA(a3)
+ REG_S s5, TASK_THREAD_S5_RA(a3)
+ REG_S s6, TASK_THREAD_S6_RA(a3)
+ REG_S s7, TASK_THREAD_S7_RA(a3)
+ REG_S s8, TASK_THREAD_S8_RA(a3)
+ REG_S s9, TASK_THREAD_S9_RA(a3)
+ REG_S s10, TASK_THREAD_S10_RA(a3)
+ REG_S s11, TASK_THREAD_S11_RA(a3)
+ /* Restore context from next->thread */
+ REG_L ra, TASK_THREAD_RA_RA(a4)
+ REG_L sp, TASK_THREAD_SP_RA(a4)
+ REG_L s0, TASK_THREAD_S0_RA(a4)
+ REG_L s1, TASK_THREAD_S1_RA(a4)
+ REG_L s2, TASK_THREAD_S2_RA(a4)
+ REG_L s3, TASK_THREAD_S3_RA(a4)
+ REG_L s4, TASK_THREAD_S4_RA(a4)
+ REG_L s5, TASK_THREAD_S5_RA(a4)
+ REG_L s6, TASK_THREAD_S6_RA(a4)
+ REG_L s7, TASK_THREAD_S7_RA(a4)
+ REG_L s8, TASK_THREAD_S8_RA(a4)
+ REG_L s9, TASK_THREAD_S9_RA(a4)
+ REG_L s10, TASK_THREAD_S10_RA(a4)
+ REG_L s11, TASK_THREAD_S11_RA(a4)
+ /* Swap the CPU entry around. */
+ lw a3, TASK_TI_CPU(a0)
+ lw a4, TASK_TI_CPU(a1)
+ sw a3, TASK_TI_CPU(a1)
+ sw a4, TASK_TI_CPU(a0)
+ /* The offset of thread_info in task_struct is zero. */
+ move tp, a1
+ ret
+ENDPROC(__switch_to)
+
+#ifndef CONFIG_MMU
+#define do_page_fault do_trap_unknown
+#endif
+
+ .section ".rodata"
+ .align LGREG
+ /* Exception vector table */
+ENTRY(excp_vect_table)
+ RISCV_PTR do_trap_insn_misaligned
+ RISCV_PTR do_trap_insn_fault
+ RISCV_PTR do_trap_insn_illegal
+ RISCV_PTR do_trap_break
+ RISCV_PTR do_trap_load_misaligned
+ RISCV_PTR do_trap_load_fault
+ RISCV_PTR do_trap_store_misaligned
+ RISCV_PTR do_trap_store_fault
+ RISCV_PTR do_trap_ecall_u /* system call, gets intercepted */
+ RISCV_PTR do_trap_ecall_s
+ RISCV_PTR do_trap_unknown
+ RISCV_PTR do_trap_ecall_m
+ RISCV_PTR do_page_fault /* instruction page fault */
+ RISCV_PTR do_page_fault /* load page fault */
+ RISCV_PTR do_trap_unknown
+ RISCV_PTR do_page_fault /* store page fault */
+excp_vect_table_end:
+END(excp_vect_table)
+
+#ifndef CONFIG_MMU
+ENTRY(__user_rt_sigreturn)
+ li a7, __NR_rt_sigreturn
+ scall
+END(__user_rt_sigreturn)
+#endif
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
new file mode 100644
index 000000000..dd2205473
--- /dev/null
+++ b/arch/riscv/kernel/fpu.S
@@ -0,0 +1,106 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation, version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/asm-offsets.h>
+
+ENTRY(__fstate_save)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ csrs CSR_STATUS, t1
+ frcsr t0
+ fsd f0, TASK_THREAD_F0_F0(a0)
+ fsd f1, TASK_THREAD_F1_F0(a0)
+ fsd f2, TASK_THREAD_F2_F0(a0)
+ fsd f3, TASK_THREAD_F3_F0(a0)
+ fsd f4, TASK_THREAD_F4_F0(a0)
+ fsd f5, TASK_THREAD_F5_F0(a0)
+ fsd f6, TASK_THREAD_F6_F0(a0)
+ fsd f7, TASK_THREAD_F7_F0(a0)
+ fsd f8, TASK_THREAD_F8_F0(a0)
+ fsd f9, TASK_THREAD_F9_F0(a0)
+ fsd f10, TASK_THREAD_F10_F0(a0)
+ fsd f11, TASK_THREAD_F11_F0(a0)
+ fsd f12, TASK_THREAD_F12_F0(a0)
+ fsd f13, TASK_THREAD_F13_F0(a0)
+ fsd f14, TASK_THREAD_F14_F0(a0)
+ fsd f15, TASK_THREAD_F15_F0(a0)
+ fsd f16, TASK_THREAD_F16_F0(a0)
+ fsd f17, TASK_THREAD_F17_F0(a0)
+ fsd f18, TASK_THREAD_F18_F0(a0)
+ fsd f19, TASK_THREAD_F19_F0(a0)
+ fsd f20, TASK_THREAD_F20_F0(a0)
+ fsd f21, TASK_THREAD_F21_F0(a0)
+ fsd f22, TASK_THREAD_F22_F0(a0)
+ fsd f23, TASK_THREAD_F23_F0(a0)
+ fsd f24, TASK_THREAD_F24_F0(a0)
+ fsd f25, TASK_THREAD_F25_F0(a0)
+ fsd f26, TASK_THREAD_F26_F0(a0)
+ fsd f27, TASK_THREAD_F27_F0(a0)
+ fsd f28, TASK_THREAD_F28_F0(a0)
+ fsd f29, TASK_THREAD_F29_F0(a0)
+ fsd f30, TASK_THREAD_F30_F0(a0)
+ fsd f31, TASK_THREAD_F31_F0(a0)
+ sw t0, TASK_THREAD_FCSR_F0(a0)
+ csrc CSR_STATUS, t1
+ ret
+ENDPROC(__fstate_save)
+
+ENTRY(__fstate_restore)
+ li a2, TASK_THREAD_F0
+ add a0, a0, a2
+ li t1, SR_FS
+ lw t0, TASK_THREAD_FCSR_F0(a0)
+ csrs CSR_STATUS, t1
+ fld f0, TASK_THREAD_F0_F0(a0)
+ fld f1, TASK_THREAD_F1_F0(a0)
+ fld f2, TASK_THREAD_F2_F0(a0)
+ fld f3, TASK_THREAD_F3_F0(a0)
+ fld f4, TASK_THREAD_F4_F0(a0)
+ fld f5, TASK_THREAD_F5_F0(a0)
+ fld f6, TASK_THREAD_F6_F0(a0)
+ fld f7, TASK_THREAD_F7_F0(a0)
+ fld f8, TASK_THREAD_F8_F0(a0)
+ fld f9, TASK_THREAD_F9_F0(a0)
+ fld f10, TASK_THREAD_F10_F0(a0)
+ fld f11, TASK_THREAD_F11_F0(a0)
+ fld f12, TASK_THREAD_F12_F0(a0)
+ fld f13, TASK_THREAD_F13_F0(a0)
+ fld f14, TASK_THREAD_F14_F0(a0)
+ fld f15, TASK_THREAD_F15_F0(a0)
+ fld f16, TASK_THREAD_F16_F0(a0)
+ fld f17, TASK_THREAD_F17_F0(a0)
+ fld f18, TASK_THREAD_F18_F0(a0)
+ fld f19, TASK_THREAD_F19_F0(a0)
+ fld f20, TASK_THREAD_F20_F0(a0)
+ fld f21, TASK_THREAD_F21_F0(a0)
+ fld f22, TASK_THREAD_F22_F0(a0)
+ fld f23, TASK_THREAD_F23_F0(a0)
+ fld f24, TASK_THREAD_F24_F0(a0)
+ fld f25, TASK_THREAD_F25_F0(a0)
+ fld f26, TASK_THREAD_F26_F0(a0)
+ fld f27, TASK_THREAD_F27_F0(a0)
+ fld f28, TASK_THREAD_F28_F0(a0)
+ fld f29, TASK_THREAD_F29_F0(a0)
+ fld f30, TASK_THREAD_F30_F0(a0)
+ fld f31, TASK_THREAD_F31_F0(a0)
+ fscsr t0
+ csrc CSR_STATUS, t1
+ ret
+ENDPROC(__fstate_restore)
diff --git a/arch/riscv/kernel/ftrace.c b/arch/riscv/kernel/ftrace.c
new file mode 100644
index 000000000..8693dfcff
--- /dev/null
+++ b/arch/riscv/kernel/ftrace.c
@@ -0,0 +1,239 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2013 Linaro Limited
+ * Author: AKASHI Takahiro <takahiro.akashi@linaro.org>
+ * Copyright (C) 2017 Andes Technology Corporation
+ */
+
+#include <linux/ftrace.h>
+#include <linux/uaccess.h>
+#include <linux/memory.h>
+#include <asm/cacheflush.h>
+#include <asm/patch.h>
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+int ftrace_arch_code_modify_prepare(void) __acquires(&text_mutex)
+{
+ mutex_lock(&text_mutex);
+
+ /*
+ * The code sequences we use for ftrace can't be patched while the
+ * kernel is running, so we need to use stop_machine() to modify them
+ * for now. This doesn't play nice with text_mutex, we use this flag
+ * to elide the check.
+ */
+ riscv_patch_in_stop_machine = true;
+
+ return 0;
+}
+
+int ftrace_arch_code_modify_post_process(void) __releases(&text_mutex)
+{
+ riscv_patch_in_stop_machine = false;
+ mutex_unlock(&text_mutex);
+ return 0;
+}
+
+static int ftrace_check_current_call(unsigned long hook_pos,
+ unsigned int *expected)
+{
+ unsigned int replaced[2];
+ unsigned int nops[2] = {NOP4, NOP4};
+
+ /* we expect nops at the hook position */
+ if (!expected)
+ expected = nops;
+
+ /*
+ * Read the text we want to modify;
+ * return must be -EFAULT on read error
+ */
+ if (copy_from_kernel_nofault(replaced, (void *)hook_pos,
+ MCOUNT_INSN_SIZE))
+ return -EFAULT;
+
+ /*
+ * Make sure it is what we expect it to be;
+ * return must be -EINVAL on failed comparison
+ */
+ if (memcmp(expected, replaced, sizeof(replaced))) {
+ pr_err("%p: expected (%08x %08x) but got (%08x %08x)\n",
+ (void *)hook_pos, expected[0], expected[1], replaced[0],
+ replaced[1]);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int __ftrace_modify_call(unsigned long hook_pos, unsigned long target,
+ bool enable)
+{
+ unsigned int call[2];
+ unsigned int nops[2] = {NOP4, NOP4};
+
+ make_call(hook_pos, target, call);
+
+ /* Replace the auipc-jalr pair at once. Return -EPERM on write error. */
+ if (patch_text_nosync
+ ((void *)hook_pos, enable ? call : nops, MCOUNT_INSN_SIZE))
+ return -EPERM;
+
+ return 0;
+}
+
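+/*
+ * For reference, make_call() (see asm/ftrace.h) emits a two-instruction
+ * sequence roughly like the following, where the offset
+ * (target - hook_pos) is split into high and low parts:
+ *
+ *	auipc	ra, offset_hi20
+ *	jalr	ra, offset_lo12(ra)
+ */
+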
+int ftrace_make_call(struct dyn_ftrace *rec, unsigned long addr)
+{
+ int ret = ftrace_check_current_call(rec->ip, NULL);
+
+ if (ret)
+ return ret;
+
+ return __ftrace_modify_call(rec->ip, addr, true);
+}
+
+int ftrace_make_nop(struct module *mod, struct dyn_ftrace *rec,
+ unsigned long addr)
+{
+ unsigned int call[2];
+ int ret;
+
+ make_call(rec->ip, addr, call);
+ ret = ftrace_check_current_call(rec->ip, call);
+
+ if (ret)
+ return ret;
+
+ return __ftrace_modify_call(rec->ip, addr, false);
+}
+
+
+/*
+ * This is called early on, and isn't wrapped by
+ * ftrace_arch_code_modify_{prepare,post_process}() and therefore doesn't hold
+ * text_mutex, which triggers a lockdep failure. SMP isn't running so we could
+ * just directly poke the text, but it's simpler to just take the lock
+ * ourselves.
+ */
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec)
+{
+ int out;
+
+ mutex_lock(&text_mutex);
+ out = ftrace_make_nop(mod, rec, MCOUNT_ADDR);
+ mutex_unlock(&text_mutex);
+
+ return out;
+}
+
+int ftrace_update_ftrace_func(ftrace_func_t func)
+{
+ int ret = __ftrace_modify_call((unsigned long)&ftrace_call,
+ (unsigned long)func, true);
+ if (!ret) {
+ ret = __ftrace_modify_call((unsigned long)&ftrace_regs_call,
+ (unsigned long)func, true);
+ }
+
+ return ret;
+}
+
+int __init ftrace_dyn_arch_init(void)
+{
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
+int ftrace_modify_call(struct dyn_ftrace *rec, unsigned long old_addr,
+ unsigned long addr)
+{
+ unsigned int call[2];
+ int ret;
+
+ make_call(rec->ip, old_addr, call);
+ ret = ftrace_check_current_call(rec->ip, call);
+
+ if (ret)
+ return ret;
+
+ return __ftrace_modify_call(rec->ip, addr, true);
+}
+#endif
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * Most of this function is copied from arm64.
+ */
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer)
+{
+ unsigned long return_hooker = (unsigned long)&return_to_handler;
+ unsigned long old;
+
+ if (unlikely(atomic_read(&current->tracing_graph_pause)))
+ return;
+
+ /*
+ * We don't suffer access faults, so no extra fault-recovery assembly
+ * is needed here.
+ */
+ old = *parent;
+
+ if (!function_graph_enter(old, self_addr, frame_pointer, parent))
+ *parent = return_hooker;
+}
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+extern void ftrace_graph_call(void);
+int ftrace_enable_ftrace_graph_caller(void)
+{
+ unsigned int call[2];
+ static int init_graph = 1;
+ int ret;
+
+ make_call(&ftrace_graph_call, &ftrace_stub, call);
+
+ /*
+ * When enabling the graph tracer for the first time, ftrace_graph_call
+ * should contain a call to ftrace_stub. Once it has been disabled,
+ * the 8 bytes at that position become NOPs.
+ */
+ if (init_graph) {
+ ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+ call);
+ init_graph = 0;
+ } else {
+ ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+ NULL);
+ }
+
+ if (ret)
+ return ret;
+
+ return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+ (unsigned long)&prepare_ftrace_return, true);
+}
+
+int ftrace_disable_ftrace_graph_caller(void)
+{
+ unsigned int call[2];
+ int ret;
+
+ make_call(&ftrace_graph_call, &prepare_ftrace_return, call);
+
+ /*
+ * This is to make sure that ftrace_enable_ftrace_graph_caller
+ * did the right thing.
+ */
+ ret = ftrace_check_current_call((unsigned long)&ftrace_graph_call,
+ call);
+
+ if (ret)
+ return ret;
+
+ return __ftrace_modify_call((unsigned long)&ftrace_graph_call,
+ (unsigned long)&prepare_ftrace_return, false);
+}
+#endif /* CONFIG_DYNAMIC_FTRACE */
+#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
new file mode 100644
index 000000000..47d1411db
--- /dev/null
+++ b/arch/riscv/kernel/head.S
@@ -0,0 +1,396 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <asm/asm-offsets.h>
+#include <asm/asm.h>
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/thread_info.h>
+#include <asm/page.h>
+#include <asm/csr.h>
+#include <asm/hwcap.h>
+#include <asm/image.h>
+#include "efi-header.S"
+
+__HEAD
+ENTRY(_start)
+ /*
+ * Image header expected by Linux boot-loaders. The image header data
+ * structure is described in asm/image.h.
+ * Do not modify it without modifying the structure and all bootloaders
+ * that expect this header format!
+ */
+#ifdef CONFIG_EFI
+ /*
+ * This instruction decodes to the "MZ" ASCII signature required by UEFI.
+ */
+ c.li s4,-13
+ j _start_kernel
+#else
+ /* jump to start kernel */
+ j _start_kernel
+ /* reserved */
+ .word 0
+#endif
+ .balign 8
+#ifdef CONFIG_RISCV_M_MODE
+ /* Image load offset (0MB) from start of RAM for M-mode */
+ .dword 0
+#else
+#if __riscv_xlen == 64
+ /* Image load offset (2MB) from start of RAM */
+ .dword 0x200000
+#else
+ /* Image load offset (4MB) from start of RAM */
+ .dword 0x400000
+#endif
+#endif
+ /* Effective size of kernel image */
+ .dword _end - _start
+ .dword __HEAD_FLAGS
+ .word RISCV_HEADER_VERSION
+ .word 0
+ .dword 0
+ .ascii RISCV_IMAGE_MAGIC
+ .balign 4
+ .ascii RISCV_IMAGE_MAGIC2
+#ifdef CONFIG_EFI
+ .word pe_head_start - _start
+pe_head_start:
+
+ __EFI_PE_HEADER
+#else
+ .word 0
+#endif
+
+.align 2
+#ifdef CONFIG_MMU
+relocate:
+ /* Relocate return address */
+ li a1, PAGE_OFFSET
+ la a2, _start
+ sub a1, a1, a2
+ add ra, ra, a1
+
+ /* Point stvec to the virtual address of the instruction after the satp write */
+ la a2, 1f
+ add a2, a2, a1
+ csrw CSR_TVEC, a2
+
+ /* Compute satp for kernel page tables, but don't load it yet */
+ srl a2, a0, PAGE_SHIFT
+ li a1, SATP_MODE
+ or a2, a2, a1
+
+ /*
+ * Load trampoline page directory, which will cause us to trap to
+ * stvec if VA != PA, or simply fall through if VA == PA. We need a
+ * full fence here because setup_vm() just wrote these PTEs and we need
+ * to ensure the new translations are in use.
+ */
+ la a0, trampoline_pg_dir
+ srl a0, a0, PAGE_SHIFT
+ or a0, a0, a1
+ sfence.vma
+ csrw CSR_SATP, a0
+.align 2
+1:
+ /* Set trap vector to spin forever to help debug */
+ la a0, .Lsecondary_park
+ csrw CSR_TVEC, a0
+
+ /* Reload the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+
+ /*
+ * Switch to kernel page tables. A full fence is necessary in order to
+ * avoid using the trampoline translations, which are only correct for
+ * the first superpage. Fetching the fence is guaranteed to work
+ * because that first superpage is translated the same way.
+ */
+ csrw CSR_SATP, a2
+ sfence.vma
+
+ ret
+#endif /* CONFIG_MMU */
+#ifdef CONFIG_SMP
+ .global secondary_start_sbi
+secondary_start_sbi:
+ /* Mask all interrupts */
+ csrw CSR_IE, zero
+ csrw CSR_IP, zero
+
+ /* Load the global pointer */
+ .option push
+ .option norelax
+ la gp, __global_pointer$
+ .option pop
+
+ /*
+ * Disable FPU to detect illegal usage of
+ * floating point in kernel space
+ */
+ li t0, SR_FS
+ csrc CSR_STATUS, t0
+
+ /* Set trap vector to spin forever to help debug */
+ la a3, .Lsecondary_park
+ csrw CSR_TVEC, a3
+
+ slli a3, a0, LGREG
+ la a4, __cpu_up_stack_pointer
+ la a5, __cpu_up_task_pointer
+ add a4, a3, a4
+ add a5, a3, a5
+ REG_L sp, (a4)
+ REG_L tp, (a5)
+
+ .global secondary_start_common
+secondary_start_common:
+
+#ifdef CONFIG_MMU
+ /* Enable virtual memory and relocate to virtual address */
+ la a0, swapper_pg_dir
+ call relocate
+#endif
+ call setup_trap_vector
+ tail smp_callin
+#endif /* CONFIG_SMP */
+
+.align 2
+setup_trap_vector:
+ /* Set trap vector to exception handler */
+ la a0, handle_exception
+ csrw CSR_TVEC, a0
+
+ /*
+ * Set the sup0 scratch register to 0, indicating to the exception
+ * vector that we are presently executing in the kernel.
+ */
+ csrw CSR_SCRATCH, zero
+ ret
+
+.align 2
+.Lsecondary_park:
+ /* We lack SMP support or have too many harts, so park this hart */
+ wfi
+ j .Lsecondary_park
+
+END(_start)
+
+ __INIT
+ENTRY(_start_kernel)
+ /* Mask all interrupts */
+ csrw CSR_IE, zero
+ csrw CSR_IP, zero
+
+#ifdef CONFIG_RISCV_M_MODE
+ /* flush the instruction cache */
+ fence.i
+
+ /* Reset all registers except ra, a0, a1 */
+ call reset_regs
+
+ /*
+ * Set up a PMP to permit access to all of memory. Some machines may
+ * not implement PMPs, so we set up a quick trap handler to just skip
+ * touching the PMPs on any trap.
+ */
+ la a0, pmp_done
+ csrw CSR_TVEC, a0
+
+ li a0, -1
+ csrw CSR_PMPADDR0, a0
+ li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
+ csrw CSR_PMPCFG0, a0
+.align 2
+pmp_done:
+
+ /*
+ * The hartid in a0 is expected later on, and we have no firmware
+ * to hand it to us.
+ */
+ csrr a0, CSR_MHARTID
+#endif /* CONFIG_RISCV_M_MODE */
+
+ /* Load the global pointer */
+.option push
+.option norelax
+ la gp, __global_pointer$
+.option pop
+
+ /*
+ * Disable FPU to detect illegal usage of
+ * floating point in kernel space
+ */
+ li t0, SR_FS
+ csrc CSR_STATUS, t0
+
+#ifdef CONFIG_SMP
+ li t0, CONFIG_NR_CPUS
+ blt a0, t0, .Lgood_cores
+ tail .Lsecondary_park
+.Lgood_cores:
+#endif
+
+ /* Pick one hart to run the main boot sequence */
+ la a3, hart_lottery
+ li a2, 1
+ amoadd.w a3, a2, (a3)
+ bnez a3, .Lsecondary_start
+
+ /* Clear BSS for flat non-ELF images */
+ la a3, __bss_start
+ la a4, __bss_stop
+ ble a4, a3, clear_bss_done
+clear_bss:
+ REG_S zero, (a3)
+ add a3, a3, RISCV_SZPTR
+ blt a3, a4, clear_bss
+clear_bss_done:
+
+ /* Save hart ID and DTB physical address */
+ mv s0, a0
+ mv s1, a1
+ la a2, boot_cpu_hartid
+ REG_S a0, (a2)
+
+ /* Initialize page tables and relocate to virtual addresses */
+ la tp, init_task
+ la sp, init_thread_union + THREAD_SIZE
+ mv a0, s1
+ call setup_vm
+#ifdef CONFIG_MMU
+ la a0, early_pg_dir
+ call relocate
+#endif /* CONFIG_MMU */
+
+ call setup_trap_vector
+ /* Restore C environment */
+ la tp, init_task
+ sw zero, TASK_TI_CPU(tp)
+ la sp, init_thread_union + THREAD_SIZE
+
+#ifdef CONFIG_KASAN
+ call kasan_early_init
+#endif
+ /* Start the kernel */
+ call soc_early_init
+ tail start_kernel
+
+.Lsecondary_start:
+#ifdef CONFIG_SMP
+ /* Set trap vector to spin forever to help debug */
+ la a3, .Lsecondary_park
+ csrw CSR_TVEC, a3
+
+ slli a3, a0, LGREG
+ la a1, __cpu_up_stack_pointer
+ la a2, __cpu_up_task_pointer
+ add a1, a3, a1
+ add a2, a3, a2
+
+ /*
+ * This hart didn't win the lottery, so we wait until the winning hart
+ * has gotten far enough along the boot process that we can continue.
+ */
+.Lwait_for_cpu_up:
+ /* FIXME: We should WFI to save some energy here. */
+ REG_L sp, (a1)
+ REG_L tp, (a2)
+ beqz sp, .Lwait_for_cpu_up
+ beqz tp, .Lwait_for_cpu_up
+ fence
+
+ tail secondary_start_common
+#endif
+
+END(_start_kernel)
+
+#ifdef CONFIG_RISCV_M_MODE
+ENTRY(reset_regs)
+ li sp, 0
+ li gp, 0
+ li tp, 0
+ li t0, 0
+ li t1, 0
+ li t2, 0
+ li s0, 0
+ li s1, 0
+ li a2, 0
+ li a3, 0
+ li a4, 0
+ li a5, 0
+ li a6, 0
+ li a7, 0
+ li s2, 0
+ li s3, 0
+ li s4, 0
+ li s5, 0
+ li s6, 0
+ li s7, 0
+ li s8, 0
+ li s9, 0
+ li s10, 0
+ li s11, 0
+ li t3, 0
+ li t4, 0
+ li t5, 0
+ li t6, 0
+ csrw CSR_SCRATCH, 0
+
+#ifdef CONFIG_FPU
+ csrr t0, CSR_MISA
+ andi t0, t0, (COMPAT_HWCAP_ISA_F | COMPAT_HWCAP_ISA_D)
+ beqz t0, .Lreset_regs_done
+
+ li t1, SR_FS
+ csrs CSR_STATUS, t1
+ fmv.s.x f0, zero
+ fmv.s.x f1, zero
+ fmv.s.x f2, zero
+ fmv.s.x f3, zero
+ fmv.s.x f4, zero
+ fmv.s.x f5, zero
+ fmv.s.x f6, zero
+ fmv.s.x f7, zero
+ fmv.s.x f8, zero
+ fmv.s.x f9, zero
+ fmv.s.x f10, zero
+ fmv.s.x f11, zero
+ fmv.s.x f12, zero
+ fmv.s.x f13, zero
+ fmv.s.x f14, zero
+ fmv.s.x f15, zero
+ fmv.s.x f16, zero
+ fmv.s.x f17, zero
+ fmv.s.x f18, zero
+ fmv.s.x f19, zero
+ fmv.s.x f20, zero
+ fmv.s.x f21, zero
+ fmv.s.x f22, zero
+ fmv.s.x f23, zero
+ fmv.s.x f24, zero
+ fmv.s.x f25, zero
+ fmv.s.x f26, zero
+ fmv.s.x f27, zero
+ fmv.s.x f28, zero
+ fmv.s.x f29, zero
+ fmv.s.x f30, zero
+ fmv.s.x f31, zero
+ csrw fcsr, 0
+ /* note that the caller must clear SR_FS */
+#endif /* CONFIG_FPU */
+.Lreset_regs_done:
+ ret
+END(reset_regs)
+#endif /* CONFIG_RISCV_M_MODE */
+
+__PAGE_ALIGNED_BSS
+ /* Empty zero page */
+ .balign PAGE_SIZE
diff --git a/arch/riscv/kernel/head.h b/arch/riscv/kernel/head.h
new file mode 100644
index 000000000..b48dda3d0
--- /dev/null
+++ b/arch/riscv/kernel/head.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive, Inc.
+ */
+#ifndef __ASM_HEAD_H
+#define __ASM_HEAD_H
+
+#include <linux/linkage.h>
+#include <linux/init.h>
+
+extern atomic_t hart_lottery;
+
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void __init setup_vm(uintptr_t dtb_pa);
+
+extern void *__cpu_up_stack_pointer[];
+extern void *__cpu_up_task_pointer[];
+
+#endif /* __ASM_HEAD_H */
diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h
new file mode 100644
index 000000000..8c212efb3
--- /dev/null
+++ b/arch/riscv/kernel/image-vars.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Linker script variables to be set after section resolution, as
+ * ld.lld does not like variables assigned before SECTIONS is processed.
+ * Based on arch/arm64/kernel/image-vars.h
+ */
+#ifndef __RISCV_KERNEL_IMAGE_VARS_H
+#define __RISCV_KERNEL_IMAGE_VARS_H
+
+#ifndef LINKER_SCRIPT
+#error This file should only be included in vmlinux.lds.S
+#endif
+
+#ifdef CONFIG_EFI
+
+/*
+ * The EFI stub has its own symbol namespace prefixed by __efistub_, to
+ * isolate it from the kernel proper. The following symbols are legally
+ * accessed by the stub, so provide some aliases to make them accessible.
+ * Only include data symbols here, or text symbols of functions that are
+ * guaranteed to be safe when executed at an offset other than the one
+ * they were linked at. The routines below are all implemented in
+ * assembler in a position-independent manner.
+ */
+__efistub_memcmp = memcmp;
+__efistub_memchr = memchr;
+__efistub_memcpy = memcpy;
+__efistub_memmove = memmove;
+__efistub_memset = memset;
+__efistub_strlen = strlen;
+__efistub_strnlen = strnlen;
+__efistub_strcmp = strcmp;
+__efistub_strncmp = strncmp;
+__efistub_strrchr = strrchr;
+
+#ifdef CONFIG_KASAN
+__efistub___memcpy = memcpy;
+__efistub___memmove = memmove;
+__efistub___memset = memset;
+#endif
+
+__efistub__start = _start;
+__efistub__start_kernel = _start_kernel;
+__efistub__end = _end;
+__efistub__edata = _edata;
+__efistub_screen_info = screen_info;
+
+#endif
+
+#endif /* __RISCV_KERNEL_IMAGE_VARS_H */
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
new file mode 100644
index 000000000..7207fa08d
--- /dev/null
+++ b/arch/riscv/kernel/irq.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2018 Christoph Hellwig
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irqchip.h>
+#include <linux/seq_file.h>
+#include <asm/smp.h>
+
+int arch_show_interrupts(struct seq_file *p, int prec)
+{
+ show_ipi_stats(p, prec);
+ return 0;
+}
+
+void __init init_IRQ(void)
+{
+ irqchip_init();
+ if (!handle_arch_irq)
+ panic("No interrupt controller found.");
+}
diff --git a/arch/riscv/kernel/jump_label.c b/arch/riscv/kernel/jump_label.c
new file mode 100644
index 000000000..20e09056d
--- /dev/null
+++ b/arch/riscv/kernel/jump_label.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/kernel/jump_label.c
+ */
+#include <linux/jump_label.h>
+#include <linux/kernel.h>
+#include <linux/memory.h>
+#include <linux/mutex.h>
+#include <asm/bug.h>
+#include <asm/patch.h>
+
+#define RISCV_INSN_NOP 0x00000013U
+#define RISCV_INSN_JAL 0x0000006fU
+
+void arch_jump_label_transform(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ void *addr = (void *)jump_entry_code(entry);
+ u32 insn;
+
+ if (type == JUMP_LABEL_JMP) {
+ long offset = jump_entry_target(entry) - jump_entry_code(entry);
+
+ if (WARN_ON(offset & 1 || offset < -524288 || offset >= 524288))
+ return;
+
+ insn = RISCV_INSN_JAL |
+ (((u32)offset & GENMASK(19, 12)) << (12 - 12)) |
+ (((u32)offset & GENMASK(11, 11)) << (20 - 11)) |
+ (((u32)offset & GENMASK(10, 1)) << (21 - 1)) |
+ (((u32)offset & GENMASK(20, 20)) << (31 - 20));
+ } else {
+ insn = RISCV_INSN_NOP;
+ }
+
+ mutex_lock(&text_mutex);
+ patch_text_nosync(addr, &insn, sizeof(insn));
+ mutex_unlock(&text_mutex);
+}
+
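+/*
+ * Worked example (illustrative): for offset = 8 the shifts above place
+ * imm[10:1] = 4 at instruction bits 30:21, giving
+ *
+ *	insn = (4 << 21) | RISCV_INSN_JAL = 0x0080006f
+ *
+ * which decodes as "jal x0, . + 8", i.e. "j . + 8".
+ */
+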
+void arch_jump_label_transform_static(struct jump_entry *entry,
+ enum jump_label_type type)
+{
+ /*
+ * We use the same instructions in the arch_static_branch and
+ * arch_static_branch_jump inline functions, so there's no
+ * need to patch them up here.
+ * The core will call arch_jump_label_transform when those
+ * instructions need to be replaced.
+ */
+}
diff --git a/arch/riscv/kernel/kgdb.c b/arch/riscv/kernel/kgdb.c
new file mode 100644
index 000000000..963ed7edc
--- /dev/null
+++ b/arch/riscv/kernel/kgdb.c
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/ptrace.h>
+#include <linux/kdebug.h>
+#include <linux/bug.h>
+#include <linux/kgdb.h>
+#include <linux/irqflags.h>
+#include <linux/string.h>
+#include <asm/cacheflush.h>
+#include <asm/gdb_xml.h>
+#include <asm/parse_asm.h>
+
+enum {
+ NOT_KGDB_BREAK = 0,
+ KGDB_SW_BREAK,
+ KGDB_COMPILED_BREAK,
+ KGDB_SW_SINGLE_STEP
+};
+
+static unsigned long stepped_address;
+static unsigned int stepped_opcode;
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+DECLARE_INSN(c_jal, MATCH_C_JAL, MASK_C_JAL)
+#else
+#define is_c_jal_insn(opcode) 0
+#endif
+DECLARE_INSN(jalr, MATCH_JALR, MASK_JALR)
+DECLARE_INSN(jal, MATCH_JAL, MASK_JAL)
+DECLARE_INSN(c_jr, MATCH_C_JR, MASK_C_JR)
+DECLARE_INSN(c_jalr, MATCH_C_JALR, MASK_C_JALR)
+DECLARE_INSN(c_j, MATCH_C_J, MASK_C_J)
+DECLARE_INSN(beq, MATCH_BEQ, MASK_BEQ)
+DECLARE_INSN(bne, MATCH_BNE, MASK_BNE)
+DECLARE_INSN(blt, MATCH_BLT, MASK_BLT)
+DECLARE_INSN(bge, MATCH_BGE, MASK_BGE)
+DECLARE_INSN(bltu, MATCH_BLTU, MASK_BLTU)
+DECLARE_INSN(bgeu, MATCH_BGEU, MASK_BGEU)
+DECLARE_INSN(c_beqz, MATCH_C_BEQZ, MASK_C_BEQZ)
+DECLARE_INSN(c_bnez, MATCH_C_BNEZ, MASK_C_BNEZ)
+DECLARE_INSN(sret, MATCH_SRET, MASK_SRET)
+
+static int decode_register_index(unsigned long opcode, int offset)
+{
+ return (opcode >> offset) & 0x1F;
+}
+
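+/* RVC register fields are 3 bits wide and encode registers x8..x15 */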
+static int decode_register_index_short(unsigned long opcode, int offset)
+{
+ return ((opcode >> offset) & 0x7) + 8;
+}
+
+/* Calculate the address of the next instruction to execute after a step */
+static int get_step_address(struct pt_regs *regs, unsigned long *next_addr)
+{
+ unsigned long pc = regs->epc;
+ unsigned long *regs_ptr = (unsigned long *)regs;
+ unsigned int rs1_num, rs2_num;
+ int op_code;
+
+ if (get_kernel_nofault(op_code, (void *)pc))
+ return -EINVAL;
+ if ((op_code & __INSN_LENGTH_MASK) != __INSN_LENGTH_GE_32) {
+ if (is_c_jalr_insn(op_code) || is_c_jr_insn(op_code)) {
+ rs1_num = decode_register_index(op_code, RVC_C2_RS1_OPOFF);
+ *next_addr = regs_ptr[rs1_num];
+ } else if (is_c_j_insn(op_code) || is_c_jal_insn(op_code)) {
+ *next_addr = EXTRACT_RVC_J_IMM(op_code) + pc;
+ } else if (is_c_beqz_insn(op_code)) {
+ rs1_num = decode_register_index_short(op_code,
+ RVC_C1_RS1_OPOFF);
+ if (!rs1_num || regs_ptr[rs1_num] == 0)
+ *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+ else
+ *next_addr = pc + 2;
+ } else if (is_c_bnez_insn(op_code)) {
+ rs1_num =
+ decode_register_index_short(op_code, RVC_C1_RS1_OPOFF);
+ if (rs1_num && regs_ptr[rs1_num] != 0)
+ *next_addr = EXTRACT_RVC_B_IMM(op_code) + pc;
+ else
+ *next_addr = pc + 2;
+ } else {
+ *next_addr = pc + 2;
+ }
+ } else {
+ if ((op_code & __INSN_OPCODE_MASK) == __INSN_BRANCH_OPCODE) {
+ bool result = false;
+ long imm = EXTRACT_BTYPE_IMM(op_code);
+ unsigned long rs1_val = 0, rs2_val = 0;
+
+ rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+ rs2_num = decode_register_index(op_code, RVG_RS2_OPOFF);
+ if (rs1_num)
+ rs1_val = regs_ptr[rs1_num];
+ if (rs2_num)
+ rs2_val = regs_ptr[rs2_num];
+
+ if (is_beq_insn(op_code))
+ result = (rs1_val == rs2_val) ? true : false;
+ else if (is_bne_insn(op_code))
+ result = (rs1_val != rs2_val) ? true : false;
+ else if (is_blt_insn(op_code))
+ result =
+ ((long)rs1_val <
+ (long)rs2_val) ? true : false;
+ else if (is_bge_insn(op_code))
+ result =
+ ((long)rs1_val >=
+ (long)rs2_val) ? true : false;
+ else if (is_bltu_insn(op_code))
+ result = (rs1_val < rs2_val) ? true : false;
+ else if (is_bgeu_insn(op_code))
+ result = (rs1_val >= rs2_val) ? true : false;
+ if (result)
+ *next_addr = imm + pc;
+ else
+ *next_addr = pc + 4;
+ } else if (is_jal_insn(op_code)) {
+ *next_addr = EXTRACT_JTYPE_IMM(op_code) + pc;
+ } else if (is_jalr_insn(op_code)) {
+ rs1_num = decode_register_index(op_code, RVG_RS1_OPOFF);
+ if (rs1_num)
+ *next_addr = ((unsigned long *)regs)[rs1_num];
+ *next_addr += EXTRACT_ITYPE_IMM(op_code);
+ } else if (is_sret_insn(op_code)) {
+ *next_addr = pc;
+ } else {
+ *next_addr = pc + 4;
+ }
+ }
+ return 0;
+}
+
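+/*
+ * Example (illustrative): for "beq a0, a1, . + 16" at address pc, the
+ * code above sets *next_addr to pc + 16 when regs->a0 == regs->a1,
+ * and to pc + 4 otherwise.
+ */
+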
+static int do_single_step(struct pt_regs *regs)
+{
+ /* Determine where the target instruction will send us to */
+ unsigned long addr = 0;
+ int error = get_step_address(regs, &addr);
+
+ if (error)
+ return error;
+
+ /* Store the op code in the stepped address */
+ error = get_kernel_nofault(stepped_opcode, (void *)addr);
+ if (error)
+ return error;
+
+ stepped_address = addr;
+
+ /* Replace the op code with the break instruction */
+ error = copy_to_kernel_nofault((void *)stepped_address,
+ arch_kgdb_ops.gdb_bpt_instr,
+ BREAK_INSTR_SIZE);
+ /* Flush and return */
+ if (!error) {
+ flush_icache_range(addr, addr + BREAK_INSTR_SIZE);
+ kgdb_single_step = 1;
+ atomic_set(&kgdb_cpu_doing_single_step,
+ raw_smp_processor_id());
+ } else {
+ stepped_address = 0;
+ stepped_opcode = 0;
+ }
+ return error;
+}
+
+/* Undo a single step */
+static void undo_single_step(struct pt_regs *regs)
+{
+ if (stepped_opcode != 0) {
+ copy_to_kernel_nofault((void *)stepped_address,
+ (void *)&stepped_opcode, BREAK_INSTR_SIZE);
+ flush_icache_range(stepped_address,
+ stepped_address + BREAK_INSTR_SIZE);
+ }
+ stepped_address = 0;
+ stepped_opcode = 0;
+ kgdb_single_step = 0;
+ atomic_set(&kgdb_cpu_doing_single_step, -1);
+}
+
+struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
+ {DBG_REG_ZERO, GDB_SIZEOF_REG, -1},
+ {DBG_REG_RA, GDB_SIZEOF_REG, offsetof(struct pt_regs, ra)},
+ {DBG_REG_SP, GDB_SIZEOF_REG, offsetof(struct pt_regs, sp)},
+ {DBG_REG_GP, GDB_SIZEOF_REG, offsetof(struct pt_regs, gp)},
+ {DBG_REG_TP, GDB_SIZEOF_REG, offsetof(struct pt_regs, tp)},
+ {DBG_REG_T0, GDB_SIZEOF_REG, offsetof(struct pt_regs, t0)},
+ {DBG_REG_T1, GDB_SIZEOF_REG, offsetof(struct pt_regs, t1)},
+ {DBG_REG_T2, GDB_SIZEOF_REG, offsetof(struct pt_regs, t2)},
+ {DBG_REG_FP, GDB_SIZEOF_REG, offsetof(struct pt_regs, s0)},
+	{DBG_REG_S1, GDB_SIZEOF_REG, offsetof(struct pt_regs, s1)},
+ {DBG_REG_A0, GDB_SIZEOF_REG, offsetof(struct pt_regs, a0)},
+ {DBG_REG_A1, GDB_SIZEOF_REG, offsetof(struct pt_regs, a1)},
+ {DBG_REG_A2, GDB_SIZEOF_REG, offsetof(struct pt_regs, a2)},
+ {DBG_REG_A3, GDB_SIZEOF_REG, offsetof(struct pt_regs, a3)},
+ {DBG_REG_A4, GDB_SIZEOF_REG, offsetof(struct pt_regs, a4)},
+ {DBG_REG_A5, GDB_SIZEOF_REG, offsetof(struct pt_regs, a5)},
+ {DBG_REG_A6, GDB_SIZEOF_REG, offsetof(struct pt_regs, a6)},
+ {DBG_REG_A7, GDB_SIZEOF_REG, offsetof(struct pt_regs, a7)},
+ {DBG_REG_S2, GDB_SIZEOF_REG, offsetof(struct pt_regs, s2)},
+ {DBG_REG_S3, GDB_SIZEOF_REG, offsetof(struct pt_regs, s3)},
+ {DBG_REG_S4, GDB_SIZEOF_REG, offsetof(struct pt_regs, s4)},
+ {DBG_REG_S5, GDB_SIZEOF_REG, offsetof(struct pt_regs, s5)},
+ {DBG_REG_S6, GDB_SIZEOF_REG, offsetof(struct pt_regs, s6)},
+ {DBG_REG_S7, GDB_SIZEOF_REG, offsetof(struct pt_regs, s7)},
+ {DBG_REG_S8, GDB_SIZEOF_REG, offsetof(struct pt_regs, s8)},
+ {DBG_REG_S9, GDB_SIZEOF_REG, offsetof(struct pt_regs, s9)},
+ {DBG_REG_S10, GDB_SIZEOF_REG, offsetof(struct pt_regs, s10)},
+ {DBG_REG_S11, GDB_SIZEOF_REG, offsetof(struct pt_regs, s11)},
+ {DBG_REG_T3, GDB_SIZEOF_REG, offsetof(struct pt_regs, t3)},
+ {DBG_REG_T4, GDB_SIZEOF_REG, offsetof(struct pt_regs, t4)},
+ {DBG_REG_T5, GDB_SIZEOF_REG, offsetof(struct pt_regs, t5)},
+ {DBG_REG_T6, GDB_SIZEOF_REG, offsetof(struct pt_regs, t6)},
+ {DBG_REG_EPC, GDB_SIZEOF_REG, offsetof(struct pt_regs, epc)},
+ {DBG_REG_STATUS, GDB_SIZEOF_REG, offsetof(struct pt_regs, status)},
+ {DBG_REG_BADADDR, GDB_SIZEOF_REG, offsetof(struct pt_regs, badaddr)},
+ {DBG_REG_CAUSE, GDB_SIZEOF_REG, offsetof(struct pt_regs, cause)},
+};
+
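+/*
+ * An offset of -1 marks a register with no pt_regs slot (only x0/zero
+ * here): reads of it return zero and writes to it are silently dropped.
+ */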
+char *dbg_get_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return NULL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy(mem, (void *)regs + dbg_reg_def[regno].offset,
+ dbg_reg_def[regno].size);
+ else
+ memset(mem, 0, dbg_reg_def[regno].size);
+ return dbg_reg_def[regno].name;
+}
+
+int dbg_set_reg(int regno, void *mem, struct pt_regs *regs)
+{
+ if (regno >= DBG_MAX_REG_NUM || regno < 0)
+ return -EINVAL;
+
+ if (dbg_reg_def[regno].offset != -1)
+ memcpy((void *)regs + dbg_reg_def[regno].offset, mem,
+ dbg_reg_def[regno].size);
+ return 0;
+}
+
+void
+sleeping_thread_to_gdb_regs(unsigned long *gdb_regs, struct task_struct *task)
+{
+ /* Initialize to zero */
+ memset((char *)gdb_regs, 0, NUMREGBYTES);
+
+ gdb_regs[DBG_REG_SP_OFF] = task->thread.sp;
+ gdb_regs[DBG_REG_FP_OFF] = task->thread.s[0];
+ gdb_regs[DBG_REG_S1_OFF] = task->thread.s[1];
+ gdb_regs[DBG_REG_S2_OFF] = task->thread.s[2];
+ gdb_regs[DBG_REG_S3_OFF] = task->thread.s[3];
+ gdb_regs[DBG_REG_S4_OFF] = task->thread.s[4];
+ gdb_regs[DBG_REG_S5_OFF] = task->thread.s[5];
+ gdb_regs[DBG_REG_S6_OFF] = task->thread.s[6];
+ gdb_regs[DBG_REG_S7_OFF] = task->thread.s[7];
+ gdb_regs[DBG_REG_S8_OFF] = task->thread.s[8];
+	gdb_regs[DBG_REG_S9_OFF] = task->thread.s[9];
+	gdb_regs[DBG_REG_S10_OFF] = task->thread.s[10];
+	gdb_regs[DBG_REG_S11_OFF] = task->thread.s[11];
+ gdb_regs[DBG_REG_EPC_OFF] = task->thread.ra;
+}
+
+void kgdb_arch_set_pc(struct pt_regs *regs, unsigned long pc)
+{
+ regs->epc = pc;
+}
+
+void kgdb_arch_handle_qxfer_pkt(char *remcom_in_buffer,
+ char *remcom_out_buffer)
+{
+ if (!strncmp(remcom_in_buffer, gdb_xfer_read_target,
+ sizeof(gdb_xfer_read_target)))
+ strcpy(remcom_out_buffer, riscv_gdb_stub_target_desc);
+ else if (!strncmp(remcom_in_buffer, gdb_xfer_read_cpuxml,
+ sizeof(gdb_xfer_read_cpuxml)))
+ strcpy(remcom_out_buffer, riscv_gdb_stub_cpuxml);
+}
+
+static inline void kgdb_arch_update_addr(struct pt_regs *regs,
+ char *remcom_in_buffer)
+{
+ unsigned long addr;
+ char *ptr;
+
+ ptr = &remcom_in_buffer[1];
+ if (kgdb_hex2long(&ptr, &addr))
+ regs->epc = addr;
+}
+
+int kgdb_arch_handle_exception(int vector, int signo, int err_code,
+ char *remcom_in_buffer, char *remcom_out_buffer,
+ struct pt_regs *regs)
+{
+ int err = 0;
+
+ undo_single_step(regs);
+
+ switch (remcom_in_buffer[0]) {
+ case 'c':
+ case 'D':
+ case 'k':
+ if (remcom_in_buffer[0] == 'c')
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ break;
+ case 's':
+ kgdb_arch_update_addr(regs, remcom_in_buffer);
+ err = do_single_step(regs);
+ break;
+ default:
+ err = -1;
+ }
+ return err;
+}
+
+static int kgdb_riscv_kgdbbreak(unsigned long addr)
+{
+ if (stepped_address == addr)
+ return KGDB_SW_SINGLE_STEP;
+ if (atomic_read(&kgdb_setting_breakpoint))
+ if (addr == (unsigned long)&kgdb_compiled_break)
+ return KGDB_COMPILED_BREAK;
+
+ return kgdb_has_hit_break(addr);
+}
+
+static int kgdb_riscv_notify(struct notifier_block *self, unsigned long cmd,
+ void *ptr)
+{
+ struct die_args *args = (struct die_args *)ptr;
+ struct pt_regs *regs = args->regs;
+ unsigned long flags;
+ int type;
+
+ if (user_mode(regs))
+ return NOTIFY_DONE;
+
+ type = kgdb_riscv_kgdbbreak(regs->epc);
+ if (type == NOT_KGDB_BREAK && cmd == DIE_TRAP)
+ return NOTIFY_DONE;
+
+ local_irq_save(flags);
+
+ if (kgdb_handle_exception(type == KGDB_SW_SINGLE_STEP ? 0 : 1,
+ args->signr, cmd, regs))
+ return NOTIFY_DONE;
+
+ if (type == KGDB_COMPILED_BREAK)
+ regs->epc += 4;
+
+ local_irq_restore(flags);
+
+ return NOTIFY_STOP;
+}
+
+static struct notifier_block kgdb_notifier = {
+ .notifier_call = kgdb_riscv_notify,
+};
+
+int kgdb_arch_init(void)
+{
+ register_die_notifier(&kgdb_notifier);
+
+ return 0;
+}
+
+void kgdb_arch_exit(void)
+{
+ unregister_die_notifier(&kgdb_notifier);
+}
+
+/*
+ * Global data
+ */
+#ifdef CONFIG_RISCV_ISA_C
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x02, 0x90}, /* c.ebreak */
+};
+#else
+const struct kgdb_arch arch_kgdb_ops = {
+ .gdb_bpt_instr = {0x73, 0x00, 0x10, 0x00}, /* ebreak */
+};
+#endif
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
new file mode 100644
index 000000000..35a6ed76c
--- /dev/null
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -0,0 +1,239 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm-generic/export.h>
+#include <asm/ftrace.h>
+
+ .text
+
+ .macro SAVE_ABI_STATE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ addi sp, sp, -48
+ sd s0, 32(sp)
+ sd ra, 40(sp)
+ addi s0, sp, 48
+ sd t0, 24(sp)
+ sd t1, 16(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ sd t2, 8(sp)
+#endif
+#else
+ addi sp, sp, -16
+ sd s0, 0(sp)
+ sd ra, 8(sp)
+ addi s0, sp, 16
+#endif
+ .endm
+
+ .macro RESTORE_ABI_STATE
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ ld s0, 32(sp)
+ ld ra, 40(sp)
+ addi sp, sp, 48
+#else
+ ld ra, 8(sp)
+ ld s0, 0(sp)
+ addi sp, sp, 16
+#endif
+ .endm
+
+ .macro RESTORE_GRAPH_ARGS
+ ld a0, 24(sp)
+ ld a1, 16(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ ld a2, 8(sp)
+#endif
+ .endm
+
+ENTRY(ftrace_graph_caller)
+ addi sp, sp, -16
+ sd s0, 0(sp)
+ sd ra, 8(sp)
+ addi s0, sp, 16
+ftrace_graph_call:
+ .global ftrace_graph_call
+ /*
+ * Calling ftrace_enable/disable_ftrace_graph_caller would overwrite the
+ * call below. Check ftrace_modify_all_code for details.
+ */
+ call ftrace_stub
+ ld ra, 8(sp)
+ ld s0, 0(sp)
+ addi sp, sp, 16
+ ret
+ENDPROC(ftrace_graph_caller)
+
+ENTRY(ftrace_caller)
+ /*
+ * a0: the address in the caller when calling ftrace_caller
+ * a1: the caller's return address
+ * a2: the address of global variable function_trace_op
+ */
+ ld a1, -8(s0)
+ addi a0, ra, -MCOUNT_INSN_SIZE
+ la t5, function_trace_op
+ ld a2, 0(t5)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ /*
+	 * The graph tracer (specifically, prepare_ftrace_return) needs these
+	 * arguments, but for now the function tracer occupies the regs, so we
+	 * save them in temporary regs to recover later.
+ */
+ addi t0, s0, -8
+ mv t1, a0
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ ld t2, -16(s0)
+#endif
+#endif
+
+ SAVE_ABI_STATE
+ftrace_call:
+ .global ftrace_call
+ /*
+	 * For dynamic ftrace to work, we must reserve at least 8 bytes here
+	 * for a functional auipc-jalr pair. The following call serves this
+	 * purpose.
+ *
+ * Calling ftrace_update_ftrace_func would overwrite the nops below.
+ * Check ftrace_modify_all_code for details.
+ */
+ call ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ RESTORE_GRAPH_ARGS
+ call ftrace_graph_caller
+#endif
+
+ RESTORE_ABI_STATE
+ ret
+ENDPROC(ftrace_caller)
+
+#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
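+/*
+ * With DYNAMIC_FTRACE_WITH_REGS the tracer may inspect and modify the traced
+ * function's registers, so SAVE_ALL builds a complete struct pt_regs on the
+ * stack (underneath a conventional frame record) before the tracer is called.
+ */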
+ .macro SAVE_ALL
+ addi sp, sp, -(PT_SIZE_ON_STACK+16)
+ sd s0, (PT_SIZE_ON_STACK)(sp)
+ sd ra, (PT_SIZE_ON_STACK+8)(sp)
+ addi s0, sp, (PT_SIZE_ON_STACK+16)
+
+ sd x1, PT_RA(sp)
+ sd x2, PT_SP(sp)
+ sd x3, PT_GP(sp)
+ sd x4, PT_TP(sp)
+ sd x5, PT_T0(sp)
+ sd x6, PT_T1(sp)
+ sd x7, PT_T2(sp)
+ sd x8, PT_S0(sp)
+ sd x9, PT_S1(sp)
+ sd x10, PT_A0(sp)
+ sd x11, PT_A1(sp)
+ sd x12, PT_A2(sp)
+ sd x13, PT_A3(sp)
+ sd x14, PT_A4(sp)
+ sd x15, PT_A5(sp)
+ sd x16, PT_A6(sp)
+ sd x17, PT_A7(sp)
+ sd x18, PT_S2(sp)
+ sd x19, PT_S3(sp)
+ sd x20, PT_S4(sp)
+ sd x21, PT_S5(sp)
+ sd x22, PT_S6(sp)
+ sd x23, PT_S7(sp)
+ sd x24, PT_S8(sp)
+ sd x25, PT_S9(sp)
+ sd x26, PT_S10(sp)
+ sd x27, PT_S11(sp)
+ sd x28, PT_T3(sp)
+ sd x29, PT_T4(sp)
+ sd x30, PT_T5(sp)
+ sd x31, PT_T6(sp)
+ .endm
+
+ .macro RESTORE_ALL
+ ld x1, PT_RA(sp)
+ ld x2, PT_SP(sp)
+ ld x3, PT_GP(sp)
+ ld x4, PT_TP(sp)
+ ld x5, PT_T0(sp)
+ ld x6, PT_T1(sp)
+ ld x7, PT_T2(sp)
+ ld x8, PT_S0(sp)
+ ld x9, PT_S1(sp)
+ ld x10, PT_A0(sp)
+ ld x11, PT_A1(sp)
+ ld x12, PT_A2(sp)
+ ld x13, PT_A3(sp)
+ ld x14, PT_A4(sp)
+ ld x15, PT_A5(sp)
+ ld x16, PT_A6(sp)
+ ld x17, PT_A7(sp)
+ ld x18, PT_S2(sp)
+ ld x19, PT_S3(sp)
+ ld x20, PT_S4(sp)
+ ld x21, PT_S5(sp)
+ ld x22, PT_S6(sp)
+ ld x23, PT_S7(sp)
+ ld x24, PT_S8(sp)
+ ld x25, PT_S9(sp)
+ ld x26, PT_S10(sp)
+ ld x27, PT_S11(sp)
+ ld x28, PT_T3(sp)
+ ld x29, PT_T4(sp)
+ ld x30, PT_T5(sp)
+ ld x31, PT_T6(sp)
+
+ ld s0, (PT_SIZE_ON_STACK)(sp)
+ ld ra, (PT_SIZE_ON_STACK+8)(sp)
+ addi sp, sp, (PT_SIZE_ON_STACK+16)
+ .endm
+
+ .macro RESTORE_GRAPH_REG_ARGS
+ ld a0, PT_T0(sp)
+ ld a1, PT_T1(sp)
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ ld a2, PT_T2(sp)
+#endif
+ .endm
+
+/*
+ * Most of the contents are the same as ftrace_caller.
+ */
+ENTRY(ftrace_regs_caller)
+ /*
+ * a3: the address of all registers in the stack
+ */
+ ld a1, -8(s0)
+ addi a0, ra, -MCOUNT_INSN_SIZE
+ la t5, function_trace_op
+ ld a2, 0(t5)
+ addi a3, sp, -(PT_SIZE_ON_STACK+16)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ addi t0, s0, -8
+ mv t1, a0
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ ld t2, -16(s0)
+#endif
+#endif
+ SAVE_ALL
+
+ftrace_regs_call:
+ .global ftrace_regs_call
+ call ftrace_stub
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ RESTORE_GRAPH_REG_ARGS
+ call ftrace_graph_caller
+#endif
+
+ RESTORE_ALL
+ ret
+ENDPROC(ftrace_regs_caller)
+#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
new file mode 100644
index 000000000..6d462681c
--- /dev/null
+++ b/arch/riscv/kernel/mcount.S
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+#include <asm/unistd.h>
+#include <asm/thread_info.h>
+#include <asm/asm-offsets.h>
+#include <asm-generic/export.h>
+#include <asm/ftrace.h>
+
+ .text
+
+ .macro SAVE_ABI_STATE
+ addi sp, sp, -16
+ sd s0, 0(sp)
+ sd ra, 8(sp)
+ addi s0, sp, 16
+ .endm
+
+ /*
+ * The call to ftrace_return_to_handler would overwrite the return
+ * register if a0 was not saved.
+ */
+ .macro SAVE_RET_ABI_STATE
+ addi sp, sp, -32
+ sd s0, 16(sp)
+ sd ra, 24(sp)
+ sd a0, 8(sp)
+ addi s0, sp, 32
+ .endm
+
+ .macro RESTORE_ABI_STATE
+ ld ra, 8(sp)
+ ld s0, 0(sp)
+ addi sp, sp, 16
+ .endm
+
+ .macro RESTORE_RET_ABI_STATE
+ ld ra, 24(sp)
+ ld s0, 16(sp)
+ ld a0, 8(sp)
+ addi sp, sp, 32
+ .endm
+
+ENTRY(ftrace_stub)
+#ifdef CONFIG_DYNAMIC_FTRACE
+ .global MCOUNT_NAME
+ .set MCOUNT_NAME, ftrace_stub
+#endif
+ ret
+ENDPROC(ftrace_stub)
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ENTRY(return_to_handler)
+/*
+ * When implementing the frame pointer test, the ideal way is to compare the
+ * s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
+ * However, the psABI of variable-length-argument functions does not allow this.
+ *
+ * So alternatively we check the *old* frame pointer position, that is, the
+ * value stored in -16(s0) on entry, and the s0 on return.
+ */
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ mv t6, s0
+#endif
+ SAVE_RET_ABI_STATE
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ mv a0, t6
+#endif
+ call ftrace_return_to_handler
+ mv a1, a0
+ RESTORE_RET_ABI_STATE
+ jalr a1
+ENDPROC(return_to_handler)
+#endif
+
+#ifndef CONFIG_DYNAMIC_FTRACE
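+/*
+ * Without dynamic ftrace every instrumented function calls here
+ * unconditionally, so dispatch by hand: fall through to a plain ret unless
+ * the graph-tracer hooks or ftrace_trace_function have been redirected away
+ * from their stubs.
+ */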
+ENTRY(MCOUNT_NAME)
+ la t4, ftrace_stub
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+ la t0, ftrace_graph_return
+ ld t1, 0(t0)
+ bne t1, t4, do_ftrace_graph_caller
+
+ la t3, ftrace_graph_entry
+ ld t2, 0(t3)
+ la t6, ftrace_graph_entry_stub
+ bne t2, t6, do_ftrace_graph_caller
+#endif
+ la t3, ftrace_trace_function
+ ld t5, 0(t3)
+ bne t5, t4, do_trace
+ ret
+
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+/*
+ * A pseudo representation for the function graph tracer:
+ * prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
+ */
+do_ftrace_graph_caller:
+ addi a0, s0, -8
+ mv a1, ra
+#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
+ ld a2, -16(s0)
+#endif
+ SAVE_ABI_STATE
+ call prepare_ftrace_return
+ RESTORE_ABI_STATE
+ ret
+#endif
+
+/*
+ * A pseudo representation for the function tracer:
+ * (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
+ */
+do_trace:
+ ld a1, -8(s0)
+ mv a0, ra
+
+ SAVE_ABI_STATE
+ jalr t5
+ RESTORE_ABI_STATE
+ ret
+ENDPROC(MCOUNT_NAME)
+#endif
+EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/module-sections.c b/arch/riscv/kernel/module-sections.c
new file mode 100644
index 000000000..e264e59e5
--- /dev/null
+++ b/arch/riscv/kernel/module-sections.c
@@ -0,0 +1,159 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2014-2017 Linaro Ltd. <ard.biesheuvel@linaro.org>
+ *
+ * Copyright (C) 2018 Andes Technology Corporation <zong@andestech.com>
+ */
+
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/moduleloader.h>
+
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val)
+{
+ struct mod_section *got_sec = &mod->arch.got;
+ int i = got_sec->num_entries;
+ struct got_entry *got = get_got_entry(val, got_sec);
+
+ if (got)
+ return (unsigned long)got;
+
+ /* There is no duplicate entry, create a new one */
+ got = (struct got_entry *)got_sec->shdr->sh_addr;
+ got[i] = emit_got_entry(val);
+
+ got_sec->num_entries++;
+ BUG_ON(got_sec->num_entries > got_sec->max_entries);
+
+ return (unsigned long)&got[i];
+}
+
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val)
+{
+ struct mod_section *got_plt_sec = &mod->arch.got_plt;
+ struct got_entry *got_plt;
+ struct mod_section *plt_sec = &mod->arch.plt;
+ struct plt_entry *plt = get_plt_entry(val, plt_sec, got_plt_sec);
+ int i = plt_sec->num_entries;
+
+ if (plt)
+ return (unsigned long)plt;
+
+ /* There is no duplicate entry, create a new one */
+ got_plt = (struct got_entry *)got_plt_sec->shdr->sh_addr;
+ got_plt[i] = emit_got_entry(val);
+ plt = (struct plt_entry *)plt_sec->shdr->sh_addr;
+ plt[i] = emit_plt_entry(val,
+ (unsigned long)&plt[i],
+ (unsigned long)&got_plt[i]);
+
+ plt_sec->num_entries++;
+ got_plt_sec->num_entries++;
+ BUG_ON(plt_sec->num_entries > plt_sec->max_entries);
+
+ return (unsigned long)&plt[i];
+}
+
+static int is_rela_equal(const Elf_Rela *x, const Elf_Rela *y)
+{
+ return x->r_info == y->r_info && x->r_addend == y->r_addend;
+}
+
+static bool duplicate_rela(const Elf_Rela *rela, int idx)
+{
+ int i;
+ for (i = 0; i < idx; i++) {
+ if (is_rela_equal(&rela[i], &rela[idx]))
+ return true;
+ }
+ return false;
+}
+
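+/*
+ * Scan a relocation table and count how many distinct GOT and PLT slots will
+ * be needed; the duplicate_rela() check keeps a symbol that is referenced
+ * multiple times from consuming more than one slot.
+ */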
+static void count_max_entries(Elf_Rela *relas, int num,
+ unsigned int *plts, unsigned int *gots)
+{
+ unsigned int type, i;
+
+ for (i = 0; i < num; i++) {
+ type = ELF_RISCV_R_TYPE(relas[i].r_info);
+ if (type == R_RISCV_CALL_PLT) {
+ if (!duplicate_rela(relas, i))
+ (*plts)++;
+ } else if (type == R_RISCV_GOT_HI20) {
+ if (!duplicate_rela(relas, i))
+ (*gots)++;
+ }
+ }
+}
+
+int module_frob_arch_sections(Elf_Ehdr *ehdr, Elf_Shdr *sechdrs,
+ char *secstrings, struct module *mod)
+{
+ unsigned int num_plts = 0;
+ unsigned int num_gots = 0;
+ int i;
+
+ /*
+ * Find the empty .got and .plt sections.
+ */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ if (!strcmp(secstrings + sechdrs[i].sh_name, ".plt"))
+ mod->arch.plt.shdr = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got"))
+ mod->arch.got.shdr = sechdrs + i;
+ else if (!strcmp(secstrings + sechdrs[i].sh_name, ".got.plt"))
+ mod->arch.got_plt.shdr = sechdrs + i;
+ }
+
+ if (!mod->arch.plt.shdr) {
+ pr_err("%s: module PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.got.shdr) {
+ pr_err("%s: module GOT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+ if (!mod->arch.got_plt.shdr) {
+ pr_err("%s: module GOT.PLT section(s) missing\n", mod->name);
+ return -ENOEXEC;
+ }
+
+	/* Calculate the maximum number of entries */
+ for (i = 0; i < ehdr->e_shnum; i++) {
+ Elf_Rela *relas = (void *)ehdr + sechdrs[i].sh_offset;
+ int num_rela = sechdrs[i].sh_size / sizeof(Elf_Rela);
+ Elf_Shdr *dst_sec = sechdrs + sechdrs[i].sh_info;
+
+ if (sechdrs[i].sh_type != SHT_RELA)
+ continue;
+
+ /* ignore relocations that operate on non-exec sections */
+ if (!(dst_sec->sh_flags & SHF_EXECINSTR))
+ continue;
+
+ count_max_entries(relas, num_rela, &num_plts, &num_gots);
+ }
+
+ mod->arch.plt.shdr->sh_type = SHT_NOBITS;
+ mod->arch.plt.shdr->sh_flags = SHF_EXECINSTR | SHF_ALLOC;
+ mod->arch.plt.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.plt.shdr->sh_size = (num_plts + 1) * sizeof(struct plt_entry);
+ mod->arch.plt.num_entries = 0;
+ mod->arch.plt.max_entries = num_plts;
+
+ mod->arch.got.shdr->sh_type = SHT_NOBITS;
+ mod->arch.got.shdr->sh_flags = SHF_ALLOC;
+ mod->arch.got.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.got.shdr->sh_size = (num_gots + 1) * sizeof(struct got_entry);
+ mod->arch.got.num_entries = 0;
+ mod->arch.got.max_entries = num_gots;
+
+ mod->arch.got_plt.shdr->sh_type = SHT_NOBITS;
+ mod->arch.got_plt.shdr->sh_flags = SHF_ALLOC;
+ mod->arch.got_plt.shdr->sh_addralign = L1_CACHE_BYTES;
+ mod->arch.got_plt.shdr->sh_size = (num_plts + 1) * sizeof(struct got_entry);
+ mod->arch.got_plt.num_entries = 0;
+ mod->arch.got_plt.max_entries = num_plts;
+ return 0;
+}
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
new file mode 100644
index 000000000..c3310a68a
--- /dev/null
+++ b/arch/riscv/kernel/module.c
@@ -0,0 +1,431 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ *
+ * Copyright (C) 2017 Zihao Yu
+ */
+
+#include <linux/elf.h>
+#include <linux/err.h>
+#include <linux/errno.h>
+#include <linux/moduleloader.h>
+#include <linux/vmalloc.h>
+#include <linux/sizes.h>
+#include <linux/pgtable.h>
+#include <asm/sections.h>
+
+/*
+ * The auipc+jalr instruction pair can reach any PC-relative offset
+ * in the range [-2^31 - 2^11, 2^31 - 2^11)
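+ * (auipc supplies a sign-extended 20-bit upper immediate and jalr a
+ * sign-extended 12-bit low immediate, which is why both bounds sit 2^11
+ * below the naive +/-2^31 limits).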
+ */
+static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
+{
+#ifdef CONFIG_32BIT
+ return true;
+#else
+ return (-(1L << 31) - (1L << 11)) <= val && val < ((1L << 31) - (1L << 11));
+#endif
+}
+
+static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ if (v != (u32)v) {
+ pr_err("%s: value %016llx out of range for 32-bit field\n",
+ me->name, (long long)v);
+ return -EINVAL;
+ }
+ *location = v;
+ return 0;
+}
+
+static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ *(u64 *)location = v;
+ return 0;
+}
+
+static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ u32 imm12 = (offset & 0x1000) << (31 - 12);
+ u32 imm11 = (offset & 0x800) >> (11 - 7);
+ u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
+ u32 imm4_1 = (offset & 0x1e) << (11 - 4);
+
+ *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
+ return 0;
+}
+
+static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ u32 imm20 = (offset & 0x100000) << (31 - 20);
+ u32 imm19_12 = (offset & 0xff000);
+ u32 imm11 = (offset & 0x800) << (20 - 11);
+ u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
+
+ *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
+ return 0;
+}
+
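+/*
+ * c.beqz/c.bnez (CB format) scatter the branch offset across the 16-bit
+ * instruction: offset[8|4:3] lands in bits 12:10 and offset[7:6|2:1|5] in
+ * bits 6:2; the 0xe383 mask below preserves the funct3, rs1' and opcode
+ * fields while the immediate fields are rewritten.
+ */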
+static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ u16 imm8 = (offset & 0x100) << (12 - 8);
+ u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
+ u16 imm5 = (offset & 0x20) >> (5 - 2);
+ u16 imm4_3 = (offset & 0x18) << (12 - 5);
+ u16 imm2_1 = (offset & 0x6) << (12 - 10);
+
+ *(u16 *)location = (*(u16 *)location & 0xe383) |
+ imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
+ return 0;
+}
+
+static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ u16 imm11 = (offset & 0x800) << (12 - 11);
+ u16 imm10 = (offset & 0x400) >> (10 - 8);
+ u16 imm9_8 = (offset & 0x300) << (12 - 11);
+ u16 imm7 = (offset & 0x80) >> (7 - 6);
+ u16 imm6 = (offset & 0x40) << (12 - 11);
+ u16 imm5 = (offset & 0x20) >> (5 - 2);
+ u16 imm4 = (offset & 0x10) << (12 - 5);
+ u16 imm3_1 = (offset & 0xe) << (12 - 10);
+
+ *(u16 *)location = (*(u16 *)location & 0xe003) |
+ imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
+ return 0;
+}
+
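+/*
+ * PCREL_HI20 fills the 20-bit immediate of the auipc in an auipc/addi (or
+ * load/store) pair.  Adding 0x800 before masking rounds hi20 up whenever the
+ * paired lo12 is negative as a sign-extended 12-bit value: for an offset of
+ * 0x12345678, hi20 = (0x12345678 + 0x800) & 0xfffff000 = 0x12345000 with
+ * lo12 = 0x678, while for 0x12345fff, hi20 = 0x12346000 with lo12 = -1.
+ */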
+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ s32 hi20;
+
+ if (!riscv_insn_valid_32bit_offset(offset)) {
+ pr_err(
+ "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+
+ hi20 = (offset + 0x800) & 0xfffff000;
+ *location = (*location & 0xfff) | hi20;
+ return 0;
+}
+
+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ /*
+ * v is the lo12 value to fill. It is calculated before calling this
+ * handler.
+ */
+ *location = (*location & 0xfffff) | ((v & 0xfff) << 20);
+ return 0;
+}
+
+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ /*
+ * v is the lo12 value to fill. It is calculated before calling this
+ * handler.
+ */
+ u32 imm11_5 = (v & 0xfe0) << (31 - 11);
+ u32 imm4_0 = (v & 0x1f) << (11 - 4);
+
+ *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
+ return 0;
+}
+
+static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ s32 hi20;
+
+ if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
+ pr_err(
+ "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+
+ hi20 = ((s32)v + 0x800) & 0xfffff000;
+ *location = (*location & 0xfff) | hi20;
+ return 0;
+}
+
+static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ /* Skip medlow checking because of filtering by HI20 already */
+ s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
+ s32 lo12 = ((s32)v - hi20);
+ *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
+ return 0;
+}
+
+static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ /* Skip medlow checking because of filtering by HI20 already */
+ s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
+ s32 lo12 = ((s32)v - hi20);
+ u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
+ u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
+ *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
+ return 0;
+}
+
+static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ s32 hi20;
+
+ /* Always emit the got entry */
+ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+ offset = module_emit_got_entry(me, v);
+ offset = (void *)offset - (void *)location;
+ } else {
+ pr_err(
+ "%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+
+ hi20 = (offset + 0x800) & 0xfffff000;
+ *location = (*location & 0xfff) | hi20;
+ return 0;
+}
+
+static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ u32 hi20, lo12;
+
+ if (!riscv_insn_valid_32bit_offset(offset)) {
+		/* Only emit the plt entry if the offset is over the 32-bit range */
+ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+ offset = module_emit_plt_entry(me, v);
+ offset = (void *)offset - (void *)location;
+ } else {
+ pr_err(
+ "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+ }
+
+ hi20 = (offset + 0x800) & 0xfffff000;
+ lo12 = (offset - hi20) & 0xfff;
+ *location = (*location & 0xfff) | hi20;
+ *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
+ return 0;
+}
+
+static int apply_r_riscv_call_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - (void *)location;
+ u32 hi20, lo12;
+
+ if (!riscv_insn_valid_32bit_offset(offset)) {
+ pr_err(
+ "%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+
+ hi20 = (offset + 0x800) & 0xfffff000;
+ lo12 = (offset - hi20) & 0xfff;
+ *location = (*location & 0xfff) | hi20;
+ *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
+ return 0;
+}
+
+static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ return 0;
+}
+
+static int apply_r_riscv_align_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ pr_err(
+ "%s: The unexpected relocation type 'R_RISCV_ALIGN' from PC = %p\n",
+ me->name, location);
+ return -EINVAL;
+}
+
+static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *(u32 *)location += (u32)v;
+ return 0;
+}
+
+static int apply_r_riscv_add64_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *(u64 *)location += (u64)v;
+ return 0;
+}
+
+static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *(u32 *)location -= (u32)v;
+ return 0;
+}
+
+static int apply_r_riscv_sub64_rela(struct module *me, u32 *location,
+ Elf_Addr v)
+{
+ *(u64 *)location -= (u64)v;
+ return 0;
+}
+
+static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
+ Elf_Addr v) = {
+ [R_RISCV_32] = apply_r_riscv_32_rela,
+ [R_RISCV_64] = apply_r_riscv_64_rela,
+ [R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
+ [R_RISCV_JAL] = apply_r_riscv_jal_rela,
+	[R_RISCV_RVC_BRANCH]		= apply_r_riscv_rvc_branch_rela,
+ [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela,
+ [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
+ [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
+ [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela,
+ [R_RISCV_HI20] = apply_r_riscv_hi20_rela,
+ [R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela,
+ [R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela,
+ [R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela,
+ [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela,
+ [R_RISCV_CALL] = apply_r_riscv_call_rela,
+ [R_RISCV_RELAX] = apply_r_riscv_relax_rela,
+ [R_RISCV_ALIGN] = apply_r_riscv_align_rela,
+ [R_RISCV_ADD32] = apply_r_riscv_add32_rela,
+ [R_RISCV_ADD64] = apply_r_riscv_add64_rela,
+ [R_RISCV_SUB32] = apply_r_riscv_sub32_rela,
+ [R_RISCV_SUB64] = apply_r_riscv_sub64_rela,
+};
+
+int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+ Elf_Sym *sym;
+ u32 *location;
+ unsigned int i, type;
+ Elf_Addr v;
+ int res;
+
+ pr_debug("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ /* This is where to make the change */
+ location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[i].r_offset;
+ /* This is the symbol it is referring to */
+ sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_RISCV_R_SYM(rel[i].r_info);
+ if (IS_ERR_VALUE(sym->st_value)) {
+ /* Ignore unresolved weak symbol */
+ if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
+ continue;
+ pr_warn("%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOENT;
+ }
+
+ type = ELF_RISCV_R_TYPE(rel[i].r_info);
+
+ if (type < ARRAY_SIZE(reloc_handlers_rela))
+ handler = reloc_handlers_rela[type];
+ else
+ handler = NULL;
+
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n",
+ me->name, type);
+ return -EINVAL;
+ }
+
+ v = sym->st_value + rel[i].r_addend;
+
+ if (type == R_RISCV_PCREL_LO12_I || type == R_RISCV_PCREL_LO12_S) {
+ unsigned int j;
+
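+			/*
+			 * A PCREL_LO12 relocation's symbol points at the
+			 * paired HI20 (auipc) instruction rather than at the
+			 * final target, so scan this section for the HI20
+			 * entry at that address and rederive the low 12 bits
+			 * from its real target.
+			 */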
+ for (j = 0; j < sechdrs[relsec].sh_size / sizeof(*rel); j++) {
+ unsigned long hi20_loc =
+ sechdrs[sechdrs[relsec].sh_info].sh_addr
+ + rel[j].r_offset;
+ u32 hi20_type = ELF_RISCV_R_TYPE(rel[j].r_info);
+
+ /* Find the corresponding HI20 relocation entry */
+ if (hi20_loc == sym->st_value
+ && (hi20_type == R_RISCV_PCREL_HI20
+ || hi20_type == R_RISCV_GOT_HI20)) {
+ s32 hi20, lo12;
+ Elf_Sym *hi20_sym =
+ (Elf_Sym *)sechdrs[symindex].sh_addr
+ + ELF_RISCV_R_SYM(rel[j].r_info);
+ unsigned long hi20_sym_val =
+ hi20_sym->st_value
+ + rel[j].r_addend;
+
+ /* Calculate lo12 */
+ size_t offset = hi20_sym_val - hi20_loc;
+ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)
+ && hi20_type == R_RISCV_GOT_HI20) {
+ offset = module_emit_got_entry(
+ me, hi20_sym_val);
+ offset = offset - hi20_loc;
+ }
+ hi20 = (offset + 0x800) & 0xfffff000;
+ lo12 = offset - hi20;
+ v = lo12;
+
+ break;
+ }
+ }
+ if (j == sechdrs[relsec].sh_size / sizeof(*rel)) {
+ pr_err(
+ "%s: Can not find HI20 relocation information\n",
+ me->name);
+ return -EINVAL;
+ }
+ }
+
+ res = handler(me, location, v);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+#if defined(CONFIG_MMU) && defined(CONFIG_64BIT)
+#define VMALLOC_MODULE_START \
+ max(PFN_ALIGN((unsigned long)&_end - SZ_2G), VMALLOC_START)
+void *module_alloc(unsigned long size)
+{
+ return __vmalloc_node_range(size, 1, VMALLOC_MODULE_START,
+ VMALLOC_END, GFP_KERNEL,
+ PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+#endif
diff --git a/arch/riscv/kernel/patch.c b/arch/riscv/kernel/patch.c
new file mode 100644
index 000000000..c3fced410
--- /dev/null
+++ b/arch/riscv/kernel/patch.c
@@ -0,0 +1,151 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/spinlock.h>
+#include <linux/mm.h>
+#include <linux/memory.h>
+#include <linux/uaccess.h>
+#include <linux/stop_machine.h>
+#include <asm/kprobes.h>
+#include <asm/cacheflush.h>
+#include <asm/fixmap.h>
+#include <asm/ftrace.h>
+#include <asm/patch.h>
+
+struct patch_insn {
+ void *addr;
+ u32 insn;
+ atomic_t cpu_count;
+};
+
+int riscv_patch_in_stop_machine = false;
+
+#ifdef CONFIG_MMU
+static void *patch_map(void *addr, int fixmap)
+{
+ uintptr_t uintaddr = (uintptr_t) addr;
+ struct page *page;
+
+ if (core_kernel_text(uintaddr))
+ page = phys_to_page(__pa_symbol(addr));
+ else if (IS_ENABLED(CONFIG_STRICT_MODULE_RWX))
+ page = vmalloc_to_page(addr);
+ else
+ return addr;
+
+ BUG_ON(!page);
+
+ return (void *)set_fixmap_offset(fixmap, page_to_phys(page) +
+ (uintaddr & ~PAGE_MASK));
+}
+NOKPROBE_SYMBOL(patch_map);
+
+static void patch_unmap(int fixmap)
+{
+ clear_fixmap(fixmap);
+}
+NOKPROBE_SYMBOL(patch_unmap);
+
+static int patch_insn_write(void *addr, const void *insn, size_t len)
+{
+ void *waddr = addr;
+ bool across_pages = (((uintptr_t) addr & ~PAGE_MASK) + len) > PAGE_SIZE;
+ int ret;
+
+ /*
+	 * The caller is expected to hold text_mutex by the time we get here,
+	 * so no extra locking is needed and the patch stays safe across
+	 * cores.
+ *
+ * We're currently using stop_machine() for ftrace & kprobes, and while
+ * that ensures text_mutex is held before installing the mappings it
+ * does not ensure text_mutex is held by the calling thread. That's
+ * safe but triggers a lockdep failure, so just elide it for that
+ * specific case.
+ */
+ if (!riscv_patch_in_stop_machine)
+ lockdep_assert_held(&text_mutex);
+
+ if (across_pages)
+ patch_map(addr + len, FIX_TEXT_POKE1);
+
+ waddr = patch_map(addr, FIX_TEXT_POKE0);
+
+ ret = copy_to_kernel_nofault(waddr, insn, len);
+
+ patch_unmap(FIX_TEXT_POKE0);
+
+ if (across_pages)
+ patch_unmap(FIX_TEXT_POKE1);
+
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_insn_write);
+#else
+static int patch_insn_write(void *addr, const void *insn, size_t len)
+{
+ return copy_to_kernel_nofault(addr, insn, len);
+}
+NOKPROBE_SYMBOL(patch_insn_write);
+#endif /* CONFIG_MMU */
+
+int patch_text_nosync(void *addr, const void *insns, size_t len)
+{
+ u32 *tp = addr;
+ int ret;
+
+ ret = patch_insn_write(tp, insns, len);
+
+ if (!ret)
+ flush_icache_range((uintptr_t) tp, (uintptr_t) tp + len);
+
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_text_nosync);
+
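+/*
+ * Executed on every online CPU by stop_machine(): the last CPU to arrive
+ * performs the patch, then bumps cpu_count once more; the other CPUs spin in
+ * cpu_relax() until that final increment, so no CPU can execute the
+ * instruction while it is being rewritten.
+ */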
+static int patch_text_cb(void *data)
+{
+ struct patch_insn *patch = data;
+ int ret = 0;
+
+ if (atomic_inc_return(&patch->cpu_count) == num_online_cpus()) {
+		ret = patch_text_nosync(patch->addr, &patch->insn,
+					GET_INSN_LENGTH(patch->insn));
+ atomic_inc(&patch->cpu_count);
+ } else {
+ while (atomic_read(&patch->cpu_count) <= num_online_cpus())
+ cpu_relax();
+ smp_mb();
+ }
+
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_text_cb);
+
+int patch_text(void *addr, u32 insn)
+{
+ int ret;
+ struct patch_insn patch = {
+ .addr = addr,
+ .insn = insn,
+ .cpu_count = ATOMIC_INIT(0),
+ };
+
+ /*
+	 * kprobes takes text_mutex before calling patch_text(), but as we
+	 * then call stop_machine(), the lockdep assertion in patch_insn_write()
+ * gets confused by the context in which the lock is taken.
+ * Instead, ensure the lock is held before calling stop_machine(), and
+ * set riscv_patch_in_stop_machine to skip the check in
+ * patch_insn_write().
+ */
+ lockdep_assert_held(&text_mutex);
+ riscv_patch_in_stop_machine = true;
+ ret = stop_machine_cpuslocked(patch_text_cb, &patch, cpu_online_mask);
+ riscv_patch_in_stop_machine = false;
+ return ret;
+}
+NOKPROBE_SYMBOL(patch_text);
diff --git a/arch/riscv/kernel/perf_callchain.c b/arch/riscv/kernel/perf_callchain.c
new file mode 100644
index 000000000..fb02811df
--- /dev/null
+++ b/arch/riscv/kernel/perf_callchain.c
@@ -0,0 +1,97 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#include <linux/perf_event.h>
+#include <linux/uaccess.h>
+
+/* Kernel callchain */
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+/*
+ * Get the return address for a single stackframe and return a pointer to the
+ * next frame tail.
+ */
+static unsigned long user_backtrace(struct perf_callchain_entry_ctx *entry,
+ unsigned long fp, unsigned long reg_ra)
+{
+ struct stackframe buftail;
+ unsigned long ra = 0;
+ unsigned long __user *user_frame_tail =
+ (unsigned long __user *)(fp - sizeof(struct stackframe));
+
+ /* Check accessibility of one struct frame_tail beyond */
+ if (!access_ok(user_frame_tail, sizeof(buftail)))
+ return 0;
+ if (__copy_from_user_inatomic(&buftail, user_frame_tail,
+ sizeof(buftail)))
+ return 0;
+
+ if (reg_ra != 0)
+ ra = reg_ra;
+ else
+ ra = buftail.ra;
+
+ fp = buftail.fp;
+ if (ra != 0)
+ perf_callchain_store(entry, ra);
+ else
+ return 0;
+
+ return fp;
+}
+
+/*
+ * This will be called when the target is in user mode.
+ * This function will only be called when we use
+ * "PERF_SAMPLE_CALLCHAIN" in
+ * kernel/events/core.c:perf_prepare_sample()
+ *
+ * How to trigger perf_callchain_[user/kernel] :
+ * $ perf record -e cpu-clock --call-graph fp ./program
+ * $ perf report --call-graph
+ *
+ * On RISC-V platform, the program being sampled and the C library
+ * need to be compiled with -fno-omit-frame-pointer, otherwise
+ * the user stack will not contain frame records.
+ */
+void perf_callchain_user(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+ unsigned long fp = 0;
+
+ /* RISC-V does not support perf in guest mode. */
+ if (guest_cbs && guest_cbs->is_in_guest())
+ return;
+
+ fp = regs->s0;
+ perf_callchain_store(entry, regs->epc);
+
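+	/*
+	 * For the first frame take the return address from regs->ra, since a
+	 * leaf function may not have spilled ra into its frame record yet;
+	 * deeper frames read ra from the saved stackframe instead.
+	 */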
+ fp = user_backtrace(entry, fp, regs->ra);
+ while (fp && !(fp & 0x3) && entry->nr < entry->max_stack)
+ fp = user_backtrace(entry, fp, 0);
+}
+
+static bool fill_callchain(unsigned long pc, void *entry)
+{
+ return perf_callchain_store(entry, pc) == 0;
+}
+
+void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg);
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
+{
+ struct perf_guest_info_callbacks *guest_cbs = perf_get_guest_cbs();
+
+ /* RISC-V does not support perf in guest mode. */
+ if (guest_cbs && guest_cbs->is_in_guest()) {
+		pr_warn("RISC-V does not support perf in guest mode!\n");
+ return;
+ }
+
+ walk_stackframe(NULL, regs, fill_callchain, entry);
+}
diff --git a/arch/riscv/kernel/perf_event.c b/arch/riscv/kernel/perf_event.c
new file mode 100644
index 000000000..c835f0362
--- /dev/null
+++ b/arch/riscv/kernel/perf_event.c
@@ -0,0 +1,485 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
+ * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
+ * Copyright (C) 2009 Jaswinder Singh Rajput
+ * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
+ * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
+ * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
+ * Copyright (C) 2009 Google, Inc., Stephane Eranian
+ * Copyright 2014 Tilera Corporation. All Rights Reserved.
+ * Copyright (C) 2018 Andes Technology Corporation
+ *
+ * Perf_events support for RISC-V platforms.
+ *
+ * Since the spec. (as of now, Priv-Spec 1.10) does not provide enough
+ * functionality for perf event to fully work, this file provides
+ * the very basic framework only.
+ *
+ * For platform porting, please check Documentation/riscv/pmu.txt.
+ *
+ * The copyright lines above include the x86 and tile ones.
+ */
+
+#include <linux/kprobes.h>
+#include <linux/kernel.h>
+#include <linux/kdebug.h>
+#include <linux/mutex.h>
+#include <linux/bitmap.h>
+#include <linux/irq.h>
+#include <linux/perf_event.h>
+#include <linux/atomic.h>
+#include <linux/of.h>
+#include <asm/perf_event.h>
+
+static const struct riscv_pmu *riscv_pmu __read_mostly;
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
+
+/*
+ * Hardware & cache maps and their methods
+ */
+
+static const int riscv_hw_event_map[] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = RISCV_PMU_CYCLE,
+ [PERF_COUNT_HW_INSTRUCTIONS] = RISCV_PMU_INSTRET,
+ [PERF_COUNT_HW_CACHE_REFERENCES] = RISCV_OP_UNSUPP,
+ [PERF_COUNT_HW_CACHE_MISSES] = RISCV_OP_UNSUPP,
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = RISCV_OP_UNSUPP,
+ [PERF_COUNT_HW_BRANCH_MISSES] = RISCV_OP_UNSUPP,
+ [PERF_COUNT_HW_BUS_CYCLES] = RISCV_OP_UNSUPP,
+};
+
+#define C(x) PERF_COUNT_HW_CACHE_##x
+static const int riscv_cache_event_map[PERF_COUNT_HW_CACHE_MAX]
+[PERF_COUNT_HW_CACHE_OP_MAX]
+[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+ [C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ },
+ [C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ },
+ [C(LL)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ },
+ [C(DTLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ },
+ [C(ITLB)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ },
+ [C(BPU)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ [C(OP_PREFETCH)] = {
+ [C(RESULT_ACCESS)] = RISCV_OP_UNSUPP,
+ [C(RESULT_MISS)] = RISCV_OP_UNSUPP,
+ },
+ },
+};
+
+static int riscv_map_hw_event(u64 config)
+{
+ if (config >= riscv_pmu->max_events)
+ return -EINVAL;
+
+ return riscv_pmu->hw_events[config];
+}
+
+static int riscv_map_cache_decode(u64 config, unsigned int *type,
+ unsigned int *op, unsigned int *result)
+{
+ return -ENOENT;
+}
+
+static int riscv_map_cache_event(u64 config)
+{
+ unsigned int type, op, result;
+ int err = -ENOENT;
+ int code;
+
+ err = riscv_map_cache_decode(config, &type, &op, &result);
+ if (!riscv_pmu->cache_events || err)
+ return err;
+
+ if (type >= PERF_COUNT_HW_CACHE_MAX ||
+ op >= PERF_COUNT_HW_CACHE_OP_MAX ||
+ result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
+ return -EINVAL;
+
+ code = (*riscv_pmu->cache_events)[type][op][result];
+ if (code == RISCV_OP_UNSUPP)
+ return -EINVAL;
+
+ return code;
+}
+
+/*
+ * Low-level functions: reading/writing counters
+ */
+
+static inline u64 read_counter(int idx)
+{
+ u64 val = 0;
+
+ switch (idx) {
+ case RISCV_PMU_CYCLE:
+ val = csr_read(CSR_CYCLE);
+ break;
+ case RISCV_PMU_INSTRET:
+ val = csr_read(CSR_INSTRET);
+ break;
+ default:
+ WARN_ON_ONCE(idx < 0 || idx > RISCV_MAX_COUNTERS);
+ return -EINVAL;
+ }
+
+ return val;
+}
+
+static inline void write_counter(int idx, u64 value)
+{
+ /* currently not supported */
+ WARN_ON_ONCE(1);
+}
+
+/*
+ * pmu->read: read and update the counter
+ *
+ * Other architectures' implementations often have an xxx_perf_event_update
+ * routine, which can return counter values when called in the IRQ, but
+ * returns void when called by the pmu->read method.
+ */
+static void riscv_pmu_read(struct perf_event *event)
+{
+ struct hw_perf_event *hwc = &event->hw;
+ u64 prev_raw_count, new_raw_count;
+ u64 oldval;
+ int idx = hwc->idx;
+ u64 delta;
+
+ do {
+ prev_raw_count = local64_read(&hwc->prev_count);
+ new_raw_count = read_counter(idx);
+
+ oldval = local64_cmpxchg(&hwc->prev_count, prev_raw_count,
+ new_raw_count);
+ } while (oldval != prev_raw_count);
+
+ /*
+ * delta is the value to update the counter we maintain in the kernel.
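+	 * Masking with ((1ULL << counter_width) - 1) reduces the subtraction
+	 * modulo 2^counter_width, so a single hardware-counter wrap between
+	 * two reads is still accounted correctly.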
+ */
+ delta = (new_raw_count - prev_raw_count) &
+ ((1ULL << riscv_pmu->counter_width) - 1);
+ local64_add(delta, &event->count);
+ /*
+ * Something like local64_sub(delta, &hwc->period_left) here is
+ * needed if there is an interrupt for perf.
+ */
+}
+
+/*
+ * State transition functions:
+ *
+ * stop()/start() & add()/del()
+ */
+
+/*
+ * pmu->stop: stop the counter
+ */
+static void riscv_pmu_stop(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
+ hwc->state |= PERF_HES_STOPPED;
+
+ if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
+ riscv_pmu->pmu->read(event);
+ hwc->state |= PERF_HES_UPTODATE;
+ }
+}
+
+/*
+ * pmu->start: start the event.
+ */
+static void riscv_pmu_start(struct perf_event *event, int flags)
+{
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
+ return;
+
+ if (flags & PERF_EF_RELOAD) {
+ WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
+
+ /*
+ * Set the counter to the period to the next interrupt here,
+ * if you have any.
+ */
+ }
+
+ hwc->state = 0;
+ perf_event_update_userpage(event);
+
+ /*
+ * Since we cannot write to counters, this serves as an initialization
+ * to the delta-mechanism in pmu->read(); otherwise, the delta would be
+ * wrong when pmu->read is called for the first time.
+ */
+ local64_set(&hwc->prev_count, read_counter(hwc->idx));
+}
+
+/*
+ * pmu->add: add the event to PMU.
+ */
+static int riscv_pmu_add(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ if (cpuc->n_events == riscv_pmu->num_counters)
+ return -ENOSPC;
+
+ /*
+	 * We don't have general counters, so no binding-event-to-counter
+	 * process here.
+	 *
+	 * Indexing using hwc->config generally does not work, since config may
+ * contain extra information, but here the only info we have in
+ * hwc->config is the event index.
+ */
+ hwc->idx = hwc->config;
+ cpuc->events[hwc->idx] = event;
+ cpuc->n_events++;
+
+ hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
+
+ if (flags & PERF_EF_START)
+ riscv_pmu->pmu->start(event, PERF_EF_RELOAD);
+
+ return 0;
+}
+
+/*
+ * pmu->del: delete the event from PMU.
+ */
+static void riscv_pmu_del(struct perf_event *event, int flags)
+{
+ struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
+ struct hw_perf_event *hwc = &event->hw;
+
+ cpuc->events[hwc->idx] = NULL;
+ cpuc->n_events--;
+ riscv_pmu->pmu->stop(event, PERF_EF_UPDATE);
+ perf_event_update_userpage(event);
+}
+
+/*
+ * Interrupt: a skeleton for reference.
+ */
+
+static DEFINE_MUTEX(pmc_reserve_mutex);
+
+static irqreturn_t riscv_base_pmu_handle_irq(int irq_num, void *dev)
+{
+ return IRQ_NONE;
+}
+
+static int reserve_pmc_hardware(void)
+{
+ int err = 0;
+
+ mutex_lock(&pmc_reserve_mutex);
+ if (riscv_pmu->irq >= 0 && riscv_pmu->handle_irq) {
+ err = request_irq(riscv_pmu->irq, riscv_pmu->handle_irq,
+ IRQF_PERCPU, "riscv-base-perf", NULL);
+ }
+ mutex_unlock(&pmc_reserve_mutex);
+
+ return err;
+}
+
+static void release_pmc_hardware(void)
+{
+ mutex_lock(&pmc_reserve_mutex);
+ if (riscv_pmu->irq >= 0)
+ free_irq(riscv_pmu->irq, NULL);
+ mutex_unlock(&pmc_reserve_mutex);
+}
+
+/*
+ * Event Initialization/Finalization
+ */
+
+static atomic_t riscv_active_events = ATOMIC_INIT(0);
+
+static void riscv_event_destroy(struct perf_event *event)
+{
+ if (atomic_dec_return(&riscv_active_events) == 0)
+ release_pmc_hardware();
+}
+
+static int riscv_event_init(struct perf_event *event)
+{
+ struct perf_event_attr *attr = &event->attr;
+ struct hw_perf_event *hwc = &event->hw;
+ int err;
+ int code;
+
+ if (atomic_inc_return(&riscv_active_events) == 1) {
+ err = reserve_pmc_hardware();
+
+ if (err) {
+ pr_warn("PMC hardware not available\n");
+ atomic_dec(&riscv_active_events);
+ return -EBUSY;
+ }
+ }
+
+ switch (event->attr.type) {
+ case PERF_TYPE_HARDWARE:
+ code = riscv_pmu->map_hw_event(attr->config);
+ break;
+ case PERF_TYPE_HW_CACHE:
+ code = riscv_pmu->map_cache_event(attr->config);
+ break;
+ case PERF_TYPE_RAW:
+ return -EOPNOTSUPP;
+ default:
+ return -ENOENT;
+ }
+
+ event->destroy = riscv_event_destroy;
+ if (code < 0) {
+ event->destroy(event);
+ return code;
+ }
+
+ /*
+ * idx is set to -1 because the index of a general event should not be
+ * decided until binding to some counter in pmu->add().
+ *
+ * But since we don't have such support, later in pmu->add(), we just
+ * use hwc->config as the index instead.
+ */
+ hwc->config = code;
+ hwc->idx = -1;
+
+ return 0;
+}
+
+/*
+ * Initialization
+ */
+
+static struct pmu min_pmu = {
+ .name = "riscv-base",
+ .event_init = riscv_event_init,
+ .add = riscv_pmu_add,
+ .del = riscv_pmu_del,
+ .start = riscv_pmu_start,
+ .stop = riscv_pmu_stop,
+ .read = riscv_pmu_read,
+};
+
+static const struct riscv_pmu riscv_base_pmu = {
+ .pmu = &min_pmu,
+ .max_events = ARRAY_SIZE(riscv_hw_event_map),
+ .map_hw_event = riscv_map_hw_event,
+ .hw_events = riscv_hw_event_map,
+ .map_cache_event = riscv_map_cache_event,
+ .cache_events = &riscv_cache_event_map,
+ .counter_width = 63,
+ .num_counters = RISCV_BASE_COUNTERS + 0,
+ .handle_irq = &riscv_base_pmu_handle_irq,
+
+ /* This means this PMU has no IRQ. */
+ .irq = -1,
+};
+
+static const struct of_device_id riscv_pmu_of_ids[] = {
+ {.compatible = "riscv,base-pmu", .data = &riscv_base_pmu},
+ { /* sentinel value */ }
+};
+
+static int __init init_hw_perf_events(void)
+{
+ struct device_node *node = of_find_node_by_type(NULL, "pmu");
+ const struct of_device_id *of_id;
+
+ riscv_pmu = &riscv_base_pmu;
+
+ if (node) {
+ of_id = of_match_node(riscv_pmu_of_ids, node);
+
+ if (of_id)
+ riscv_pmu = of_id->data;
+ of_node_put(node);
+ }
+
+ perf_pmu_register(riscv_pmu->pmu, "cpu", PERF_TYPE_RAW);
+ return 0;
+}
+arch_initcall(init_hw_perf_events);
diff --git a/arch/riscv/kernel/perf_regs.c b/arch/riscv/kernel/perf_regs.c
new file mode 100644
index 000000000..fd304a248
--- /dev/null
+++ b/arch/riscv/kernel/perf_regs.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (C) 2019 Hangzhou C-SKY Microsystems co.,ltd. */
+
+#include <linux/errno.h>
+#include <linux/kernel.h>
+#include <linux/perf_event.h>
+#include <linux/bug.h>
+#include <asm/perf_regs.h>
+#include <asm/ptrace.h>
+
+u64 perf_reg_value(struct pt_regs *regs, int idx)
+{
+ if (WARN_ON_ONCE((u32)idx >= PERF_REG_RISCV_MAX))
+ return 0;
+
+ return ((unsigned long *)regs)[idx];
+}
+
+#define REG_RESERVED (~((1ULL << PERF_REG_RISCV_MAX) - 1))
+
+int perf_reg_validate(u64 mask)
+{
+ if (!mask || mask & REG_RESERVED)
+ return -EINVAL;
+
+ return 0;
+}
+
+u64 perf_reg_abi(struct task_struct *task)
+{
+#if __riscv_xlen == 64
+ return PERF_SAMPLE_REGS_ABI_64;
+#else
+ return PERF_SAMPLE_REGS_ABI_32;
+#endif
+}
+
+void perf_get_regs_user(struct perf_regs *regs_user,
+ struct pt_regs *regs)
+{
+ regs_user->regs = task_pt_regs(current);
+ regs_user->abi = perf_reg_abi(current);
+}
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
new file mode 100644
index 000000000..7868050ff
--- /dev/null
+++ b/arch/riscv/kernel/process.c
@@ -0,0 +1,138 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tick.h>
+#include <linux/ptrace.h>
+#include <linux/uaccess.h>
+
+#include <asm/unistd.h>
+#include <asm/processor.h>
+#include <asm/csr.h>
+#include <asm/string.h>
+#include <asm/switch_to.h>
+#include <asm/thread_info.h>
+
+register unsigned long gp_in_global __asm__("gp");
+
+#ifdef CONFIG_STACKPROTECTOR
+#include <linux/stackprotector.h>
+unsigned long __stack_chk_guard __read_mostly;
+EXPORT_SYMBOL(__stack_chk_guard);
+#endif
+
+extern asmlinkage void ret_from_fork(void);
+extern asmlinkage void ret_from_kernel_thread(void);
+
+void arch_cpu_idle(void)
+{
+ wait_for_interrupt();
+ raw_local_irq_enable();
+}
+
+void show_regs(struct pt_regs *regs)
+{
+ show_regs_print_info(KERN_DEFAULT);
+
+ pr_cont("epc: " REG_FMT " ra : " REG_FMT " sp : " REG_FMT "\n",
+ regs->epc, regs->ra, regs->sp);
+ pr_cont(" gp : " REG_FMT " tp : " REG_FMT " t0 : " REG_FMT "\n",
+ regs->gp, regs->tp, regs->t0);
+ pr_cont(" t1 : " REG_FMT " t2 : " REG_FMT " s0 : " REG_FMT "\n",
+ regs->t1, regs->t2, regs->s0);
+ pr_cont(" s1 : " REG_FMT " a0 : " REG_FMT " a1 : " REG_FMT "\n",
+ regs->s1, regs->a0, regs->a1);
+ pr_cont(" a2 : " REG_FMT " a3 : " REG_FMT " a4 : " REG_FMT "\n",
+ regs->a2, regs->a3, regs->a4);
+ pr_cont(" a5 : " REG_FMT " a6 : " REG_FMT " a7 : " REG_FMT "\n",
+ regs->a5, regs->a6, regs->a7);
+ pr_cont(" s2 : " REG_FMT " s3 : " REG_FMT " s4 : " REG_FMT "\n",
+ regs->s2, regs->s3, regs->s4);
+ pr_cont(" s5 : " REG_FMT " s6 : " REG_FMT " s7 : " REG_FMT "\n",
+ regs->s5, regs->s6, regs->s7);
+ pr_cont(" s8 : " REG_FMT " s9 : " REG_FMT " s10: " REG_FMT "\n",
+ regs->s8, regs->s9, regs->s10);
+ pr_cont(" s11: " REG_FMT " t3 : " REG_FMT " t4 : " REG_FMT "\n",
+ regs->s11, regs->t3, regs->t4);
+ pr_cont(" t5 : " REG_FMT " t6 : " REG_FMT "\n",
+ regs->t5, regs->t6);
+
+ pr_cont("status: " REG_FMT " badaddr: " REG_FMT " cause: " REG_FMT "\n",
+ regs->status, regs->badaddr, regs->cause);
+}
+
+void start_thread(struct pt_regs *regs, unsigned long pc,
+ unsigned long sp)
+{
+ regs->status = SR_PIE;
+ if (has_fpu) {
+ regs->status |= SR_FS_INITIAL;
+ /*
+ * Restore the initial value to the FP register
+ * before starting the user program.
+ */
+ fstate_restore(current, regs);
+ }
+ regs->epc = pc;
+ regs->sp = sp;
+}
+
+void flush_thread(void)
+{
+#ifdef CONFIG_FPU
+ /*
+ * Reset FPU state and context
+ * frm: round to nearest, ties to even (IEEE default)
+ * fflags: accrued exceptions cleared
+ */
+ fstate_off(current, task_pt_regs(current));
+ memset(&current->thread.fstate, 0, sizeof(current->thread.fstate));
+#endif
+}
+
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
+{
+ fstate_save(src, task_pt_regs(src));
+ *dst = *src;
+ return 0;
+}
+
+int copy_thread(unsigned long clone_flags, unsigned long usp, unsigned long arg,
+ struct task_struct *p, unsigned long tls)
+{
+ struct pt_regs *childregs = task_pt_regs(p);
+
+ memset(&p->thread.s, 0, sizeof(p->thread.s));
+
+ /* p->thread holds context to be restored by __switch_to() */
+ if (unlikely(p->flags & (PF_KTHREAD | PF_IO_WORKER))) {
+ /* Kernel thread */
+ memset(childregs, 0, sizeof(struct pt_regs));
+ childregs->gp = gp_in_global;
+ /* Supervisor/Machine, irqs on: */
+ childregs->status = SR_PP | SR_PIE;
+
+ p->thread.ra = (unsigned long)ret_from_kernel_thread;
+ p->thread.s[0] = usp; /* fn */
+ p->thread.s[1] = arg;
+ } else {
+ *childregs = *(current_pt_regs());
+ if (usp) /* User fork */
+ childregs->sp = usp;
+ if (clone_flags & CLONE_SETTLS)
+ childregs->tp = tls;
+ childregs->a0 = 0; /* Return value of fork() */
+ p->thread.ra = (unsigned long)ret_from_fork;
+ }
+ p->thread.sp = (unsigned long)childregs; /* kernel sp */
+ return 0;
+}
diff --git a/arch/riscv/kernel/ptrace.c b/arch/riscv/kernel/ptrace.c
new file mode 100644
index 000000000..69678ab64
--- /dev/null
+++ b/arch/riscv/kernel/ptrace.c
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California
+ * Copyright 2017 SiFive
+ *
+ * Copied from arch/tile/kernel/ptrace.c
+ */
+
+#include <asm/ptrace.h>
+#include <asm/syscall.h>
+#include <asm/thread_info.h>
+#include <asm/switch_to.h>
+#include <linux/audit.h>
+#include <linux/ptrace.h>
+#include <linux/elf.h>
+#include <linux/regset.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/tracehook.h>
+
+#define CREATE_TRACE_POINTS
+#include <trace/events/syscalls.h>
+
+enum riscv_regset {
+ REGSET_X,
+#ifdef CONFIG_FPU
+ REGSET_F,
+#endif
+};
+
+static int riscv_gpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ return membuf_write(&to, task_pt_regs(target),
+ sizeof(struct user_regs_struct));
+}
+
+static int riscv_gpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(target);
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, regs, 0, -1);
+ return ret;
+}
+
+#ifdef CONFIG_FPU
+static int riscv_fpr_get(struct task_struct *target,
+ const struct user_regset *regset,
+ struct membuf to)
+{
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+ if (target == current)
+ fstate_save(current, task_pt_regs(current));
+
+ membuf_write(&to, fstate, offsetof(struct __riscv_d_ext_state, fcsr));
+ membuf_store(&to, fstate->fcsr);
+	return membuf_zero(&to, 4);	/* explicitly pad */
+}
+
+static int riscv_fpr_set(struct task_struct *target,
+ const struct user_regset *regset,
+ unsigned int pos, unsigned int count,
+ const void *kbuf, const void __user *ubuf)
+{
+ int ret;
+ struct __riscv_d_ext_state *fstate = &target->thread.fstate;
+
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr));
+ if (!ret) {
+ ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, fstate, 0,
+ offsetof(struct __riscv_d_ext_state, fcsr) +
+ sizeof(fstate->fcsr));
+ }
+
+ return ret;
+}
+#endif
+
+static const struct user_regset riscv_user_regset[] = {
+ [REGSET_X] = {
+ .core_note_type = NT_PRSTATUS,
+ .n = ELF_NGREG,
+ .size = sizeof(elf_greg_t),
+ .align = sizeof(elf_greg_t),
+ .regset_get = riscv_gpr_get,
+ .set = riscv_gpr_set,
+ },
+#ifdef CONFIG_FPU
+ [REGSET_F] = {
+ .core_note_type = NT_PRFPREG,
+ .n = ELF_NFPREG,
+ .size = sizeof(elf_fpreg_t),
+ .align = sizeof(elf_fpreg_t),
+ .regset_get = riscv_fpr_get,
+ .set = riscv_fpr_set,
+ },
+#endif
+};
+
+static const struct user_regset_view riscv_user_native_view = {
+ .name = "riscv",
+ .e_machine = EM_RISCV,
+ .regsets = riscv_user_regset,
+ .n = ARRAY_SIZE(riscv_user_regset),
+};
+
+const struct user_regset_view *task_user_regset_view(struct task_struct *task)
+{
+ return &riscv_user_native_view;
+}
+
+void ptrace_disable(struct task_struct *child)
+{
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+}
+
+long arch_ptrace(struct task_struct *child, long request,
+ unsigned long addr, unsigned long data)
+{
+ long ret = -EIO;
+
+ switch (request) {
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ break;
+ }
+
+ return ret;
+}
+
+/*
+ * Allows PTRACE_SYSCALL to work. These are called from entry.S in
+ * {handle,ret_from}_syscall.
+ */
+__visible int do_syscall_trace_enter(struct pt_regs *regs)
+{
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ if (tracehook_report_syscall_entry(regs))
+ return -1;
+
+ /*
+ * Do the secure computing after ptrace; failures should be fast.
+ * If this fails we might have return value in a0 from seccomp
+ * (via SECCOMP_RET_ERRNO/TRACE).
+ */
+ if (secure_computing() == -1)
+ return -1;
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_enter(regs, syscall_get_nr(current, regs));
+#endif
+
+ audit_syscall_entry(regs->a7, regs->a0, regs->a1, regs->a2, regs->a3);
+ return 0;
+}
+
+__visible void do_syscall_trace_exit(struct pt_regs *regs)
+{
+ audit_syscall_exit(regs);
+
+ if (test_thread_flag(TIF_SYSCALL_TRACE))
+ tracehook_report_syscall_exit(regs, 0);
+
+#ifdef CONFIG_HAVE_SYSCALL_TRACEPOINTS
+ if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
+ trace_sys_exit(regs, regs_return_value(regs));
+#endif
+}
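
The regsets registered above are what the generic PTRACE_GETREGSET request serves. A sketch of a tracer reading a stopped child's GPRs on a RISC-V host (the raw 32-word buffer mirrors struct user_regs_struct; illustrative only):

    #include <elf.h>
    #include <signal.h>
    #include <stdio.h>
    #include <sys/ptrace.h>
    #include <sys/uio.h>
    #include <sys/wait.h>
    #include <unistd.h>

    int main(void)
    {
    	pid_t pid = fork();

    	if (pid == 0) {
    		ptrace(PTRACE_TRACEME, 0, NULL, NULL);
    		raise(SIGSTOP);		/* stop so the parent can inspect us */
    		_exit(0);
    	}
    	waitpid(pid, NULL, 0);

    	/* Layout mirrors struct user_regs_struct: regs[0]=pc, regs[2]=sp. */
    	unsigned long regs[32];
    	struct iovec iov = { .iov_base = regs, .iov_len = sizeof(regs) };

    	if (ptrace(PTRACE_GETREGSET, pid, (void *)NT_PRSTATUS, &iov) == 0)
    		printf("child pc=0x%lx sp=0x%lx\n", regs[0], regs[2]);

    	kill(pid, SIGKILL);
    	waitpid(pid, NULL, 0);
    	return 0;
    }
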
diff --git a/arch/riscv/kernel/reset.c b/arch/riscv/kernel/reset.c
new file mode 100644
index 000000000..9c842c416
--- /dev/null
+++ b/arch/riscv/kernel/reset.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/reboot.h>
+#include <linux/pm.h>
+
+static void default_power_off(void)
+{
+ while (1)
+ wait_for_interrupt();
+}
+
+void (*pm_power_off)(void) = NULL;
+EXPORT_SYMBOL(pm_power_off);
+
+void machine_restart(char *cmd)
+{
+ do_kernel_restart(cmd);
+ while (1);
+}
+
+void machine_halt(void)
+{
+ if (pm_power_off != NULL)
+ pm_power_off();
+ else
+ default_power_off();
+}
+
+void machine_power_off(void)
+{
+ if (pm_power_off != NULL)
+ pm_power_off();
+ else
+ default_power_off();
+}
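
pm_power_off is a hook for platform code; when nothing registers it, machine_power_off() parks the hart in wait_for_interrupt(). A sketch of a hypothetical driver installing its own handler (names are illustrative; mirrors how sbi_set_power_off() assigns sbi_shutdown in sbi.c later in this patch):

    /* Hypothetical board driver wiring up its own power-off handler. */
    static void myboard_power_off(void)
    {
    	/* write the board's power-controller register here */
    }

    static int __init myboard_pm_init(void)
    {
    	pm_power_off = myboard_power_off;
    	return 0;
    }
    late_initcall(myboard_pm_init);
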
diff --git a/arch/riscv/kernel/riscv_ksyms.c b/arch/riscv/kernel/riscv_ksyms.c
new file mode 100644
index 000000000..450492e1c
--- /dev/null
+++ b/arch/riscv/kernel/riscv_ksyms.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 Zihao Yu
+ */
+
+#include <linux/export.h>
+#include <linux/uaccess.h>
+
+/*
+ * Assembly functions that may be used (directly or indirectly) by modules
+ */
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memcpy);
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
new file mode 100644
index 000000000..226ccce0f
--- /dev/null
+++ b/arch/riscv/kernel/sbi.c
@@ -0,0 +1,605 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SBI initialization and all extension implementations.
+ *
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/pm.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+/* default SBI version is 0.1 */
+unsigned long sbi_spec_version = SBI_SPEC_VERSION_DEFAULT;
+EXPORT_SYMBOL(sbi_spec_version);
+
+static void (*__sbi_set_timer)(uint64_t stime);
+static int (*__sbi_send_ipi)(const unsigned long *hart_mask);
+static int (*__sbi_rfence)(int fid, const unsigned long *hart_mask,
+ unsigned long start, unsigned long size,
+ unsigned long arg4, unsigned long arg5);
+
+struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5)
+{
+ struct sbiret ret;
+
+ register uintptr_t a0 asm ("a0") = (uintptr_t)(arg0);
+ register uintptr_t a1 asm ("a1") = (uintptr_t)(arg1);
+ register uintptr_t a2 asm ("a2") = (uintptr_t)(arg2);
+ register uintptr_t a3 asm ("a3") = (uintptr_t)(arg3);
+ register uintptr_t a4 asm ("a4") = (uintptr_t)(arg4);
+ register uintptr_t a5 asm ("a5") = (uintptr_t)(arg5);
+ register uintptr_t a6 asm ("a6") = (uintptr_t)(fid);
+ register uintptr_t a7 asm ("a7") = (uintptr_t)(ext);
+ asm volatile ("ecall"
+ : "+r" (a0), "+r" (a1)
+ : "r" (a2), "r" (a3), "r" (a4), "r" (a5), "r" (a6), "r" (a7)
+ : "memory");
+ ret.error = a0;
+ ret.value = a1;
+
+ return ret;
+}
+EXPORT_SYMBOL(sbi_ecall);
+
+int sbi_err_map_linux_errno(int err)
+{
+ switch (err) {
+ case SBI_SUCCESS:
+ return 0;
+ case SBI_ERR_DENIED:
+ return -EPERM;
+ case SBI_ERR_INVALID_PARAM:
+ return -EINVAL;
+ case SBI_ERR_INVALID_ADDRESS:
+ return -EFAULT;
+ case SBI_ERR_NOT_SUPPORTED:
+ case SBI_ERR_FAILURE:
+ default:
+ return -ENOTSUPP;
+	}
+}
+EXPORT_SYMBOL(sbi_err_map_linux_errno);
+
+#ifdef CONFIG_RISCV_SBI_V01
+/**
+ * sbi_console_putchar() - Writes given character to the console device.
+ * @ch: The data to be written to the console.
+ *
+ * Return: None
+ */
+void sbi_console_putchar(int ch)
+{
+ sbi_ecall(SBI_EXT_0_1_CONSOLE_PUTCHAR, 0, ch, 0, 0, 0, 0, 0);
+}
+EXPORT_SYMBOL(sbi_console_putchar);
+
+/**
+ * sbi_console_getchar() - Reads a byte from console device.
+ *
+ * Returns the value read from console.
+ */
+int sbi_console_getchar(void)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_0_1_CONSOLE_GETCHAR, 0, 0, 0, 0, 0, 0, 0);
+
+ return ret.error;
+}
+EXPORT_SYMBOL(sbi_console_getchar);
+
+/**
+ * sbi_shutdown() - Remove all the harts from executing supervisor code.
+ *
+ * Return: None
+ */
+void sbi_shutdown(void)
+{
+ sbi_ecall(SBI_EXT_0_1_SHUTDOWN, 0, 0, 0, 0, 0, 0, 0);
+}
+EXPORT_SYMBOL(sbi_shutdown);
+
+/**
+ * sbi_clear_ipi() - Clear any pending IPIs for the calling hart.
+ *
+ * Return: None
+ */
+void sbi_clear_ipi(void)
+{
+ sbi_ecall(SBI_EXT_0_1_CLEAR_IPI, 0, 0, 0, 0, 0, 0, 0);
+}
+EXPORT_SYMBOL(sbi_clear_ipi);
+
+/**
+ * __sbi_set_timer_v01() - Program the timer for the next timer event.
+ * @stime_value: The value after which next timer event should fire.
+ *
+ * Return: None
+ */
+static void __sbi_set_timer_v01(uint64_t stime_value)
+{
+#if __riscv_xlen == 32
+ sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value,
+ stime_value >> 32, 0, 0, 0, 0);
+#else
+ sbi_ecall(SBI_EXT_0_1_SET_TIMER, 0, stime_value, 0, 0, 0, 0, 0);
+#endif
+}
+
+static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
+{
+ sbi_ecall(SBI_EXT_0_1_SEND_IPI, 0, (unsigned long)hart_mask,
+ 0, 0, 0, 0, 0);
+ return 0;
+}
+
+static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
+ unsigned long start, unsigned long size,
+ unsigned long arg4, unsigned long arg5)
+{
+ int result = 0;
+
+ /* v0.2 function IDs are equivalent to v0.1 extension IDs */
+ switch (fid) {
+ case SBI_EXT_RFENCE_REMOTE_FENCE_I:
+ sbi_ecall(SBI_EXT_0_1_REMOTE_FENCE_I, 0,
+ (unsigned long)hart_mask, 0, 0, 0, 0, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA, 0,
+ (unsigned long)hart_mask, start, size,
+ 0, 0, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ sbi_ecall(SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID, 0,
+ (unsigned long)hart_mask, start, size,
+ arg4, 0, 0);
+ break;
+ default:
+ pr_err("SBI call [%d]not supported in SBI v0.1\n", fid);
+ result = -EINVAL;
+ }
+
+ return result;
+}
+
+static void sbi_set_power_off(void)
+{
+ pm_power_off = sbi_shutdown;
+}
+#else
+static void __sbi_set_timer_v01(uint64_t stime_value)
+{
+ pr_warn("Timer extension is not available in SBI v%lu.%lu\n",
+ sbi_major_version(), sbi_minor_version());
+}
+
+static int __sbi_send_ipi_v01(const unsigned long *hart_mask)
+{
+ pr_warn("IPI extension is not available in SBI v%lu.%lu\n",
+ sbi_major_version(), sbi_minor_version());
+
+ return 0;
+}
+
+static int __sbi_rfence_v01(int fid, const unsigned long *hart_mask,
+ unsigned long start, unsigned long size,
+ unsigned long arg4, unsigned long arg5)
+{
+ pr_warn("remote fence extension is not available in SBI v%lu.%lu\n",
+ sbi_major_version(), sbi_minor_version());
+
+ return 0;
+}
+
+static void sbi_set_power_off(void) {}
+#endif /* CONFIG_RISCV_SBI_V01 */
+
+static void __sbi_set_timer_v02(uint64_t stime_value)
+{
+#if __riscv_xlen == 32
+ sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value,
+ stime_value >> 32, 0, 0, 0, 0);
+#else
+ sbi_ecall(SBI_EXT_TIME, SBI_EXT_TIME_SET_TIMER, stime_value, 0,
+ 0, 0, 0, 0);
+#endif
+}
+
+static int __sbi_send_ipi_v02(const unsigned long *hart_mask)
+{
+ unsigned long hartid, hmask_val, hbase;
+ struct cpumask tmask;
+ struct sbiret ret = {0};
+ int result;
+
+ if (!hart_mask || !(*hart_mask)) {
+ riscv_cpuid_to_hartid_mask(cpu_online_mask, &tmask);
+ hart_mask = cpumask_bits(&tmask);
+ }
+
+ hmask_val = 0;
+ hbase = 0;
+ for_each_set_bit(hartid, hart_mask, NR_CPUS) {
+ if (hmask_val && ((hbase + BITS_PER_LONG) <= hartid)) {
+ ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
+ hmask_val, hbase, 0, 0, 0, 0);
+ if (ret.error)
+ goto ecall_failed;
+ hmask_val = 0;
+ hbase = 0;
+ }
+ if (!hmask_val)
+ hbase = hartid;
+ hmask_val |= 1UL << (hartid - hbase);
+ }
+
+ if (hmask_val) {
+ ret = sbi_ecall(SBI_EXT_IPI, SBI_EXT_IPI_SEND_IPI,
+ hmask_val, hbase, 0, 0, 0, 0);
+ if (ret.error)
+ goto ecall_failed;
+ }
+
+ return 0;
+
+ecall_failed:
+ result = sbi_err_map_linux_errno(ret.error);
+ pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
+ __func__, hbase, hmask_val, result);
+ return result;
+}
+
+static int __sbi_rfence_v02_call(unsigned long fid, unsigned long hmask_val,
+ unsigned long hbase, unsigned long start,
+ unsigned long size, unsigned long arg4,
+ unsigned long arg5)
+{
+ struct sbiret ret = {0};
+ int ext = SBI_EXT_RFENCE;
+ int result = 0;
+
+ switch (fid) {
+ case SBI_EXT_RFENCE_REMOTE_FENCE_I:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, 0, 0, 0, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ size, 0, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ size, arg4, 0);
+ break;
+
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ size, 0, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ size, arg4, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ size, 0, 0);
+ break;
+ case SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID:
+ ret = sbi_ecall(ext, fid, hmask_val, hbase, start,
+ size, arg4, 0);
+ break;
+ default:
+ pr_err("unknown function ID [%lu] for SBI extension [%d]\n",
+ fid, ext);
+ result = -EINVAL;
+ }
+
+ if (ret.error) {
+ result = sbi_err_map_linux_errno(ret.error);
+ pr_err("%s: hbase = [%lu] hmask = [0x%lx] failed (error [%d])\n",
+ __func__, hbase, hmask_val, result);
+ }
+
+ return result;
+}
+
+static int __sbi_rfence_v02(int fid, const unsigned long *hart_mask,
+ unsigned long start, unsigned long size,
+ unsigned long arg4, unsigned long arg5)
+{
+ unsigned long hmask_val, hartid, hbase;
+ struct cpumask tmask;
+ int result;
+
+ if (!hart_mask || !(*hart_mask)) {
+ riscv_cpuid_to_hartid_mask(cpu_online_mask, &tmask);
+ hart_mask = cpumask_bits(&tmask);
+ }
+
+ hmask_val = 0;
+ hbase = 0;
+ for_each_set_bit(hartid, hart_mask, NR_CPUS) {
+ if (hmask_val && ((hbase + BITS_PER_LONG) <= hartid)) {
+ result = __sbi_rfence_v02_call(fid, hmask_val, hbase,
+ start, size, arg4, arg5);
+ if (result)
+ return result;
+ hmask_val = 0;
+ hbase = 0;
+ }
+ if (!hmask_val)
+ hbase = hartid;
+ hmask_val |= 1UL << (hartid - hbase);
+ }
+
+ if (hmask_val) {
+ result = __sbi_rfence_v02_call(fid, hmask_val, hbase,
+ start, size, arg4, arg5);
+ if (result)
+ return result;
+ }
+
+ return 0;
+}
+
+/**
+ * sbi_set_timer() - Program the timer for next timer event.
+ * @stime_value: The value after which next timer event should fire.
+ *
+ * Return: None
+ */
+void sbi_set_timer(uint64_t stime_value)
+{
+ __sbi_set_timer(stime_value);
+}
+
+/**
+ * sbi_send_ipi() - Send an IPI to any hart.
+ * @hart_mask: A cpu mask containing all the target harts.
+ *
+ * Return: None
+ */
+void sbi_send_ipi(const unsigned long *hart_mask)
+{
+ __sbi_send_ipi(hart_mask);
+}
+EXPORT_SYMBOL(sbi_send_ipi);
+
+/**
+ * sbi_remote_fence_i() - Execute FENCE.I instruction on given remote harts.
+ * @hart_mask: A cpu mask containing all the target harts.
+ *
+ * Return: None
+ */
+void sbi_remote_fence_i(const unsigned long *hart_mask)
+{
+ __sbi_rfence(SBI_EXT_RFENCE_REMOTE_FENCE_I,
+ hart_mask, 0, 0, 0, 0);
+}
+EXPORT_SYMBOL(sbi_remote_fence_i);
+
+/**
+ * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
+ * harts for the specified virtual address range.
+ * @hart_mask: A cpu mask containing all the target harts.
+ * @start: Start of the virtual address
+ * @size: Total size of the virtual address range.
+ *
+ * Return: None
+ */
+void sbi_remote_sfence_vma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size)
+{
+ __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ hart_mask, start, size, 0, 0);
+}
+EXPORT_SYMBOL(sbi_remote_sfence_vma);
+
+/**
+ * sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
+ * remote harts for a virtual address range belonging to a specific ASID.
+ *
+ * @hart_mask: A cpu mask containing all the target harts.
+ * @start: Start of the virtual address
+ * @size: Total size of the virtual address range.
+ * @asid: The value of address space identifier (ASID).
+ *
+ * Return: None
+ */
+void sbi_remote_sfence_vma_asid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid)
+{
+ __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ hart_mask, start, size, asid, 0);
+}
+EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
+
+/**
+ * sbi_remote_hfence_gvma() - Execute HFENCE.GVMA instructions on given remote
+ * harts for the specified guest physical address range.
+ * @hart_mask: A cpu mask containing all the target harts.
+ * @start: Start of the guest physical address
+ * @size: Total size of the guest physical address range.
+ *
+ * Return: 0 on success, or an error code otherwise.
+ */
+int sbi_remote_hfence_gvma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size)
+{
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
+ hart_mask, start, size, 0, 0);
+}
+EXPORT_SYMBOL_GPL(sbi_remote_hfence_gvma);
+
+/**
+ * sbi_remote_hfence_gvma_vmid() - Execute HFENCE.GVMA instructions on given
+ * remote harts for a guest physical address range belonging to a specific VMID.
+ *
+ * @hart_mask: A cpu mask containing all the target harts.
+ * @start: Start of the guest physical address
+ * @size: Total size of the guest physical address range.
+ * @vmid: The value of guest ID (VMID).
+ *
+ * Return: 0 on success, or an error code otherwise.
+ */
+int sbi_remote_hfence_gvma_vmid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long vmid)
+{
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
+ hart_mask, start, size, vmid, 0);
+}
+EXPORT_SYMBOL(sbi_remote_hfence_gvma_vmid);
+
+/**
+ * sbi_remote_hfence_vvma() - Execute HFENCE.VVMA instructions on given remote
+ * harts for the current guest virtual address range.
+ * @hart_mask: A cpu mask containing all the target harts.
+ * @start: Start of the current guest virtual address
+ * @size: Total size of the current guest virtual address range.
+ *
+ * Return: 0 on success, or an error code otherwise.
+ */
+int sbi_remote_hfence_vvma(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size)
+{
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+ hart_mask, start, size, 0, 0);
+}
+EXPORT_SYMBOL(sbi_remote_hfence_vvma);
+
+/**
+ * sbi_remote_hfence_vvma_asid() - Execute HFENCE.VVMA instructions on given
+ * remote harts for current guest virtual address range belonging to a specific
+ * ASID.
+ *
+ * @hart_mask: A cpu mask containing all the target harts.
+ * @start: Start of the current guest virtual address
+ * @size: Total size of the current guest virtual address range.
+ * @asid: The value of address space identifier (ASID).
+ *
+ * Return: 0 on success, or an error code otherwise.
+ */
+int sbi_remote_hfence_vvma_asid(const unsigned long *hart_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid)
+{
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+ hart_mask, start, size, asid, 0);
+}
+EXPORT_SYMBOL(sbi_remote_hfence_vvma_asid);
+
+/**
+ * sbi_probe_extension() - Check if an SBI extension ID is supported or not.
+ * @extid: The extension ID to be probed.
+ *
+ * Return: Extension-specific nonzero value if yes, -ENOTSUPP otherwise.
+ */
+int sbi_probe_extension(int extid)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_PROBE_EXT, extid,
+ 0, 0, 0, 0, 0);
+ if (!ret.error)
+ if (ret.value)
+ return ret.value;
+
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(sbi_probe_extension);
+
+static long __sbi_base_ecall(int fid)
+{
+ struct sbiret ret;
+
+ ret = sbi_ecall(SBI_EXT_BASE, fid, 0, 0, 0, 0, 0, 0);
+ if (!ret.error)
+ return ret.value;
+ else
+ return sbi_err_map_linux_errno(ret.error);
+}
+
+static inline long sbi_get_spec_version(void)
+{
+ return __sbi_base_ecall(SBI_EXT_BASE_GET_SPEC_VERSION);
+}
+
+static inline long sbi_get_firmware_id(void)
+{
+ return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_ID);
+}
+
+static inline long sbi_get_firmware_version(void)
+{
+ return __sbi_base_ecall(SBI_EXT_BASE_GET_IMP_VERSION);
+}
+
+static void sbi_send_cpumask_ipi(const struct cpumask *target)
+{
+ struct cpumask hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(target, &hartid_mask);
+
+ sbi_send_ipi(cpumask_bits(&hartid_mask));
+}
+
+static struct riscv_ipi_ops sbi_ipi_ops = {
+ .ipi_inject = sbi_send_cpumask_ipi
+};
+
+int __init sbi_init(void)
+{
+ int ret;
+
+ sbi_set_power_off();
+ ret = sbi_get_spec_version();
+ if (ret > 0)
+ sbi_spec_version = ret;
+
+ pr_info("SBI specification v%lu.%lu detected\n",
+ sbi_major_version(), sbi_minor_version());
+
+ if (!sbi_spec_is_0_1()) {
+ pr_info("SBI implementation ID=0x%lx Version=0x%lx\n",
+ sbi_get_firmware_id(), sbi_get_firmware_version());
+ if (sbi_probe_extension(SBI_EXT_TIME) > 0) {
+ __sbi_set_timer = __sbi_set_timer_v02;
+ pr_info("SBI v0.2 TIME extension detected\n");
+ } else {
+ __sbi_set_timer = __sbi_set_timer_v01;
+ }
+ if (sbi_probe_extension(SBI_EXT_IPI) > 0) {
+ __sbi_send_ipi = __sbi_send_ipi_v02;
+ pr_info("SBI v0.2 IPI extension detected\n");
+ } else {
+ __sbi_send_ipi = __sbi_send_ipi_v01;
+ }
+ if (sbi_probe_extension(SBI_EXT_RFENCE) > 0) {
+ __sbi_rfence = __sbi_rfence_v02;
+ pr_info("SBI v0.2 RFENCE extension detected\n");
+ } else {
+ __sbi_rfence = __sbi_rfence_v01;
+ }
+ } else {
+ __sbi_set_timer = __sbi_set_timer_v01;
+ __sbi_send_ipi = __sbi_send_ipi_v01;
+ __sbi_rfence = __sbi_rfence_v01;
+ }
+
+ riscv_set_ipi_ops(&sbi_ipi_ops);
+
+ return 0;
+}
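
Both __sbi_send_ipi_v02() and __sbi_rfence_v02() walk the hart mask in windows of BITS_PER_LONG harts, issuing one ecall per (hbase, hmask) pair. A stand-alone simulation of that windowing (hypothetical hart ids; assumes the ids arrive in ascending order, as for_each_set_bit() guarantees):

    #include <stdio.h>

    #define BITS_PER_LONG (8 * (int)sizeof(unsigned long))

    /* Split an ascending list of hart ids into (hbase, hmask) windows no
     * wider than BITS_PER_LONG; each printed line corresponds to one
     * ecall in __sbi_send_ipi_v02() above. */
    static void batch_harts(const unsigned long *harts, int n)
    {
    	unsigned long hmask = 0, hbase = 0;
    	int i;

    	for (i = 0; i < n; i++) {
    		unsigned long hartid = harts[i];

    		if (hmask && hbase + BITS_PER_LONG <= hartid) {
    			printf("ecall: hbase=%lu hmask=0x%lx\n", hbase, hmask);
    			hmask = 0;
    		}
    		if (!hmask)
    			hbase = hartid;
    		hmask |= 1UL << (hartid - hbase);
    	}
    	if (hmask)
    		printf("ecall: hbase=%lu hmask=0x%lx\n", hbase, hmask);
    }

    int main(void)
    {
    	unsigned long harts[] = { 0, 1, 3, 70, 71 };

    	batch_harts(harts, 5);	/* prints two batches: hbase 0, hbase 70 */
    	return 0;
    }
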
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
new file mode 100644
index 000000000..8e78a8ab6
--- /dev/null
+++ b/arch/riscv/kernel/setup.c
@@ -0,0 +1,134 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/sched.h>
+#include <linux/console.h>
+#include <linux/screen_info.h>
+#include <linux/of_fdt.h>
+#include <linux/of_platform.h>
+#include <linux/sched/task.h>
+#include <linux/swiotlb.h>
+#include <linux/smp.h>
+#include <linux/efi.h>
+
+#include <asm/cpu_ops.h>
+#include <asm/early_ioremap.h>
+#include <asm/setup.h>
+#include <asm/sections.h>
+#include <asm/sbi.h>
+#include <asm/tlbflush.h>
+#include <asm/thread_info.h>
+#include <asm/kasan.h>
+#include <asm/efi.h>
+
+#include "head.h"
+
+#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
+struct screen_info screen_info __section(".data") = {
+ .orig_video_lines = 30,
+ .orig_video_cols = 80,
+ .orig_video_mode = 0,
+ .orig_video_ega_bx = 0,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 8
+};
+#endif
+
+/*
+ * The lucky hart to first increment this variable will boot the other cores.
+ * This is used before the kernel initializes the BSS so it can't be in the
+ * BSS.
+ */
+atomic_t hart_lottery __section(".sdata");
+unsigned long boot_cpu_hartid;
+static DEFINE_PER_CPU(struct cpu, cpu_devices);
+
+static void __init parse_dtb(void)
+{
+ /* Early scan of device tree from init memory */
+ if (early_init_dt_scan(dtb_early_va)) {
+ const char *name = of_flat_dt_get_machine_name();
+
+ if (name) {
+ pr_info("Machine model: %s\n", name);
+ dump_stack_set_arch_desc("%s (DT)", name);
+ }
+ } else {
+ pr_err("No DTB passed to the kernel\n");
+ }
+
+#ifdef CONFIG_CMDLINE_FORCE
+ strlcpy(boot_command_line, CONFIG_CMDLINE, COMMAND_LINE_SIZE);
+ pr_info("Forcing kernel command line to: %s\n", boot_command_line);
+#endif
+}
+
+void __init setup_arch(char **cmdline_p)
+{
+ parse_dtb();
+ init_mm.start_code = (unsigned long) _stext;
+ init_mm.end_code = (unsigned long) _etext;
+ init_mm.end_data = (unsigned long) _edata;
+ init_mm.brk = (unsigned long) _end;
+
+ *cmdline_p = boot_command_line;
+
+ early_ioremap_setup();
+ jump_label_init();
+ parse_early_param();
+
+ efi_init();
+ setup_bootmem();
+ paging_init();
+#if IS_ENABLED(CONFIG_BUILTIN_DTB)
+ unflatten_and_copy_device_tree();
+#else
+ if (early_init_dt_verify(__va(dtb_early_pa)))
+ unflatten_device_tree();
+ else
+ pr_err("No DTB found in kernel mappings\n");
+#endif
+ early_init_fdt_scan_reserved_mem();
+ misc_mem_init();
+
+#ifdef CONFIG_SWIOTLB
+ swiotlb_init(1);
+#endif
+
+#ifdef CONFIG_KASAN
+ kasan_init();
+#endif
+
+#if IS_ENABLED(CONFIG_RISCV_SBI)
+ sbi_init();
+#endif
+
+#ifdef CONFIG_SMP
+ setup_smp();
+#endif
+
+ riscv_fill_hwcap();
+}
+
+static int __init topology_init(void)
+{
+ int i;
+
+ for_each_possible_cpu(i) {
+ struct cpu *cpu = &per_cpu(cpu_devices, i);
+
+ cpu->hotpluggable = cpu_has_hotplug(i);
+ register_cpu(cpu, i);
+ }
+
+ return 0;
+}
+subsys_initcall(topology_init);
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
new file mode 100644
index 000000000..dd63407e8
--- /dev/null
+++ b/arch/riscv/kernel/signal.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+#include <linux/syscalls.h>
+#include <linux/tracehook.h>
+#include <linux/linkage.h>
+
+#include <asm/ucontext.h>
+#include <asm/vdso.h>
+#include <asm/switch_to.h>
+#include <asm/csr.h>
+#include <asm/cacheflush.h>
+
+extern u32 __user_rt_sigreturn[2];
+
+#define DEBUG_SIG 0
+
+struct rt_sigframe {
+ struct siginfo info;
+ struct ucontext uc;
+#ifndef CONFIG_MMU
+ u32 sigreturn_code[2];
+#endif
+};
+
+#ifdef CONFIG_FPU
+static long restore_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state __user *sc_fpregs)
+{
+ long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
+ err = __copy_from_user(&current->thread.fstate, state, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ fstate_restore(current, regs);
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ u32 value;
+
+ err = __get_user(value, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ if (value != 0)
+ return -EINVAL;
+ }
+
+ return err;
+}
+
+static long save_fp_state(struct pt_regs *regs,
+ union __riscv_fp_state __user *sc_fpregs)
+{
+ long err;
+ struct __riscv_d_ext_state __user *state = &sc_fpregs->d;
+ size_t i;
+
+ fstate_save(current, regs);
+ err = __copy_to_user(state, &current->thread.fstate, sizeof(*state));
+ if (unlikely(err))
+ return err;
+
+ /* We support no other extension state at this time. */
+ for (i = 0; i < ARRAY_SIZE(sc_fpregs->q.reserved); i++) {
+ err = __put_user(0, &sc_fpregs->q.reserved[i]);
+ if (unlikely(err))
+ break;
+ }
+
+ return err;
+}
+#else
+#define save_fp_state(regs, sc_fpregs) (0)
+#define restore_fp_state(regs, sc_fpregs) (0)
+#endif
+
+static long restore_sigcontext(struct pt_regs *regs,
+ struct sigcontext __user *sc)
+{
+ long err;
+ /* sc_regs is structured the same as the start of pt_regs */
+ err = __copy_from_user(regs, &sc->sc_regs, sizeof(sc->sc_regs));
+ /* Restore the floating-point state. */
+ if (has_fpu)
+ err |= restore_fp_state(regs, &sc->sc_fpregs);
+ return err;
+}
+
+SYSCALL_DEFINE0(rt_sigreturn)
+{
+ struct pt_regs *regs = current_pt_regs();
+ struct rt_sigframe __user *frame;
+ struct task_struct *task;
+ sigset_t set;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ frame = (struct rt_sigframe __user *)regs->sp;
+
+ if (!access_ok(frame, sizeof(*frame)))
+ goto badframe;
+
+ if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ if (restore_sigcontext(regs, &frame->uc.uc_mcontext))
+ goto badframe;
+
+ if (restore_altstack(&frame->uc.uc_stack))
+ goto badframe;
+
+ regs->cause = -1UL;
+
+ return regs->a0;
+
+badframe:
+ task = current;
+ if (show_unhandled_signals) {
+ pr_info_ratelimited(
+ "%s[%d]: bad frame in %s: frame=%p pc=%p sp=%p\n",
+ task->comm, task_pid_nr(task), __func__,
+ frame, (void *)regs->epc, (void *)regs->sp);
+ }
+ force_sig(SIGSEGV);
+ return 0;
+}
+
+static long setup_sigcontext(struct rt_sigframe __user *frame,
+ struct pt_regs *regs)
+{
+ struct sigcontext __user *sc = &frame->uc.uc_mcontext;
+ long err;
+ /* sc_regs is structured the same as the start of pt_regs */
+ err = __copy_to_user(&sc->sc_regs, regs, sizeof(sc->sc_regs));
+ /* Save the floating-point state. */
+ if (has_fpu)
+ err |= save_fp_state(regs, &sc->sc_fpregs);
+ return err;
+}
+
+static inline void __user *get_sigframe(struct ksignal *ksig,
+ struct pt_regs *regs, size_t framesize)
+{
+ unsigned long sp;
+ /* Default to using normal stack */
+ sp = regs->sp;
+
+ /*
+ * If we are on the alternate signal stack and would overflow it, don't.
+ * Return an always-bogus address instead so we will die with SIGSEGV.
+ */
+ if (on_sig_stack(sp) && !likely(on_sig_stack(sp - framesize)))
+ return (void __user __force *)(-1UL);
+
+ /* This is the X/Open sanctioned signal stack switching. */
+ sp = sigsp(sp, ksig) - framesize;
+
+ /* Align the stack frame. */
+ sp &= ~0xfUL;
+
+ return (void __user *)sp;
+}
+
+static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ struct rt_sigframe __user *frame;
+ long err = 0;
+ unsigned long __maybe_unused addr;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(frame, sizeof(*frame)))
+ return -EFAULT;
+
+ err |= copy_siginfo_to_user(&frame->info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->uc.uc_flags);
+ err |= __put_user(NULL, &frame->uc.uc_link);
+ err |= __save_altstack(&frame->uc.uc_stack, regs->sp);
+ err |= setup_sigcontext(frame, regs);
+ err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
+ if (err)
+ return -EFAULT;
+
+ /* Set up to return from userspace. */
+#ifdef CONFIG_MMU
+ regs->ra = (unsigned long)VDSO_SYMBOL(
+ current->mm->context.vdso, rt_sigreturn);
+#else
+ /*
+ * For the nommu case we don't have a VDSO. Instead we push two
+ * instructions to call the rt_sigreturn syscall onto the user stack.
+ */
+ if (copy_to_user(&frame->sigreturn_code, __user_rt_sigreturn,
+ sizeof(frame->sigreturn_code)))
+ return -EFAULT;
+
+ addr = (unsigned long)&frame->sigreturn_code;
+ /* Make sure the two instructions are pushed to icache. */
+ flush_icache_range(addr, addr + sizeof(frame->sigreturn_code));
+
+ regs->ra = addr;
+#endif /* CONFIG_MMU */
+
+ /*
+ * Set up registers for signal handler.
+ * Registers that we don't modify keep the value they had from
+ * user-space at the time we took the signal.
+ * We always pass siginfo and mcontext, regardless of SA_SIGINFO,
+ * since some things rely on this (e.g. glibc's debug/segfault.c).
+ */
+ regs->epc = (unsigned long)ksig->ka.sa.sa_handler;
+ regs->sp = (unsigned long)frame;
+ regs->a0 = ksig->sig; /* a0: signal number */
+ regs->a1 = (unsigned long)(&frame->info); /* a1: siginfo pointer */
+ regs->a2 = (unsigned long)(&frame->uc); /* a2: ucontext pointer */
+
+#if DEBUG_SIG
+ pr_info("SIG deliver (%s:%d): sig=%d pc=%p ra=%p sp=%p\n",
+ current->comm, task_pid_nr(current), ksig->sig,
+ (void *)regs->epc, (void *)regs->ra, frame);
+#endif
+
+ return 0;
+}
+
+static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
+{
+ sigset_t *oldset = sigmask_to_save();
+ int ret;
+
+ /* Are we from a system call? */
+ if (regs->cause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->cause = -1UL;
+ /* If so, check system call restarting.. */
+ switch (regs->a0) {
+ case -ERESTART_RESTARTBLOCK:
+ case -ERESTARTNOHAND:
+ regs->a0 = -EINTR;
+ break;
+
+ case -ERESTARTSYS:
+ if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
+ regs->a0 = -EINTR;
+ break;
+ }
+ fallthrough;
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->epc -= 0x4;
+ break;
+ }
+ }
+
+ /* Set up the stack frame */
+ ret = setup_rt_frame(ksig, oldset, regs);
+
+ signal_setup_done(ret, ksig, 0);
+}
+
+static void do_signal(struct pt_regs *regs)
+{
+ struct ksignal ksig;
+
+ if (get_signal(&ksig)) {
+ /* Actually deliver the signal */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /* Did we come from a system call? */
+ if (regs->cause == EXC_SYSCALL) {
+ /* Avoid additional syscall restarting via ret_from_exception */
+ regs->cause = -1UL;
+
+ /* Restart the system call - no handlers present */
+ switch (regs->a0) {
+ case -ERESTARTNOHAND:
+ case -ERESTARTSYS:
+ case -ERESTARTNOINTR:
+ regs->a0 = regs->orig_a0;
+ regs->epc -= 0x4;
+ break;
+ case -ERESTART_RESTARTBLOCK:
+ regs->a0 = regs->orig_a0;
+ regs->a7 = __NR_restart_syscall;
+ regs->epc -= 0x4;
+ break;
+ }
+ }
+
+ /*
+ * If there is no signal to deliver, we just put the saved
+ * sigmask back.
+ */
+ restore_saved_sigmask();
+}
+
+/*
+ * notification of userspace execution resumption
+ * - triggered by the _TIF_WORK_MASK flags
+ */
+asmlinkage __visible void do_notify_resume(struct pt_regs *regs,
+ unsigned long thread_info_flags)
+{
+ /* Handle pending signal delivery */
+ if (thread_info_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL))
+ do_signal(regs);
+
+ if (thread_info_flags & _TIF_NOTIFY_RESUME)
+ tracehook_notify_resume(regs);
+}
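
The register setup in setup_rt_frame() is exactly the three-argument SA_SIGINFO handler ABI as seen from user space. A small counterpart program (plain POSIX C, illustrative):

    #include <signal.h>
    #include <stdio.h>
    #include <string.h>
    #include <unistd.h>

    /* Entered with a0 = sig, a1 = info, a2 = ucontext, per setup_rt_frame(). */
    static void handler(int sig, siginfo_t *info, void *ucontext)
    {
    	/* printf is not async-signal-safe; acceptable for a demo only. */
    	printf("sig=%d si_pid=%d uc=%p\n", sig, (int)info->si_pid, ucontext);
    }

    int main(void)
    {
    	struct sigaction sa;

    	memset(&sa, 0, sizeof(sa));
    	sa.sa_sigaction = handler;
    	sa.sa_flags = SA_SIGINFO;
    	sigaction(SIGUSR1, &sa, NULL);

    	kill(getpid(), SIGUSR1);
    	return 0;
    }
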
diff --git a/arch/riscv/kernel/smp.c b/arch/riscv/kernel/smp.c
new file mode 100644
index 000000000..d44567490
--- /dev/null
+++ b/arch/riscv/kernel/smp.c
@@ -0,0 +1,249 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm64/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/cpu.h>
+#include <linux/interrupt.h>
+#include <linux/module.h>
+#include <linux/profile.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <linux/seq_file.h>
+#include <linux/delay.h>
+#include <linux/irq_work.h>
+
+#include <asm/sbi.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+
+enum ipi_message_type {
+ IPI_RESCHEDULE,
+ IPI_CALL_FUNC,
+ IPI_CPU_STOP,
+ IPI_IRQ_WORK,
+ IPI_MAX
+};
+
+unsigned long __cpuid_to_hartid_map[NR_CPUS] = {
+ [0 ... NR_CPUS-1] = INVALID_HARTID
+};
+
+void __init smp_setup_processor_id(void)
+{
+ cpuid_to_hartid_map(0) = boot_cpu_hartid;
+}
+
+/* A collection of single bit ipi messages. */
+static struct {
+ unsigned long stats[IPI_MAX] ____cacheline_aligned;
+ unsigned long bits ____cacheline_aligned;
+} ipi_data[NR_CPUS] __cacheline_aligned;
+
+int riscv_hartid_to_cpuid(int hartid)
+{
+ int i;
+
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpuid_to_hartid_map(i) == hartid)
+ return i;
+
+ pr_err("Couldn't find cpu id for hartid [%d]\n", hartid);
+ return -ENOENT;
+}
+
+void riscv_cpuid_to_hartid_mask(const struct cpumask *in, struct cpumask *out)
+{
+ int cpu;
+
+ cpumask_clear(out);
+ for_each_cpu(cpu, in)
+ cpumask_set_cpu(cpuid_to_hartid_map(cpu), out);
+}
+EXPORT_SYMBOL_GPL(riscv_cpuid_to_hartid_mask);
+
+bool arch_match_cpu_phys_id(int cpu, u64 phys_id)
+{
+ return phys_id == cpuid_to_hartid_map(cpu);
+}
+
+/* Unsupported */
+int setup_profiling_timer(unsigned int multiplier)
+{
+ return -EINVAL;
+}
+
+static void ipi_stop(void)
+{
+ set_cpu_online(smp_processor_id(), false);
+ while (1)
+ wait_for_interrupt();
+}
+
+static struct riscv_ipi_ops *ipi_ops;
+
+void riscv_set_ipi_ops(struct riscv_ipi_ops *ops)
+{
+ ipi_ops = ops;
+}
+EXPORT_SYMBOL_GPL(riscv_set_ipi_ops);
+
+void riscv_clear_ipi(void)
+{
+ if (ipi_ops && ipi_ops->ipi_clear)
+ ipi_ops->ipi_clear();
+
+ csr_clear(CSR_IP, IE_SIE);
+}
+EXPORT_SYMBOL_GPL(riscv_clear_ipi);
+
+static void send_ipi_mask(const struct cpumask *mask, enum ipi_message_type op)
+{
+ int cpu;
+
+ smp_mb__before_atomic();
+ for_each_cpu(cpu, mask)
+ set_bit(op, &ipi_data[cpu].bits);
+ smp_mb__after_atomic();
+
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(mask);
+ else
+ pr_warn("SMP: IPI inject method not available\n");
+}
+
+static void send_ipi_single(int cpu, enum ipi_message_type op)
+{
+ smp_mb__before_atomic();
+ set_bit(op, &ipi_data[cpu].bits);
+ smp_mb__after_atomic();
+
+ if (ipi_ops && ipi_ops->ipi_inject)
+ ipi_ops->ipi_inject(cpumask_of(cpu));
+ else
+ pr_warn("SMP: IPI inject method not available\n");
+}
+
+#ifdef CONFIG_IRQ_WORK
+void arch_irq_work_raise(void)
+{
+ send_ipi_single(smp_processor_id(), IPI_IRQ_WORK);
+}
+#endif
+
+void handle_IPI(struct pt_regs *regs)
+{
+ struct pt_regs *old_regs = set_irq_regs(regs);
+ unsigned long *pending_ipis = &ipi_data[smp_processor_id()].bits;
+ unsigned long *stats = ipi_data[smp_processor_id()].stats;
+
+ irq_enter();
+
+ riscv_clear_ipi();
+
+ while (true) {
+ unsigned long ops;
+
+ /* Order bit clearing and data access. */
+ mb();
+
+ ops = xchg(pending_ipis, 0);
+ if (ops == 0)
+ goto done;
+
+ if (ops & (1 << IPI_RESCHEDULE)) {
+ stats[IPI_RESCHEDULE]++;
+ scheduler_ipi();
+ }
+
+ if (ops & (1 << IPI_CALL_FUNC)) {
+ stats[IPI_CALL_FUNC]++;
+ generic_smp_call_function_interrupt();
+ }
+
+ if (ops & (1 << IPI_CPU_STOP)) {
+ stats[IPI_CPU_STOP]++;
+ ipi_stop();
+ }
+
+ if (ops & (1 << IPI_IRQ_WORK)) {
+ stats[IPI_IRQ_WORK]++;
+ irq_work_run();
+ }
+
+ BUG_ON((ops >> IPI_MAX) != 0);
+
+ /* Order data access and bit testing. */
+ mb();
+ }
+
+done:
+ irq_exit();
+ set_irq_regs(old_regs);
+}
+
+static const char * const ipi_names[] = {
+ [IPI_RESCHEDULE] = "Rescheduling interrupts",
+ [IPI_CALL_FUNC] = "Function call interrupts",
+ [IPI_CPU_STOP] = "CPU stop interrupts",
+ [IPI_IRQ_WORK] = "IRQ work interrupts",
+};
+
+void show_ipi_stats(struct seq_file *p, int prec)
+{
+ unsigned int cpu, i;
+
+ for (i = 0; i < IPI_MAX; i++) {
+ seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
+ prec >= 4 ? " " : "");
+ for_each_online_cpu(cpu)
+ seq_printf(p, "%10lu ", ipi_data[cpu].stats[i]);
+ seq_printf(p, " %s\n", ipi_names[i]);
+ }
+}
+
+void arch_send_call_function_ipi_mask(struct cpumask *mask)
+{
+ send_ipi_mask(mask, IPI_CALL_FUNC);
+}
+
+void arch_send_call_function_single_ipi(int cpu)
+{
+ send_ipi_single(cpu, IPI_CALL_FUNC);
+}
+
+void smp_send_stop(void)
+{
+ unsigned long timeout;
+
+ if (num_online_cpus() > 1) {
+ cpumask_t mask;
+
+ cpumask_copy(&mask, cpu_online_mask);
+ cpumask_clear_cpu(smp_processor_id(), &mask);
+
+ if (system_state <= SYSTEM_RUNNING)
+ pr_crit("SMP: stopping secondary CPUs\n");
+ send_ipi_mask(&mask, IPI_CPU_STOP);
+ }
+
+ /* Wait up to one second for other CPUs to stop */
+ timeout = USEC_PER_SEC;
+ while (num_online_cpus() > 1 && timeout--)
+ udelay(1);
+
+ if (num_online_cpus() > 1)
+ pr_warn("SMP: failed to stop secondary CPUs %*pbl\n",
+ cpumask_pr_args(cpu_online_mask));
+}
+
+void smp_send_reschedule(int cpu)
+{
+ send_ipi_single(cpu, IPI_RESCHEDULE);
+}
+EXPORT_SYMBOL_GPL(smp_send_reschedule);
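
Pending IPIs per CPU are a single bitmask consumed atomically with xchg(), so any burst of senders collapses into one pass of the decode loop in handle_IPI(). A user-space model of that consume-and-decode step (C11 atomics, illustrative):

    #include <stdatomic.h>
    #include <stdio.h>

    enum { IPI_RESCHEDULE, IPI_CALL_FUNC, IPI_CPU_STOP, IPI_IRQ_WORK, IPI_MAX };

    static _Atomic unsigned long pending;

    static void send_op(int op)
    {
    	atomic_fetch_or(&pending, 1UL << op);	/* models set_bit() */
    }

    static void handle(void)
    {
    	/* Consume every pending op in one shot, like xchg(pending_ipis, 0). */
    	unsigned long ops = atomic_exchange(&pending, 0);
    	int op;

    	for (op = 0; op < IPI_MAX; op++)
    		if (ops & (1UL << op))
    			printf("handled op %d\n", op);
    }

    int main(void)
    {
    	send_op(IPI_RESCHEDULE);
    	send_op(IPI_CALL_FUNC);	/* both collapse into one handle() pass */
    	handle();
    	return 0;
    }
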
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
new file mode 100644
index 000000000..0e0aed380
--- /dev/null
+++ b/arch/riscv/kernel/smpboot.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * SMP initialisation and IPI support
+ * Based on arch/arm64/kernel/smp.c
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/arch_topology.h>
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <linux/kernel_stat.h>
+#include <linux/notifier.h>
+#include <linux/cpu.h>
+#include <linux/percpu.h>
+#include <linux/delay.h>
+#include <linux/err.h>
+#include <linux/irq.h>
+#include <linux/of.h>
+#include <linux/sched/task_stack.h>
+#include <linux/sched/mm.h>
+#include <asm/cpu_ops.h>
+#include <asm/irq.h>
+#include <asm/mmu_context.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/sbi.h>
+#include <asm/smp.h>
+
+#include "head.h"
+
+static DECLARE_COMPLETION(cpu_running);
+
+void __init smp_prepare_boot_cpu(void)
+{
+ init_cpu_topology();
+}
+
+void __init smp_prepare_cpus(unsigned int max_cpus)
+{
+ int cpuid;
+ int ret;
+
+ store_cpu_topology(smp_processor_id());
+
+	/* This covers the non-SMP use case mandated by the "nosmp" option */
+ if (max_cpus == 0)
+ return;
+
+ for_each_possible_cpu(cpuid) {
+ if (cpuid == smp_processor_id())
+ continue;
+ if (cpu_ops[cpuid]->cpu_prepare) {
+ ret = cpu_ops[cpuid]->cpu_prepare(cpuid);
+ if (ret)
+ continue;
+ }
+ set_cpu_present(cpuid, true);
+ }
+}
+
+void __init setup_smp(void)
+{
+ struct device_node *dn;
+ int hart;
+ bool found_boot_cpu = false;
+ int cpuid = 1;
+
+ cpu_set_ops(0);
+
+ for_each_of_cpu_node(dn) {
+ hart = riscv_of_processor_hartid(dn);
+ if (hart < 0)
+ continue;
+
+ if (hart == cpuid_to_hartid_map(0)) {
+ BUG_ON(found_boot_cpu);
+			found_boot_cpu = true;
+ continue;
+ }
+ if (cpuid >= NR_CPUS) {
+ pr_warn("Invalid cpuid [%d] for hartid [%d]\n",
+ cpuid, hart);
+ break;
+ }
+
+ cpuid_to_hartid_map(cpuid) = hart;
+ cpuid++;
+ }
+
+ BUG_ON(!found_boot_cpu);
+
+ if (cpuid > nr_cpu_ids)
+ pr_warn("Total number of cpus [%d] is greater than nr_cpus option value [%d]\n",
+ cpuid, nr_cpu_ids);
+
+ for (cpuid = 1; cpuid < nr_cpu_ids; cpuid++) {
+ if (cpuid_to_hartid_map(cpuid) != INVALID_HARTID) {
+ cpu_set_ops(cpuid);
+ set_cpu_possible(cpuid, true);
+ }
+ }
+}
+
+static int start_secondary_cpu(int cpu, struct task_struct *tidle)
+{
+ if (cpu_ops[cpu]->cpu_start)
+ return cpu_ops[cpu]->cpu_start(cpu, tidle);
+
+ return -EOPNOTSUPP;
+}
+
+int __cpu_up(unsigned int cpu, struct task_struct *tidle)
+{
+ int ret = 0;
+ tidle->thread_info.cpu = cpu;
+
+ ret = start_secondary_cpu(cpu, tidle);
+ if (!ret) {
+ wait_for_completion_timeout(&cpu_running,
+ msecs_to_jiffies(1000));
+
+ if (!cpu_online(cpu)) {
+ pr_crit("CPU%u: failed to come online\n", cpu);
+ ret = -EIO;
+ }
+ } else {
+ pr_crit("CPU%u: failed to start\n", cpu);
+ }
+
+ return ret;
+}
+
+void __init smp_cpus_done(unsigned int max_cpus)
+{
+}
+
+/*
+ * C entry point for a secondary processor.
+ */
+asmlinkage __visible void smp_callin(void)
+{
+ struct mm_struct *mm = &init_mm;
+ unsigned int curr_cpuid = smp_processor_id();
+
+ riscv_clear_ipi();
+
+ /* All kernel threads share the same mm context. */
+ mmgrab(mm);
+ current->active_mm = mm;
+
+ store_cpu_topology(curr_cpuid);
+ notify_cpu_starting(curr_cpuid);
+ set_cpu_online(curr_cpuid, 1);
+
+ /*
+ * Remote TLB flushes are ignored while the CPU is offline, so emit
+ * a local TLB flush right now just in case.
+ */
+ local_flush_tlb_all();
+ complete(&cpu_running);
+ /*
+ * Disable preemption before enabling interrupts, so we don't try to
+ * schedule a CPU that hasn't actually started yet.
+ */
+ local_irq_enable();
+ cpu_startup_entry(CPUHP_AP_ONLINE_IDLE);
+}
diff --git a/arch/riscv/kernel/soc.c b/arch/riscv/kernel/soc.c
new file mode 100644
index 000000000..c7b0a73e3
--- /dev/null
+++ b/arch/riscv/kernel/soc.c
@@ -0,0 +1,55 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#include <linux/init.h>
+#include <linux/libfdt.h>
+#include <linux/pgtable.h>
+#include <asm/soc.h>
+
+/*
+ * This is called extremely early, before parse_dtb(), to allow initializing
+ * SoC hardware before memory or any device driver initialization.
+ */
+void __init soc_early_init(void)
+{
+ void (*early_fn)(const void *fdt);
+ const struct of_device_id *s;
+ const void *fdt = dtb_early_va;
+
+ for (s = (void *)&__soc_early_init_table_start;
+ (void *)s < (void *)&__soc_early_init_table_end; s++) {
+ if (!fdt_node_check_compatible(fdt, 0, s->compatible)) {
+ early_fn = s->data;
+ early_fn(fdt);
+ return;
+ }
+ }
+}
+
+static bool soc_builtin_dtb_match(unsigned long vendor_id,
+ unsigned long arch_id, unsigned long imp_id,
+ const struct soc_builtin_dtb *entry)
+{
+ return entry->vendor_id == vendor_id &&
+ entry->arch_id == arch_id &&
+ entry->imp_id == imp_id;
+}
+
+void * __init soc_lookup_builtin_dtb(void)
+{
+ unsigned long vendor_id, arch_id, imp_id;
+ const struct soc_builtin_dtb *s;
+
+ __asm__ ("csrr %0, mvendorid" : "=r"(vendor_id));
+ __asm__ ("csrr %0, marchid" : "=r"(arch_id));
+ __asm__ ("csrr %0, mimpid" : "=r"(imp_id));
+
+ for (s = (void *)&__soc_builtin_dtb_table_start;
+ (void *)s < (void *)&__soc_builtin_dtb_table_end; s++) {
+ if (soc_builtin_dtb_match(vendor_id, arch_id, imp_id, s))
+ return s->dtb_func();
+ }
+
+ return NULL;
+}
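
The two tables walked here are filled at link time; SoC code declares entries with the section-placement macros from asm/soc.h. A sketch of a hypothetical early-init entry, modeled on the Kendryte K210 support (names illustrative):

    /* Hypothetical entry; SOC_EARLY_INIT_DECLARE() (asm/soc.h) places an
     * of_device_id in __soc_early_init_table for the loop above to match. */
    static void __init mysoc_early_init(const void *fdt)
    {
    	/* bring up clocks/PLLs that must exist before memory init */
    }

    SOC_EARLY_INIT_DECLARE(mysoc, "vendor,mysoc", mysoc_early_init);
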
diff --git a/arch/riscv/kernel/stacktrace.c b/arch/riscv/kernel/stacktrace.c
new file mode 100644
index 000000000..9c34735c1
--- /dev/null
+++ b/arch/riscv/kernel/stacktrace.c
@@ -0,0 +1,177 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2008 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/export.h>
+#include <linux/kallsyms.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <linux/stacktrace.h>
+#include <linux/ftrace.h>
+
+register unsigned long sp_in_global __asm__("sp");
+
+#ifdef CONFIG_FRAME_POINTER
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(unsigned long, void *), void *arg)
+{
+ unsigned long fp, sp, pc;
+
+ if (regs) {
+ fp = frame_pointer(regs);
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
+ } else if (task == NULL || task == current) {
+ const register unsigned long current_sp = sp_in_global;
+ fp = (unsigned long)__builtin_frame_address(0);
+ sp = current_sp;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ fp = task->thread.s[0];
+ sp = task->thread.sp;
+ pc = task->thread.ra;
+ }
+
+ for (;;) {
+ unsigned long low, high;
+ struct stackframe *frame;
+
+ if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
+ break;
+
+ /* Validate frame pointer */
+ low = sp + sizeof(struct stackframe);
+ high = ALIGN(sp, THREAD_SIZE);
+ if (unlikely(fp < low || fp > high || fp & 0x7))
+ break;
+ /* Unwind stack frame */
+ frame = (struct stackframe *)fp - 1;
+ sp = fp;
+ if (regs && (regs->epc == pc) && (frame->fp & 0x7)) {
+ fp = frame->ra;
+ pc = regs->ra;
+ } else {
+ fp = frame->fp;
+ pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
+ &frame->ra);
+ }
+
+ }
+}
+
+#else /* !CONFIG_FRAME_POINTER */
+
+void notrace walk_stackframe(struct task_struct *task,
+ struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
+{
+ unsigned long sp, pc;
+ unsigned long *ksp;
+
+ if (regs) {
+ sp = user_stack_pointer(regs);
+ pc = instruction_pointer(regs);
+ } else if (task == NULL || task == current) {
+ sp = sp_in_global;
+ pc = (unsigned long)walk_stackframe;
+ } else {
+ /* task blocked in __switch_to */
+ sp = task->thread.sp;
+ pc = task->thread.ra;
+ }
+
+ if (unlikely(sp & 0x7))
+ return;
+
+ ksp = (unsigned long *)sp;
+ while (!kstack_end(ksp)) {
+ if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
+ break;
+ pc = READ_ONCE_NOCHECK(*ksp++) - 0x4;
+ }
+}
+
+#endif /* CONFIG_FRAME_POINTER */
+
+
+static bool print_trace_address(unsigned long pc, void *arg)
+{
+ const char *loglvl = arg;
+
+ print_ip_sym(loglvl, pc);
+ return false;
+}
+
+void show_stack(struct task_struct *task, unsigned long *sp, const char *loglvl)
+{
+ pr_cont("Call Trace:\n");
+ walk_stackframe(task, NULL, print_trace_address, (void *)loglvl);
+}
+
+static bool save_wchan(unsigned long pc, void *arg)
+{
+ if (!in_sched_functions(pc)) {
+ unsigned long *p = arg;
+ *p = pc;
+ return true;
+ }
+ return false;
+}
+
+unsigned long get_wchan(struct task_struct *task)
+{
+ unsigned long pc = 0;
+
+ if (likely(task && task != current && task->state != TASK_RUNNING))
+ walk_stackframe(task, NULL, save_wchan, &pc);
+ return pc;
+}
+
+
+#ifdef CONFIG_STACKTRACE
+
+static bool __save_trace(unsigned long pc, void *arg, bool nosched)
+{
+ struct stack_trace *trace = arg;
+
+ if (unlikely(nosched && in_sched_functions(pc)))
+ return false;
+ if (unlikely(trace->skip > 0)) {
+ trace->skip--;
+ return false;
+ }
+
+ trace->entries[trace->nr_entries++] = pc;
+ return (trace->nr_entries >= trace->max_entries);
+}
+
+static bool save_trace(unsigned long pc, void *arg)
+{
+ return __save_trace(pc, arg, false);
+}
+
+/*
+ * Save stack-backtrace addresses into a stack_trace buffer.
+ */
+void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
+{
+ walk_stackframe(tsk, NULL, save_trace, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace_tsk);
+
+void save_stack_trace(struct stack_trace *trace)
+{
+ save_stack_trace_tsk(NULL, trace);
+}
+EXPORT_SYMBOL_GPL(save_stack_trace);
+
+#endif /* CONFIG_STACKTRACE */
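
The step frame = (struct stackframe *)fp - 1 relies on the RISC-V ABI convention that s0/fp points just past a two-word record holding the saved fp and ra. A tiny native probe of the same addresses via compiler builtins (illustrative; build with -fno-omit-frame-pointer):

    #include <stdio.h>

    static void __attribute__((noinline)) probe(void)
    {
    	/* The unwinder reads the record just below fp: { saved fp, ra },
    	 * which is why walk_stackframe() uses (struct stackframe *)fp - 1. */
    	printf("fp=%p ra=%p\n",
    	       __builtin_frame_address(0), __builtin_return_address(0));
    }

    int main(void)
    {
    	probe();
    	return 0;
    }
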
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
new file mode 100644
index 000000000..bb4026850
--- /dev/null
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -0,0 +1,69 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2014 Darius Rad <darius@bluespec.com>
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/syscalls.h>
+#include <asm/unistd.h>
+#include <asm/cacheflush.h>
+#include <asm-generic/mman-common.h>
+
+static long riscv_sys_mmap(unsigned long addr, unsigned long len,
+ unsigned long prot, unsigned long flags,
+ unsigned long fd, off_t offset,
+ unsigned long page_shift_offset)
+{
+ if (unlikely(offset & (~PAGE_MASK >> page_shift_offset)))
+ return -EINVAL;
+
+ return ksys_mmap_pgoff(addr, len, prot, flags, fd,
+ offset >> (PAGE_SHIFT - page_shift_offset));
+}
+
+#ifdef CONFIG_64BIT
+SYSCALL_DEFINE6(mmap, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, offset)
+{
+ return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 0);
+}
+#else
+SYSCALL_DEFINE6(mmap2, unsigned long, addr, unsigned long, len,
+ unsigned long, prot, unsigned long, flags,
+ unsigned long, fd, off_t, offset)
+{
+ /*
+ * Note that the shift for mmap2 is constant (12),
+ * regardless of PAGE_SIZE
+ */
+ return riscv_sys_mmap(addr, len, prot, flags, fd, offset, 12);
+}
+#endif /* !CONFIG_64BIT */
+
+/*
+ * Allows the instruction cache to be flushed from userspace. Despite RISC-V
+ * having a direct 'fence.i' instruction available to userspace (which we
+ * can't trap!), that's not actually viable when running on Linux because the
+ * kernel might schedule a process on another hart. There is no way for
+ * userspace to handle this without invoking the kernel (as it doesn't know the
+ * thread->hart mappings), so we've defined a RISC-V specific system call to
+ * flush the instruction cache.
+ *
+ * sys_riscv_flush_icache() is defined to flush the instruction cache over an
+ * address range, with the flush applying to either all threads or just the
+ * caller. We don't currently do anything with the address range, that's just
+ * in there for forwards compatibility.
+ */
+SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
+ uintptr_t, flags)
+{
+ /* Check the reserved flags. */
+ if (unlikely(flags & ~SYS_RISCV_FLUSH_ICACHE_ALL))
+ return -EINVAL;
+
+ flush_icache_mm(current->mm, flags & SYS_RISCV_FLUSH_ICACHE_LOCAL);
+
+ return 0;
+}
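
From user space the call goes through the generic syscall(2) entry; a hedged sketch of the flush-everything form (the fallback syscall number is an assumption, taken from the arch-specific range in asm/unistd.h):

    #include <stdio.h>
    #include <sys/syscall.h>
    #include <unistd.h>

    #ifndef __NR_riscv_flush_icache
    /* Assumption: arch-specific syscall slot 244 + 15 on riscv. */
    #define __NR_riscv_flush_icache 259
    #endif

    int main(void)
    {
    	/* start/end are accepted but currently unused; flags == 0 requests
    	 * a flush visible to all threads of the calling process. */
    	long ret = syscall(__NR_riscv_flush_icache, (void *)0, (void *)0, 0UL);

    	printf("riscv_flush_icache -> %ld\n", ret);
    	return 0;
    }
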
diff --git a/arch/riscv/kernel/syscall_table.c b/arch/riscv/kernel/syscall_table.c
new file mode 100644
index 000000000..f1ead9df9
--- /dev/null
+++ b/arch/riscv/kernel/syscall_table.c
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2009 Arnd Bergmann <arnd@arndb.de>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/linkage.h>
+#include <linux/syscalls.h>
+#include <asm-generic/syscalls.h>
+#include <asm/vdso.h>
+#include <asm/syscall.h>
+
+#undef __SYSCALL
+#define __SYSCALL(nr, call) [nr] = (call),
+
+void *sys_call_table[__NR_syscalls] = {
+ [0 ... __NR_syscalls - 1] = sys_ni_syscall,
+#include <asm/unistd.h>
+};
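
The table relies on a GNU C designated-initializer pattern: every slot defaults to sys_ni_syscall, then the generated __SYSCALL() lines override the implemented entries. A toy stand-alone version of the same pattern (illustrative):

    #include <stdio.h>

    #define NR_TOY 4

    static int toy_ni(void)    { return -38; }	/* like sys_ni_syscall (-ENOSYS) */
    static int toy_hello(void) { return 42; }

    /* Default every slot first; a later designated initializer wins. */
    static int (*toy_table[NR_TOY])(void) = {
    	[0 ... NR_TOY - 1] = toy_ni,
    	[2] = toy_hello,
    };

    int main(void)
    {
    	int i;

    	for (i = 0; i < NR_TOY; i++)
    		printf("slot %d -> %d\n", i, toy_table[i]());
    	return 0;
    }
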
diff --git a/arch/riscv/kernel/time.c b/arch/riscv/kernel/time.c
new file mode 100644
index 000000000..303ae47df
--- /dev/null
+++ b/arch/riscv/kernel/time.c
@@ -0,0 +1,43 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/of_clk.h>
+#include <linux/clockchips.h>
+#include <linux/clocksource.h>
+#include <linux/delay.h>
+#include <asm/sbi.h>
+#include <asm/processor.h>
+
+unsigned long riscv_timebase;
+EXPORT_SYMBOL_GPL(riscv_timebase);
+
+void __init time_init(void)
+{
+ struct device_node *cpu;
+ u32 prop;
+
+ cpu = of_find_node_by_path("/cpus");
+ if (!cpu || of_property_read_u32(cpu, "timebase-frequency", &prop))
+		panic("RISC-V system with no 'timebase-frequency' in DTS\n");
+ of_node_put(cpu);
+ riscv_timebase = prop;
+
+ lpj_fine = riscv_timebase / HZ;
+
+ of_clk_init(NULL);
+ timer_probe();
+
+ tick_setup_hrtimer_broadcast();
+}
+
+void clocksource_arch_init(struct clocksource *cs)
+{
+#ifdef CONFIG_GENERIC_GETTIMEOFDAY
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_ARCHTIMER;
+#else
+ cs->vdso_clock_mode = VDSO_CLOCKMODE_NONE;
+#endif
+}
diff --git a/arch/riscv/kernel/trace_irq.c b/arch/riscv/kernel/trace_irq.c
new file mode 100644
index 000000000..095ac976d
--- /dev/null
+++ b/arch/riscv/kernel/trace_irq.c
@@ -0,0 +1,27 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+
+#include <linux/irqflags.h>
+#include <linux/kprobes.h>
+#include "trace_irq.h"
+
+/*
+ * trace_hardirqs_on/off require the caller to set up the frame pointer
+ * properly. Otherwise, CALLER_ADDR1 might trigger a paging exception in
+ * the kernel. Here we add one extra call level so they can be safely
+ * called by low-level entry code in which $fp is used for other purposes.
+ */
+
+void __trace_hardirqs_on(void)
+{
+ trace_hardirqs_on();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_on);
+
+void __trace_hardirqs_off(void)
+{
+ trace_hardirqs_off();
+}
+NOKPROBE_SYMBOL(__trace_hardirqs_off);
diff --git a/arch/riscv/kernel/trace_irq.h b/arch/riscv/kernel/trace_irq.h
new file mode 100644
index 000000000..99fe67377
--- /dev/null
+++ b/arch/riscv/kernel/trace_irq.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 Changbin Du <changbin.du@gmail.com>
+ */
+#ifndef __TRACE_IRQ_H
+#define __TRACE_IRQ_H
+
+void __trace_hardirqs_on(void);
+void __trace_hardirqs_off(void);
+
+#endif /* __TRACE_IRQ_H */
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
new file mode 100644
index 000000000..227253fde
--- /dev/null
+++ b/arch/riscv/kernel/traps.c
@@ -0,0 +1,188 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/cpu.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/signal.h>
+#include <linux/signal.h>
+#include <linux/kdebug.h>
+#include <linux/uaccess.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+#include <linux/kexec.h>
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+int show_unhandled_signals = 1;
+
+extern asmlinkage void handle_exception(void);
+
+static DEFINE_SPINLOCK(die_lock);
+
+void die(struct pt_regs *regs, const char *str)
+{
+ static int die_counter;
+ int ret;
+ long cause;
+ unsigned long flags;
+
+ oops_enter();
+
+ spin_lock_irqsave(&die_lock, flags);
+ console_verbose();
+ bust_spinlocks(1);
+
+ pr_emerg("%s [#%d]\n", str, ++die_counter);
+ print_modules();
+ if (regs)
+ show_regs(regs);
+
+ cause = regs ? regs->cause : -1;
+ ret = notify_die(DIE_OOPS, str, regs, 0, cause, SIGSEGV);
+
+ if (kexec_should_crash(current))
+ crash_kexec(regs);
+
+ bust_spinlocks(0);
+ add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
+ spin_unlock_irqrestore(&die_lock, flags);
+ oops_exit();
+
+ if (in_interrupt())
+ panic("Fatal exception in interrupt");
+ if (panic_on_oops)
+ panic("Fatal exception");
+ if (ret != NOTIFY_STOP)
+ make_task_dead(SIGSEGV);
+}
+
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
+{
+ struct task_struct *tsk = current;
+
+ if (show_unhandled_signals && unhandled_signal(tsk, signo)
+ && printk_ratelimit()) {
+ pr_info("%s[%d]: unhandled signal %d code 0x%x at 0x" REG_FMT,
+ tsk->comm, task_pid_nr(tsk), signo, code, addr);
+ print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
+ pr_cont("\n");
+ show_regs(regs);
+ }
+
+ force_sig_fault(signo, code, (void __user *)addr);
+}
+
+static void do_trap_error(struct pt_regs *regs, int signo, int code,
+ unsigned long addr, const char *str)
+{
+ if (user_mode(regs)) {
+ do_trap(regs, signo, code, addr);
+ } else {
+ if (!fixup_exception(regs))
+ die(regs, str);
+ }
+}
+
+#define DO_ERROR_INFO(name, signo, code, str) \
+asmlinkage __visible void name(struct pt_regs *regs) \
+{ \
+ do_trap_error(regs, signo, code, regs->epc, "Oops - " str); \
+}
+
+DO_ERROR_INFO(do_trap_unknown,
+ SIGILL, ILL_ILLTRP, "unknown exception");
+DO_ERROR_INFO(do_trap_insn_misaligned,
+ SIGBUS, BUS_ADRALN, "instruction address misaligned");
+DO_ERROR_INFO(do_trap_insn_fault,
+ SIGSEGV, SEGV_ACCERR, "instruction access fault");
+DO_ERROR_INFO(do_trap_insn_illegal,
+ SIGILL, ILL_ILLOPC, "illegal instruction");
+DO_ERROR_INFO(do_trap_load_fault,
+ SIGSEGV, SEGV_ACCERR, "load access fault");
+#ifndef CONFIG_RISCV_M_MODE
+DO_ERROR_INFO(do_trap_load_misaligned,
+ SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
+DO_ERROR_INFO(do_trap_store_misaligned,
+ SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
+#else
+int handle_misaligned_load(struct pt_regs *regs);
+int handle_misaligned_store(struct pt_regs *regs);
+
+asmlinkage void do_trap_load_misaligned(struct pt_regs *regs)
+{
+ if (!handle_misaligned_load(regs))
+ return;
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+ "Oops - load address misaligned");
+}
+
+asmlinkage void do_trap_store_misaligned(struct pt_regs *regs)
+{
+ if (!handle_misaligned_store(regs))
+ return;
+ do_trap_error(regs, SIGBUS, BUS_ADRALN, regs->epc,
+ "Oops - store (or AMO) address misaligned");
+}
+#endif
+DO_ERROR_INFO(do_trap_store_fault,
+ SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
+DO_ERROR_INFO(do_trap_ecall_u,
+ SIGILL, ILL_ILLTRP, "environment call from U-mode");
+DO_ERROR_INFO(do_trap_ecall_s,
+ SIGILL, ILL_ILLTRP, "environment call from S-mode");
+DO_ERROR_INFO(do_trap_ecall_m,
+ SIGILL, ILL_ILLTRP, "environment call from M-mode");
+
+static inline unsigned long get_break_insn_length(unsigned long pc)
+{
+ bug_insn_t insn;
+
+ if (get_kernel_nofault(insn, (bug_insn_t *)pc))
+ return 0;
+
+ return GET_INSN_LENGTH(insn);
+}
+
+asmlinkage __visible void do_trap_break(struct pt_regs *regs)
+{
+ if (user_mode(regs))
+ force_sig_fault(SIGTRAP, TRAP_BRKPT, (void __user *)regs->epc);
+#ifdef CONFIG_KGDB
+ else if (notify_die(DIE_TRAP, "EBREAK", regs, 0, regs->cause, SIGTRAP)
+ == NOTIFY_STOP)
+ return;
+#endif
+ else if (report_bug(regs->epc, regs) == BUG_TRAP_TYPE_WARN)
+ regs->epc += get_break_insn_length(regs->epc);
+ else
+ die(regs, "Kernel BUG");
+}
+
+#ifdef CONFIG_GENERIC_BUG
+int is_valid_bugaddr(unsigned long pc)
+{
+ bug_insn_t insn;
+
+ if (pc < VMALLOC_START)
+ return 0;
+ if (get_kernel_nofault(insn, (bug_insn_t *)pc))
+ return 0;
+ if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32)
+ return (insn == __BUG_INSN_32);
+ else
+ return ((insn & __COMPRESSED_INSN_MASK) == __BUG_INSN_16);
+}
+#endif /* CONFIG_GENERIC_BUG */
+
+/* stvec & scratch are already set up in head.S */
+void trap_init(void)
+{
+}
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
new file mode 100644
index 000000000..b246c3dc6
--- /dev/null
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -0,0 +1,368 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/irq.h>
+
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+#define INSN_MATCH_LB 0x3
+#define INSN_MASK_LB 0x707f
+#define INSN_MATCH_LH 0x1003
+#define INSN_MASK_LH 0x707f
+#define INSN_MATCH_LW 0x2003
+#define INSN_MASK_LW 0x707f
+#define INSN_MATCH_LD 0x3003
+#define INSN_MASK_LD 0x707f
+#define INSN_MATCH_LBU 0x4003
+#define INSN_MASK_LBU 0x707f
+#define INSN_MATCH_LHU 0x5003
+#define INSN_MASK_LHU 0x707f
+#define INSN_MATCH_LWU 0x6003
+#define INSN_MASK_LWU 0x707f
+#define INSN_MATCH_SB 0x23
+#define INSN_MASK_SB 0x707f
+#define INSN_MATCH_SH 0x1023
+#define INSN_MASK_SH 0x707f
+#define INSN_MATCH_SW 0x2023
+#define INSN_MASK_SW 0x707f
+#define INSN_MATCH_SD 0x3023
+#define INSN_MASK_SD 0x707f
+
+#define INSN_MATCH_FLW 0x2007
+#define INSN_MASK_FLW 0x707f
+#define INSN_MATCH_FLD 0x3007
+#define INSN_MASK_FLD 0x707f
+#define INSN_MATCH_FLQ 0x4007
+#define INSN_MASK_FLQ 0x707f
+#define INSN_MATCH_FSW 0x2027
+#define INSN_MASK_FSW 0x707f
+#define INSN_MATCH_FSD 0x3027
+#define INSN_MASK_FSD 0x707f
+#define INSN_MATCH_FSQ 0x4027
+#define INSN_MASK_FSQ 0x707f
+
+#define INSN_MATCH_C_LD 0x6000
+#define INSN_MASK_C_LD 0xe003
+#define INSN_MATCH_C_SD 0xe000
+#define INSN_MASK_C_SD 0xe003
+#define INSN_MATCH_C_LW 0x4000
+#define INSN_MASK_C_LW 0xe003
+#define INSN_MATCH_C_SW 0xc000
+#define INSN_MASK_C_SW 0xe003
+#define INSN_MATCH_C_LDSP 0x6002
+#define INSN_MASK_C_LDSP 0xe003
+#define INSN_MATCH_C_SDSP 0xe002
+#define INSN_MASK_C_SDSP 0xe003
+#define INSN_MATCH_C_LWSP 0x4002
+#define INSN_MASK_C_LWSP 0xe003
+#define INSN_MATCH_C_SWSP 0xc002
+#define INSN_MASK_C_SWSP 0xe003
+
+#define INSN_MATCH_C_FLD 0x2000
+#define INSN_MASK_C_FLD 0xe003
+#define INSN_MATCH_C_FLW 0x6000
+#define INSN_MASK_C_FLW 0xe003
+#define INSN_MATCH_C_FSD 0xa000
+#define INSN_MASK_C_FSD 0xe003
+#define INSN_MATCH_C_FSW 0xe000
+#define INSN_MASK_C_FSW 0xe003
+#define INSN_MATCH_C_FLDSP 0x2002
+#define INSN_MASK_C_FLDSP 0xe003
+#define INSN_MATCH_C_FSDSP 0xa002
+#define INSN_MASK_C_FSDSP 0xe003
+#define INSN_MATCH_C_FLWSP 0x6002
+#define INSN_MASK_C_FLWSP 0xe003
+#define INSN_MATCH_C_FSWSP 0xe002
+#define INSN_MASK_C_FSWSP 0xe003
+
+#define INSN_LEN(insn) ((((insn) & 0x3) < 0x3) ? 2 : 4)
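+/*
+ * The ISA encodes instruction length in the low opcode bits: 00/01/10 mark a
+ * 16-bit compressed instruction, 11 a 32-bit one. For example (encodings
+ * assumed for illustration): 0x4505 (c.li a0, 1) has low bits 01, so
+ * INSN_LEN() yields 2, while 0x0005a503 (lw a0, 0(a1)) has low bits 11 and
+ * yields 4.
+ */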
+
+#if defined(CONFIG_64BIT)
+#define LOG_REGBYTES 3
+#define XLEN 64
+#else
+#define LOG_REGBYTES 2
+#define XLEN 32
+#endif
+#define REGBYTES (1 << LOG_REGBYTES)
+#define XLEN_MINUS_16 ((XLEN) - 16)
+
+#define SH_RD 7
+#define SH_RS1 15
+#define SH_RS2 20
+#define SH_RS2C 2
+
+#define RV_X(x, s, n) (((x) >> (s)) & ((1 << (n)) - 1))
+#define RVC_LW_IMM(x) ((RV_X(x, 6, 1) << 2) | \
+ (RV_X(x, 10, 3) << 3) | \
+ (RV_X(x, 5, 1) << 6))
+#define RVC_LD_IMM(x) ((RV_X(x, 10, 3) << 3) | \
+ (RV_X(x, 5, 2) << 6))
+#define RVC_LWSP_IMM(x) ((RV_X(x, 4, 3) << 2) | \
+ (RV_X(x, 12, 1) << 5) | \
+ (RV_X(x, 2, 2) << 6))
+#define RVC_LDSP_IMM(x) ((RV_X(x, 5, 2) << 3) | \
+ (RV_X(x, 12, 1) << 5) | \
+ (RV_X(x, 2, 3) << 6))
+#define RVC_SWSP_IMM(x) ((RV_X(x, 9, 4) << 2) | \
+ (RV_X(x, 7, 2) << 6))
+#define RVC_SDSP_IMM(x) ((RV_X(x, 10, 3) << 3) | \
+ (RV_X(x, 7, 3) << 6))
+#define RVC_RS1S(insn) (8 + RV_X(insn, SH_RD, 3))
+#define RVC_RS2S(insn) (8 + RV_X(insn, SH_RS2C, 3))
+#define RVC_RS2(insn) RV_X(insn, SH_RS2C, 5)
+
+#define SHIFT_RIGHT(x, y) \
+ ((y) < 0 ? ((x) << -(y)) : ((x) >> (y)))
+
+#define REG_MASK \
+ ((1 << (5 + LOG_REGBYTES)) - (1 << LOG_REGBYTES))
+
+#define REG_OFFSET(insn, pos) \
+ (SHIFT_RIGHT((insn), (pos) - LOG_REGBYTES) & REG_MASK)
+
+#define REG_PTR(insn, pos, regs) \
+ (ulong *)((ulong)(regs) + REG_OFFSET(insn, pos))
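+/*
+ * Worked example of the arithmetic above, assuming the RV64 pt_regs layout
+ * in which slot 0 holds epc and slots 1..31 hold x1..x31: for rd = x10 (a0),
+ * REG_OFFSET(insn, SH_RD) shifts the rd field down by SH_RD - LOG_REGBYTES =
+ * 7 - 3 = 4 and masks with REG_MASK (0xf8), giving 10 * 8 = 80, the byte
+ * offset of a0's save slot within struct pt_regs.
+ */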
+
+#define GET_RM(insn) (((insn) >> 12) & 7)
+
+#define GET_RS1(insn, regs) (*REG_PTR(insn, SH_RS1, regs))
+#define GET_RS2(insn, regs) (*REG_PTR(insn, SH_RS2, regs))
+#define GET_RS1S(insn, regs) (*REG_PTR(RVC_RS1S(insn), 0, regs))
+#define GET_RS2S(insn, regs) (*REG_PTR(RVC_RS2S(insn), 0, regs))
+#define GET_RS2C(insn, regs) (*REG_PTR(insn, SH_RS2C, regs))
+#define GET_SP(regs) (*REG_PTR(2, 0, regs))
+#define SET_RD(insn, regs, val) (*REG_PTR(insn, SH_RD, regs) = (val))
+#define IMM_I(insn) ((s32)(insn) >> 20)
+#define IMM_S(insn) (((s32)(insn) >> 25 << 5) | \
+ (s32)(((insn) >> 7) & 0x1f))
+#define MASK_FUNCT3 0x7000
+
+#define GET_PRECISION(insn) (((insn) >> 25) & 3)
+#define GET_RM(insn) (((insn) >> 12) & 7)
+#define PRECISION_S 0
+#define PRECISION_D 1
+
+#define STR(x) XSTR(x)
+#define XSTR(x) #x
+
+#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
+static inline type load_##type(const type *addr) \
+{ \
+ type val; \
+ asm (#insn " %0, %1" \
+ : "=&r" (val) : "m" (*addr)); \
+ return val; \
+}
+
+#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
+static inline void store_##type(type *addr, type val) \
+{ \
+ asm volatile (#insn " %0, %1\n" \
+ : : "r" (val), "m" (*addr)); \
+}
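+/*
+ * Each invocation below expands to a small accessor; e.g.
+ * DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu) produces (sketch of the
+ * expansion):
+ *
+ *	static inline u8 load_u8(const u8 *addr)
+ *	{
+ *		u8 val;
+ *
+ *		asm ("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+ *		return val;
+ *	}
+ */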
+
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
+DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
+DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
+DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
+#if defined(CONFIG_64BIT)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
+DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
+#else
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
+DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
+
+static inline u64 load_u64(const u64 *addr)
+{
+ return load_u32((u32 *)addr)
+ + ((u64)load_u32((u32 *)addr + 1) << 32);
+}
+
+static inline void store_u64(u64 *addr, u64 val)
+{
+ store_u32((u32 *)addr, val);
+ store_u32((u32 *)addr + 1, val >> 32);
+}
+#endif
+
+static inline ulong get_insn(ulong mepc)
+{
+ register ulong __mepc asm ("a2") = mepc;
+ ulong val, rvc_mask = 3, tmp;
+
+ asm ("and %[tmp], %[addr], 2\n"
+ "bnez %[tmp], 1f\n"
+#if defined(CONFIG_64BIT)
+ STR(LWU) " %[insn], (%[addr])\n"
+#else
+ STR(LW) " %[insn], (%[addr])\n"
+#endif
+ "and %[tmp], %[insn], %[rvc_mask]\n"
+ "beq %[tmp], %[rvc_mask], 2f\n"
+ "sll %[insn], %[insn], %[xlen_minus_16]\n"
+ "srl %[insn], %[insn], %[xlen_minus_16]\n"
+ "j 2f\n"
+ "1:\n"
+ "lhu %[insn], (%[addr])\n"
+ "and %[tmp], %[insn], %[rvc_mask]\n"
+ "bne %[tmp], %[rvc_mask], 2f\n"
+ "lhu %[tmp], 2(%[addr])\n"
+ "sll %[tmp], %[tmp], 16\n"
+ "add %[insn], %[insn], %[tmp]\n"
+ "2:"
+ : [insn] "=&r" (val), [tmp] "=&r" (tmp)
+ : [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
+ [xlen_minus_16] "i" (XLEN_MINUS_16));
+
+ return val;
+}
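+/*
+ * Illustration of the fetch above: with bit 1 of epc clear, a single 32-bit
+ * load suffices, and the result is truncated to 16 bits when its low opcode
+ * bits show a compressed instruction. With bit 1 set, a 32-bit load could
+ * cross a page boundary, so the code fetches one halfword and, only if that
+ * halfword begins a 32-bit instruction, the following halfword as well.
+ */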
+
+union reg_data {
+ u8 data_bytes[8];
+ ulong data_ulong;
+ u64 data_u64;
+};
+
+int handle_misaligned_load(struct pt_regs *regs)
+{
+ union reg_data val;
+ unsigned long epc = regs->epc;
+ unsigned long insn = get_insn(epc);
+ unsigned long addr = csr_read(mtval);
+ int i, fp = 0, shift = 0, len = 0;
+
+ regs->epc = 0;
+
+ if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
+ len = 4;
+ shift = 8 * (sizeof(unsigned long) - len);
+#if defined(CONFIG_64BIT)
+ } else if ((insn & INSN_MASK_LD) == INSN_MATCH_LD) {
+ len = 8;
+ shift = 8 * (sizeof(unsigned long) - len);
+ } else if ((insn & INSN_MASK_LWU) == INSN_MATCH_LWU) {
+ len = 4;
+#endif
+ } else if ((insn & INSN_MASK_FLD) == INSN_MATCH_FLD) {
+ fp = 1;
+ len = 8;
+ } else if ((insn & INSN_MASK_FLW) == INSN_MATCH_FLW) {
+ fp = 1;
+ len = 4;
+ } else if ((insn & INSN_MASK_LH) == INSN_MATCH_LH) {
+ len = 2;
+ shift = 8 * (sizeof(unsigned long) - len);
+ } else if ((insn & INSN_MASK_LHU) == INSN_MATCH_LHU) {
+ len = 2;
+#if defined(CONFIG_64BIT)
+ } else if ((insn & INSN_MASK_C_LD) == INSN_MATCH_C_LD) {
+ len = 8;
+ shift = 8 * (sizeof(unsigned long) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LDSP) == INSN_MATCH_C_LDSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 8;
+ shift = 8 * (sizeof(unsigned long) - len);
+#endif
+ } else if ((insn & INSN_MASK_C_LW) == INSN_MATCH_C_LW) {
+ len = 4;
+ shift = 8 * (sizeof(unsigned long) - len);
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_LWSP) == INSN_MATCH_C_LWSP &&
+ ((insn >> SH_RD) & 0x1f)) {
+ len = 4;
+ shift = 8 * (sizeof(unsigned long) - len);
+ } else if ((insn & INSN_MASK_C_FLD) == INSN_MATCH_C_FLD) {
+ fp = 1;
+ len = 8;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_FLDSP) == INSN_MATCH_C_FLDSP) {
+ fp = 1;
+ len = 8;
+#if defined(CONFIG_32BIT)
+ } else if ((insn & INSN_MASK_C_FLW) == INSN_MATCH_C_FLW) {
+ fp = 1;
+ len = 4;
+ insn = RVC_RS2S(insn) << SH_RD;
+ } else if ((insn & INSN_MASK_C_FLWSP) == INSN_MATCH_C_FLWSP) {
+ fp = 1;
+ len = 4;
+#endif
+ } else {
+ regs->epc = epc;
+ return -1;
+ }
+
+ val.data_u64 = 0;
+ for (i = 0; i < len; i++)
+ val.data_bytes[i] = load_u8((void *)(addr + i));
+
+ if (fp)
+ return -1;
+ SET_RD(insn, regs, val.data_ulong << shift >> shift);
+
+ regs->epc = epc + INSN_LEN(insn);
+
+ return 0;
+}
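+/*
+ * For instance (operands assumed for illustration): a misaligned
+ * "lw a0, 0(a1)" with a1 = 0x...02 takes len = 4 and, on RV64,
+ * shift = 8 * (8 - 4) = 32. The four bytes are gathered one at a time with
+ * the unprivileged byte loads, "<< shift >> shift" trims the result to the
+ * access width, SET_RD() writes it to a0's pt_regs slot, and epc advances by
+ * INSN_LEN() past the faulting instruction.
+ */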
+
+int handle_misaligned_store(struct pt_regs *regs)
+{
+ union reg_data val;
+ unsigned long epc = regs->epc;
+ unsigned long insn = get_insn(epc);
+ unsigned long addr = csr_read(mtval);
+ int i, len = 0;
+
+ regs->epc = 0;
+
+ val.data_ulong = GET_RS2(insn, regs);
+
+ if ((insn & INSN_MASK_SW) == INSN_MATCH_SW) {
+ len = 4;
+#if defined(CONFIG_64BIT)
+ } else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
+ len = 8;
+#endif
+ } else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
+ len = 2;
+#if defined(CONFIG_64BIT)
+ } else if ((insn & INSN_MASK_C_SD) == INSN_MATCH_C_SD) {
+ len = 8;
+ val.data_ulong = GET_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_SDSP) == INSN_MATCH_C_SDSP) {
+ len = 8;
+ val.data_ulong = GET_RS2C(insn, regs);
+#endif
+ } else if ((insn & INSN_MASK_C_SW) == INSN_MATCH_C_SW) {
+ len = 4;
+ val.data_ulong = GET_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
+ len = 4;
+ val.data_ulong = GET_RS2C(insn, regs);
+ } else {
+ regs->epc = epc;
+ return -1;
+ }
+
+ for (i = 0; i < len; i++)
+ store_u8((void *)(addr + i), val.data_bytes[i]);
+
+ regs->epc = epc + INSN_LEN(insn);
+
+ return 0;
+}
diff --git a/arch/riscv/kernel/vdso.c b/arch/riscv/kernel/vdso.c
new file mode 100644
index 000000000..73d45931a
--- /dev/null
+++ b/arch/riscv/kernel/vdso.c
@@ -0,0 +1,113 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2004 Benjamin Herrenschmidt, IBM Corp.
+ * <benh@kernel.crashing.org>
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#include <linux/elf.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/binfmts.h>
+#include <linux/err.h>
+#include <asm/page.h>
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+#include <vdso/datapage.h>
+#else
+#include <asm/vdso.h>
+#endif
+
+extern char vdso_start[], vdso_end[];
+
+static unsigned int vdso_pages;
+static struct page **vdso_pagelist;
+
+/*
+ * The vDSO data page.
+ */
+static union {
+ struct vdso_data data;
+ u8 page[PAGE_SIZE];
+} vdso_data_store __page_aligned_data;
+struct vdso_data *vdso_data = &vdso_data_store.data;
+
+static int __init vdso_init(void)
+{
+ unsigned int i;
+
+ vdso_pages = (vdso_end - vdso_start) >> PAGE_SHIFT;
+ vdso_pagelist =
+ kcalloc(vdso_pages + 1, sizeof(struct page *), GFP_KERNEL);
+ if (unlikely(vdso_pagelist == NULL)) {
+ pr_err("vdso: pagelist allocation failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < vdso_pages; i++) {
+ struct page *pg;
+
+ pg = virt_to_page(vdso_start + (i << PAGE_SHIFT));
+ vdso_pagelist[i] = pg;
+ }
+ vdso_pagelist[i] = virt_to_page(vdso_data);
+
+ return 0;
+}
+arch_initcall(vdso_init);
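+/*
+ * The pagelist built above holds the vDSO code pages followed by one extra
+ * slot for the shared data page; arch_setup_additional_pages() below maps
+ * the code pages first and the data page immediately after them.
+ */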
+
+int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp)
+{
+ struct mm_struct *mm = current->mm;
+ unsigned long vdso_base, vdso_len;
+ int ret;
+
+ vdso_len = (vdso_pages + 1) << PAGE_SHIFT;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
+
+ vdso_base = get_unmapped_area(NULL, 0, vdso_len, 0, 0);
+ if (IS_ERR_VALUE(vdso_base)) {
+ ret = vdso_base;
+ goto end;
+ }
+
+ /*
+ * Put vDSO base into mm struct. We need to do this before calling
+ * install_special_mapping or the perf counter mmap tracking code
+ * will fail to recognise it as a vDSO (since arch_vma_name fails).
+ */
+ mm->context.vdso = (void *)vdso_base;
+
+ ret =
+ install_special_mapping(mm, vdso_base, vdso_pages << PAGE_SHIFT,
+ (VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC),
+ vdso_pagelist);
+
+ if (unlikely(ret)) {
+ mm->context.vdso = NULL;
+ goto end;
+ }
+
+ vdso_base += (vdso_pages << PAGE_SHIFT);
+ ret = install_special_mapping(mm, vdso_base, PAGE_SIZE,
+ (VM_READ | VM_MAYREAD), &vdso_pagelist[vdso_pages]);
+
+ if (unlikely(ret))
+ mm->context.vdso = NULL;
+end:
+ mmap_write_unlock(mm);
+ return ret;
+}
+
+const char *arch_vma_name(struct vm_area_struct *vma)
+{
+ if (vma->vm_mm && (vma->vm_start == (long)vma->vm_mm->context.vdso))
+ return "[vdso]";
+ if (vma->vm_mm && (vma->vm_start ==
+ (long)vma->vm_mm->context.vdso + PAGE_SIZE))
+ return "[vdso_data]";
+ return NULL;
+}
diff --git a/arch/riscv/kernel/vdso/.gitignore b/arch/riscv/kernel/vdso/.gitignore
new file mode 100644
index 000000000..3a19def86
--- /dev/null
+++ b/arch/riscv/kernel/vdso/.gitignore
@@ -0,0 +1,4 @@
+# SPDX-License-Identifier: GPL-2.0-only
+vdso.lds
+*.tmp
+vdso-syms.S
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
new file mode 100644
index 000000000..f4ac7ff56
--- /dev/null
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -0,0 +1,85 @@
+# SPDX-License-Identifier: GPL-2.0-only
+# Copied from arch/tile/kernel/vdso/Makefile
+
+# Absolute relocation type $(ARCH_REL_TYPE_ABS) needs to be defined before
+# the inclusion of generic Makefile.
+ARCH_REL_TYPE_ABS := R_RISCV_32|R_RISCV_64|R_RISCV_JUMP_SLOT
+include $(srctree)/lib/vdso/Makefile
+# Symbols present in the vdso
+vdso-syms = rt_sigreturn
+ifdef CONFIG_64BIT
+vdso-syms += vgettimeofday
+endif
+vdso-syms += getcpu
+vdso-syms += flush_icache
+
+# Files to link into the vdso
+obj-vdso = $(patsubst %, %.o, $(vdso-syms)) note.o
+
+ccflags-y := -fno-stack-protector
+ccflags-y += -DDISABLE_BRANCH_PROFILING
+
+ifneq ($(c-gettimeofday-y),)
+ CFLAGS_vgettimeofday.o += -fPIC -include $(c-gettimeofday-y)
+endif
+
+# Build rules
+targets := $(obj-vdso) vdso.so vdso.so.dbg vdso.lds vdso-syms.S
+obj-vdso := $(addprefix $(obj)/, $(obj-vdso))
+
+obj-y += vdso.o vdso-syms.o
+CPPFLAGS_vdso.lds += -P -C -U$(ARCH)
+ifneq ($(filter vgettimeofday, $(vdso-syms)),)
+CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
+endif
+
+# Disable -pg to prevent ftrace from inserting call sites
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
+
+# Disable profiling and instrumentation for VDSO code
+GCOV_PROFILE := n
+KCOV_INSTRUMENT := n
+KASAN_SANITIZE := n
+
+# Force dependency
+$(obj)/vdso.o: $(obj)/vdso.so
+
+# link rule for the .so file, .lds has to be first
+$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+ $(call if_changed,vdsold)
+LDFLAGS_vdso.so.dbg = -shared -s -soname=linux-vdso.so.1 \
+ --build-id=sha1 --hash-style=both --eh-frame-hdr
+
+# We also create a special relocatable object that should mirror the symbol
+# table and layout of the linked DSO. With ld --just-symbols we can then
+# refer to these symbols in the kernel code rather than hand-coded addresses.
+$(obj)/vdso-syms.S: $(obj)/vdso.so FORCE
+ $(call if_changed,so2s)
+
+# strip rule for the .so file
+$(obj)/%.so: OBJCOPYFLAGS := -S
+$(obj)/%.so: $(obj)/%.so.dbg FORCE
+ $(call if_changed,objcopy)
+
+# actual build commands
+# The DSO images are built using a special linker script
+# Make sure only to export the intended __vdso_xxx symbol offsets.
+quiet_cmd_vdsold = VDSOLD $@
+ cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
+ $(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
+ rm $@.tmp
+
+# Extracts symbol offsets from the VDSO, converting them into an assembly file
+# that contains the same symbols at the same offsets.
+quiet_cmd_so2s = SO2S $@
+ cmd_so2s = $(NM) -D $< | $(srctree)/$(src)/so2s.sh > $@
+
+# install commands for the unstripped file
+quiet_cmd_vdso_install = INSTALL $@
+ cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
+
+vdso.so: $(obj)/vdso.so.dbg
+ @mkdir -p $(MODLIB)/vdso
+ $(call cmd,vdso_install)
+
+vdso_install: vdso.so
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
new file mode 100644
index 000000000..82f97d67c
--- /dev/null
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+ .text
+/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
+ENTRY(__vdso_flush_icache)
+ .cfi_startproc
+#ifdef CONFIG_SMP
+ li a7, __NR_riscv_flush_icache
+ ecall
+#else
+ fence.i
+ li a0, 0
+#endif
+ ret
+ .cfi_endproc
+ENDPROC(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
new file mode 100644
index 000000000..bb0c05e2f
--- /dev/null
+++ b/arch/riscv/kernel/vdso/getcpu.S
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+ .text
+/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
+ENTRY(__vdso_getcpu)
+ .cfi_startproc
+ /* For now, just do the syscall. */
+ li a7, __NR_getcpu
+ ecall
+ ret
+ .cfi_endproc
+ENDPROC(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/note.S b/arch/riscv/kernel/vdso/note.S
new file mode 100644
index 000000000..2a956c942
--- /dev/null
+++ b/arch/riscv/kernel/vdso/note.S
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * This supplies .note.* sections to go into the PT_NOTE inside the vDSO text.
+ * Here we can supply some information useful to userland.
+ */
+
+#include <linux/elfnote.h>
+#include <linux/version.h>
+
+ELFNOTE_START(Linux, 0, "a")
+ .long LINUX_VERSION_CODE
+ELFNOTE_END
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
new file mode 100644
index 000000000..0573705ea
--- /dev/null
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/linkage.h>
+#include <asm/unistd.h>
+
+ .text
+ENTRY(__vdso_rt_sigreturn)
+ .cfi_startproc
+ .cfi_signal_frame
+ li a7, __NR_rt_sigreturn
+ scall
+ .cfi_endproc
+ENDPROC(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/so2s.sh b/arch/riscv/kernel/vdso/so2s.sh
new file mode 100755
index 000000000..e64cb6d94
--- /dev/null
+++ b/arch/riscv/kernel/vdso/so2s.sh
@@ -0,0 +1,6 @@
+#!/bin/sh
+# SPDX-License-Identifier: GPL-2.0+
+# Copyright 2020 Palmer Dabbelt <palmerdabbelt@google.com>
+
+sed 's!\([0-9a-f]*\) T \([a-z0-9_]*\)\(@@LINUX_4.15\)*!.global \2\n.set \2,0x\1!' \
+| grep '^\.'
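+#
+# For example, an "nm -D vdso.so" line such as (illustrative offset):
+#
+#	0000000000000800 T __vdso_rt_sigreturn@@LINUX_4.15
+#
+# is rewritten to:
+#
+#	.global __vdso_rt_sigreturn
+#	.set __vdso_rt_sigreturn,0x0000000000000800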
diff --git a/arch/riscv/kernel/vdso/vdso.S b/arch/riscv/kernel/vdso/vdso.S
new file mode 100644
index 000000000..df222245b
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vdso.S
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#include <linux/init.h>
+#include <linux/linkage.h>
+#include <asm/page.h>
+
+ __PAGE_ALIGNED_DATA
+
+ .globl vdso_start, vdso_end
+ .balign PAGE_SIZE
+vdso_start:
+ .incbin "arch/riscv/kernel/vdso/vdso.so"
+ .balign PAGE_SIZE
+vdso_end:
+
+ .previous
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
new file mode 100644
index 000000000..b3e58402c
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+#include <asm/page.h>
+
+OUTPUT_ARCH(riscv)
+
+SECTIONS
+{
+ PROVIDE(_vdso_data = . + PAGE_SIZE);
+ . = SIZEOF_HEADERS;
+
+ .hash : { *(.hash) } :text
+ .gnu.hash : { *(.gnu.hash) }
+ .dynsym : { *(.dynsym) }
+ .dynstr : { *(.dynstr) }
+ .gnu.version : { *(.gnu.version) }
+ .gnu.version_d : { *(.gnu.version_d) }
+ .gnu.version_r : { *(.gnu.version_r) }
+
+ .note : { *(.note.*) } :text :note
+ .dynamic : { *(.dynamic) } :text :dynamic
+
+ .eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
+ .eh_frame : { KEEP (*(.eh_frame)) } :text
+
+ .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
+
+ /*
+ * This linker script is used both with -r and with -shared.
+ * For the layouts to match, we need to skip more than enough
+ * space for the dynamic symbol table, etc. If this amount is
+ * insufficient, ld -shared will error; simply increase it here.
+ */
+ . = 0x800;
+ .text : { *(.text .text.*) } :text
+
+ .data : {
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+}
+
+/*
+ * We must supply the ELF program headers explicitly to get just one
+ * PT_LOAD segment, and set the flags explicitly to make segments read-only.
+ */
+PHDRS
+{
+ text PT_LOAD FLAGS(5) FILEHDR PHDRS; /* PF_R|PF_X */
+ dynamic PT_DYNAMIC FLAGS(4); /* PF_R */
+ note PT_NOTE FLAGS(4); /* PF_R */
+ eh_frame_hdr PT_GNU_EH_FRAME;
+}
+
+/*
+ * This controls what symbols we export from the DSO.
+ */
+VERSION
+{
+ LINUX_4.15 {
+ global:
+ __vdso_rt_sigreturn;
+#ifdef HAS_VGETTIMEOFDAY
+ __vdso_gettimeofday;
+ __vdso_clock_gettime;
+ __vdso_clock_getres;
+#endif
+ __vdso_getcpu;
+ __vdso_flush_icache;
+ local: *;
+ };
+}
diff --git a/arch/riscv/kernel/vdso/vgettimeofday.c b/arch/riscv/kernel/vdso/vgettimeofday.c
new file mode 100644
index 000000000..cc0d80699
--- /dev/null
+++ b/arch/riscv/kernel/vdso/vgettimeofday.c
@@ -0,0 +1,31 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copied from arch/arm64/kernel/vdso/vgettimeofday.c
+ *
+ * Copyright (C) 2018 ARM Ltd.
+ * Copyright (C) 2020 SiFive
+ */
+
+#include <linux/time.h>
+#include <linux/types.h>
+
+extern
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts);
+int __vdso_clock_gettime(clockid_t clock, struct __kernel_timespec *ts)
+{
+ return __cvdso_clock_gettime(clock, ts);
+}
+
+extern
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz);
+int __vdso_gettimeofday(struct __kernel_old_timeval *tv, struct timezone *tz)
+{
+ return __cvdso_gettimeofday(tv, tz);
+}
+
+extern
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res);
+int __vdso_clock_getres(clockid_t clock_id, struct __kernel_timespec *res)
+{
+ return __cvdso_clock_getres(clock_id, res);
+}
diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
new file mode 100644
index 000000000..3ffbd6cbd
--- /dev/null
+++ b/arch/riscv/kernel/vmlinux.lds.S
@@ -0,0 +1,123 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#define LOAD_OFFSET PAGE_OFFSET
+#include <asm/vmlinux.lds.h>
+#include <asm/page.h>
+#include <asm/cache.h>
+#include <asm/thread_info.h>
+#include <asm/set_memory.h>
+#include "image-vars.h"
+
+#include <linux/sizes.h>
+OUTPUT_ARCH(riscv)
+ENTRY(_start)
+
+jiffies = jiffies_64;
+
+PECOFF_SECTION_ALIGNMENT = 0x1000;
+PECOFF_FILE_ALIGNMENT = 0x200;
+
+SECTIONS
+{
+ /* Beginning of code and text segment */
+ . = LOAD_OFFSET;
+ _start = .;
+ HEAD_TEXT_SECTION
+ . = ALIGN(PAGE_SIZE);
+
+ __init_begin = .;
+ INIT_TEXT_SECTION(PAGE_SIZE)
+ . = ALIGN(8);
+ __soc_early_init_table : {
+ __soc_early_init_table_start = .;
+ KEEP(*(__soc_early_init_table))
+ __soc_early_init_table_end = .;
+ }
+ __soc_builtin_dtb_table : {
+ __soc_builtin_dtb_table_start = .;
+ KEEP(*(__soc_builtin_dtb_table))
+ __soc_builtin_dtb_table_end = .;
+ }
+ /* we have to discard exit text and such at runtime, not link time */
+ .exit.text :
+ {
+ EXIT_TEXT
+ }
+ .exit.data :
+ {
+ EXIT_DATA
+ }
+ PERCPU_SECTION(L1_CACHE_BYTES)
+ __init_end = .;
+
+ . = ALIGN(SECTION_ALIGN);
+ .text : {
+ _text = .;
+ _stext = .;
+ TEXT_TEXT
+ SCHED_TEXT
+ CPUIDLE_TEXT
+ LOCK_TEXT
+ KPROBES_TEXT
+ ENTRY_TEXT
+ IRQENTRY_TEXT
+ SOFTIRQENTRY_TEXT
+ *(.fixup)
+ _etext = .;
+ }
+
+#ifdef CONFIG_EFI
+ . = ALIGN(PECOFF_SECTION_ALIGNMENT);
+ __pecoff_text_end = .;
+#endif
+
+ INIT_DATA_SECTION(16)
+
+ /* Start of data section */
+ _sdata = .;
+ RO_DATA(SECTION_ALIGN)
+ .srodata : {
+ *(.srodata*)
+ }
+
+ EXCEPTION_TABLE(0x10)
+
+ . = ALIGN(SECTION_ALIGN);
+ _data = .;
+
+ RW_DATA(L1_CACHE_BYTES, PAGE_SIZE, THREAD_SIZE)
+ .sdata : {
+ __global_pointer$ = . + 0x800;
+ *(.sdata*)
+ }
+
+#ifdef CONFIG_EFI
+ .pecoff_edata_padding : { BYTE(0); . = ALIGN(PECOFF_FILE_ALIGNMENT); }
+ __pecoff_data_raw_size = ABSOLUTE(. - __pecoff_text_end);
+#endif
+
+ /* End of data section */
+ _edata = .;
+
+ BSS_SECTION(PAGE_SIZE, PAGE_SIZE, 0)
+
+ .rel.dyn : {
+ *(.rel.dyn*)
+ }
+
+#ifdef CONFIG_EFI
+ . = ALIGN(PECOFF_SECTION_ALIGNMENT);
+ __pecoff_data_virt_size = ABSOLUTE(. - __pecoff_text_end);
+#endif
+ _end = .;
+
+ STABS_DEBUG
+ DWARF_DEBUG
+ ELF_DETAILS
+
+ DISCARDS
+}
diff --git a/arch/riscv/lib/Makefile b/arch/riscv/lib/Makefile
new file mode 100644
index 000000000..47e7a8204
--- /dev/null
+++ b/arch/riscv/lib/Makefile
@@ -0,0 +1,6 @@
+# SPDX-License-Identifier: GPL-2.0-only
+lib-y += delay.o
+lib-y += memcpy.o
+lib-y += memset.o
+lib-$(CONFIG_MMU) += uaccess.o
+lib-$(CONFIG_64BIT) += tishift.o
diff --git a/arch/riscv/lib/delay.c b/arch/riscv/lib/delay.c
new file mode 100644
index 000000000..f51c9a03b
--- /dev/null
+++ b/arch/riscv/lib/delay.c
@@ -0,0 +1,107 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#include <linux/delay.h>
+#include <linux/param.h>
+#include <linux/timex.h>
+#include <linux/export.h>
+
+/*
+ * This is copied from arch/arm/include/asm/delay.h
+ *
+ * Loop (or tick) based delay:
+ *
+ * loops = loops_per_jiffy * jiffies_per_sec * delay_us / us_per_sec
+ *
+ * where:
+ *
+ * jiffies_per_sec = HZ
+ * us_per_sec = 1000000
+ *
+ * Therefore the constant part is HZ / 1000000 which is a small
+ * fractional number. To make this usable with integer math, we
+ * scale up this constant by 2^31, perform the actual multiplication,
+ * and scale the result back down by 2^31 with a simple shift:
+ *
+ * loops = (loops_per_jiffy * delay_us * UDELAY_MULT) >> 31
+ *
+ * where:
+ *
+ * UDELAY_MULT = 2^31 * HZ / 1000000
+ * = (2^31 / 1000000) * HZ
+ * = 2147.483648 * HZ
+ * = 2147 * HZ + 483648 * HZ / 1000000
+ *
+ * 31 is the biggest scale shift value that won't overflow 32 bits for
+ * delay_us * UDELAY_MULT assuming HZ <= 1000 and delay_us <= 2000.
+ */
+#define MAX_UDELAY_US 2000
+#define MAX_UDELAY_HZ 1000
+#define UDELAY_MULT (2147UL * HZ + 483648UL * HZ / 1000000UL)
+#define UDELAY_SHIFT 31
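+
+/*
+ * Worked example, assuming HZ = 250:
+ *
+ *	UDELAY_MULT = 2147 * 250 + 483648 * 250 / 1000000
+ *	            = 536750 + 120 = 536870
+ *
+ * which approximates 2^31 * 250 / 1000000 = 536870.912 using only integer
+ * arithmetic.
+ */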
+
+#if HZ > MAX_UDELAY_HZ
+#error "HZ > MAX_UDELAY_HZ"
+#endif
+
+/*
+ * RISC-V supports both UDELAY and NDELAY. This is largely the same as above,
+ * but with different constants. I added 10 bits to the shift to get this, but
+ * the result is that I need a 64-bit multiply, which is slow on 32-bit
+ * platforms.
+ *
+ * NDELAY_MULT = 2^41 * HZ / 1000000000
+ * = (2^41 / 1000000000) * HZ
+ * = 2199.02325555 * HZ
+ * = 2199 * HZ + 23255550 * HZ / 1000000000
+ *
+ * The maximum here is to avoid 64-bit overflow, but it isn't checked as it
+ * won't happen.
+ */
+#define MAX_NDELAY_NS (1ULL << 42)
+#define MAX_NDELAY_HZ MAX_UDELAY_HZ
+#define NDELAY_MULT ((unsigned long long)(2199ULL * HZ + 23255550ULL * HZ / 1000000000ULL))
+#define NDELAY_SHIFT 41
+
+#if HZ > MAX_NDELAY_HZ
+#error "HZ > MAX_NDELAY_HZ"
+#endif
+
+void __delay(unsigned long cycles)
+{
+ u64 t0 = get_cycles();
+
+ while ((unsigned long)(get_cycles() - t0) < cycles)
+ cpu_relax();
+}
+EXPORT_SYMBOL(__delay);
+
+void udelay(unsigned long usecs)
+{
+ u64 ucycles = (u64)usecs * lpj_fine * UDELAY_MULT;
+ u64 n;
+
+ if (unlikely(usecs > MAX_UDELAY_US)) {
+ n = (u64)usecs * riscv_timebase;
+ do_div(n, 1000000);
+
+ __delay(n);
+ return;
+ }
+
+ __delay(ucycles >> UDELAY_SHIFT);
+}
+EXPORT_SYMBOL(udelay);
+
+void ndelay(unsigned long nsecs)
+{
+ /*
+ * This doesn't bother checking for overflow, as it won't happen (it's
+	 * an hour of delay).
+ */
+ unsigned long long ncycles = nsecs * lpj_fine * NDELAY_MULT;
+ __delay(ncycles >> NDELAY_SHIFT);
+}
+EXPORT_SYMBOL(ndelay);
diff --git a/arch/riscv/lib/memcpy.S b/arch/riscv/lib/memcpy.S
new file mode 100644
index 000000000..51ab71625
--- /dev/null
+++ b/arch/riscv/lib/memcpy.S
@@ -0,0 +1,108 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ */
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+/* void *memcpy(void *, const void *, size_t) */
+ENTRY(__memcpy)
+WEAK(memcpy)
+ move t6, a0 /* Preserve return value */
+
+ /* Defer to byte-oriented copy for small sizes */
+ sltiu a3, a2, 128
+ bnez a3, 4f
+ /* Use word-oriented copy only if low-order bits match */
+ andi a3, t6, SZREG-1
+ andi a4, a1, SZREG-1
+ bne a3, a4, 4f
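+	/*
+	 * Example: dst = 0x..13 and src = 0x..0b are co-aligned on RV64
+	 * (both have low bits 011), so after a 5-byte lead-in both pointers
+	 * are SZREG-aligned and the unrolled word loop applies; src = 0x..0c
+	 * would differ and branch to the byte copy at 4f.
+	 */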
+
+ beqz a3, 2f /* Skip if already aligned */
+ /*
+ * Round to nearest double word-aligned address
+ * greater than or equal to start address
+ */
+ andi a3, a1, ~(SZREG-1)
+ addi a3, a3, SZREG
+ /* Handle initial misalignment */
+ sub a4, a3, a1
+1:
+ lb a5, 0(a1)
+ addi a1, a1, 1
+ sb a5, 0(t6)
+ addi t6, t6, 1
+ bltu a1, a3, 1b
+ sub a2, a2, a4 /* Update count */
+
+2:
+ andi a4, a2, ~((16*SZREG)-1)
+ beqz a4, 4f
+ add a3, a1, a4
+3:
+ REG_L a4, 0(a1)
+ REG_L a5, SZREG(a1)
+ REG_L a6, 2*SZREG(a1)
+ REG_L a7, 3*SZREG(a1)
+ REG_L t0, 4*SZREG(a1)
+ REG_L t1, 5*SZREG(a1)
+ REG_L t2, 6*SZREG(a1)
+ REG_L t3, 7*SZREG(a1)
+ REG_L t4, 8*SZREG(a1)
+ REG_L t5, 9*SZREG(a1)
+ REG_S a4, 0(t6)
+ REG_S a5, SZREG(t6)
+ REG_S a6, 2*SZREG(t6)
+ REG_S a7, 3*SZREG(t6)
+ REG_S t0, 4*SZREG(t6)
+ REG_S t1, 5*SZREG(t6)
+ REG_S t2, 6*SZREG(t6)
+ REG_S t3, 7*SZREG(t6)
+ REG_S t4, 8*SZREG(t6)
+ REG_S t5, 9*SZREG(t6)
+ REG_L a4, 10*SZREG(a1)
+ REG_L a5, 11*SZREG(a1)
+ REG_L a6, 12*SZREG(a1)
+ REG_L a7, 13*SZREG(a1)
+ REG_L t0, 14*SZREG(a1)
+ REG_L t1, 15*SZREG(a1)
+ addi a1, a1, 16*SZREG
+ REG_S a4, 10*SZREG(t6)
+ REG_S a5, 11*SZREG(t6)
+ REG_S a6, 12*SZREG(t6)
+ REG_S a7, 13*SZREG(t6)
+ REG_S t0, 14*SZREG(t6)
+ REG_S t1, 15*SZREG(t6)
+ addi t6, t6, 16*SZREG
+ bltu a1, a3, 3b
+ andi a2, a2, (16*SZREG)-1 /* Update count */
+
+4:
+ /* Handle trailing misalignment */
+ beqz a2, 6f
+ add a3, a1, a2
+
+ /* Use word-oriented copy if co-aligned to word boundary */
+ or a5, a1, t6
+ or a5, a5, a3
+ andi a5, a5, 3
+ bnez a5, 5f
+7:
+ lw a4, 0(a1)
+ addi a1, a1, 4
+ sw a4, 0(t6)
+ addi t6, t6, 4
+ bltu a1, a3, 7b
+
+ ret
+
+5:
+ lb a4, 0(a1)
+ addi a1, a1, 1
+ sb a4, 0(t6)
+ addi t6, t6, 1
+ bltu a1, a3, 5b
+6:
+ ret
+END(__memcpy)
diff --git a/arch/riscv/lib/memset.S b/arch/riscv/lib/memset.S
new file mode 100644
index 000000000..34c5360c6
--- /dev/null
+++ b/arch/riscv/lib/memset.S
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ */
+
+
+#include <linux/linkage.h>
+#include <asm/asm.h>
+
+/* void *memset(void *, int, size_t) */
+ENTRY(__memset)
+WEAK(memset)
+ move t0, a0 /* Preserve return value */
+
+ /* Defer to byte-oriented fill for small sizes */
+ sltiu a3, a2, 16
+ bnez a3, 4f
+
+ /*
+ * Round to nearest XLEN-aligned address
+ * greater than or equal to start address
+ */
+ addi a3, t0, SZREG-1
+ andi a3, a3, ~(SZREG-1)
+ beq a3, t0, 2f /* Skip if already aligned */
+ /* Handle initial misalignment */
+ sub a4, a3, t0
+1:
+ sb a1, 0(t0)
+ addi t0, t0, 1
+ bltu t0, a3, 1b
+ sub a2, a2, a4 /* Update count */
+
+2: /* Duff's device with 32 XLEN stores per iteration */
+ /* Broadcast value into all bytes */
+ andi a1, a1, 0xff
+ slli a3, a1, 8
+ or a1, a3, a1
+ slli a3, a1, 16
+ or a1, a3, a1
+#ifdef CONFIG_64BIT
+ slli a3, a1, 32
+ or a1, a3, a1
+#endif
+
+ /* Calculate end address */
+ andi a4, a2, ~(SZREG-1)
+ add a3, t0, a4
+
+ andi a4, a4, 31*SZREG /* Calculate remainder */
+ beqz a4, 3f /* Shortcut if no remainder */
+ neg a4, a4
+ addi a4, a4, 32*SZREG /* Calculate initial offset */
+
+ /* Adjust start address with offset */
+ sub t0, t0, a4
+
+ /* Jump into loop body */
+ /* Assumes 32-bit instruction lengths */
+ la a5, 3f
+#ifdef CONFIG_64BIT
+ srli a4, a4, 1
+#endif
+ add a5, a5, a4
+ jr a5
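+	/*
+	 * Each REG_S below is a 4-byte instruction covering SZREG data bytes,
+	 * so a data-byte offset a4 maps to a4 * 4 / SZREG instruction bytes:
+	 * a4 / 2 on RV64 (SZREG = 8), a4 unchanged on RV32 (SZREG = 4). E.g.
+	 * on RV64, skipping 30 of the 32 stores (240 data bytes) means
+	 * entering 120 bytes into the unrolled run.
+	 */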
+3:
+ REG_S a1, 0(t0)
+ REG_S a1, SZREG(t0)
+ REG_S a1, 2*SZREG(t0)
+ REG_S a1, 3*SZREG(t0)
+ REG_S a1, 4*SZREG(t0)
+ REG_S a1, 5*SZREG(t0)
+ REG_S a1, 6*SZREG(t0)
+ REG_S a1, 7*SZREG(t0)
+ REG_S a1, 8*SZREG(t0)
+ REG_S a1, 9*SZREG(t0)
+ REG_S a1, 10*SZREG(t0)
+ REG_S a1, 11*SZREG(t0)
+ REG_S a1, 12*SZREG(t0)
+ REG_S a1, 13*SZREG(t0)
+ REG_S a1, 14*SZREG(t0)
+ REG_S a1, 15*SZREG(t0)
+ REG_S a1, 16*SZREG(t0)
+ REG_S a1, 17*SZREG(t0)
+ REG_S a1, 18*SZREG(t0)
+ REG_S a1, 19*SZREG(t0)
+ REG_S a1, 20*SZREG(t0)
+ REG_S a1, 21*SZREG(t0)
+ REG_S a1, 22*SZREG(t0)
+ REG_S a1, 23*SZREG(t0)
+ REG_S a1, 24*SZREG(t0)
+ REG_S a1, 25*SZREG(t0)
+ REG_S a1, 26*SZREG(t0)
+ REG_S a1, 27*SZREG(t0)
+ REG_S a1, 28*SZREG(t0)
+ REG_S a1, 29*SZREG(t0)
+ REG_S a1, 30*SZREG(t0)
+ REG_S a1, 31*SZREG(t0)
+ addi t0, t0, 32*SZREG
+ bltu t0, a3, 3b
+ andi a2, a2, SZREG-1 /* Update count */
+
+4:
+ /* Handle trailing misalignment */
+ beqz a2, 6f
+ add a3, t0, a2
+5:
+ sb a1, 0(t0)
+ addi t0, t0, 1
+ bltu t0, a3, 5b
+6:
+ ret
+END(__memset)
diff --git a/arch/riscv/lib/tishift.S b/arch/riscv/lib/tishift.S
new file mode 100644
index 000000000..ef90075c4
--- /dev/null
+++ b/arch/riscv/lib/tishift.S
@@ -0,0 +1,76 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2018 Free Software Foundation, Inc.
+ */
+
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+
+SYM_FUNC_START(__lshrti3)
+ beqz a2, .L1
+ li a5,64
+ sub a5,a5,a2
+ sext.w a4,a5
+ blez a5, .L2
+ sext.w a2,a2
+ srl a0,a0,a2
+ sll a4,a1,a4
+ srl a2,a1,a2
+ or a0,a0,a4
+ mv a1,a2
+.L1:
+ ret
+.L2:
+ negw a0,a4
+ li a2,0
+ srl a0,a1,a0
+ mv a1,a2
+ ret
+SYM_FUNC_END(__lshrti3)
+EXPORT_SYMBOL(__lshrti3)
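+
+/*
+ * All three helpers treat the 128-bit operand as (a0 = low 64 bits,
+ * a1 = high 64 bits). For a right shift by c with 0 < c < 64 the low word
+ * becomes (low >> c) | (high << (64 - c)); for c >= 64 the .L2-style path
+ * reduces to low = high >> (c - 64), with the high word filled by zeroes
+ * (logical) or by the sign (arithmetic).
+ */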
+
+SYM_FUNC_START(__ashrti3)
+ beqz a2, .L3
+ li a5,64
+ sub a5,a5,a2
+ sext.w a4,a5
+ blez a5, .L4
+ sext.w a2,a2
+ srl a0,a0,a2
+ sll a4,a1,a4
+ sra a2,a1,a2
+ or a0,a0,a4
+ mv a1,a2
+.L3:
+ ret
+.L4:
+ negw a0,a4
+ srai a2,a1,0x3f
+ sra a0,a1,a0
+ mv a1,a2
+ ret
+SYM_FUNC_END(__ashrti3)
+EXPORT_SYMBOL(__ashrti3)
+
+SYM_FUNC_START(__ashlti3)
+ beqz a2, .L5
+ li a5,64
+ sub a5,a5,a2
+ sext.w a4,a5
+ blez a5, .L6
+ sext.w a2,a2
+ sll a1,a1,a2
+ srl a4,a0,a4
+ sll a2,a0,a2
+ or a1,a1,a4
+ mv a0,a2
+.L5:
+ ret
+.L6:
+ negw a1,a4
+ li a2,0
+ sll a1,a0,a1
+ mv a0,a2
+ ret
+SYM_FUNC_END(__ashlti3)
+EXPORT_SYMBOL(__ashlti3)
diff --git a/arch/riscv/lib/uaccess.S b/arch/riscv/lib/uaccess.S
new file mode 100644
index 000000000..fceaeb18c
--- /dev/null
+++ b/arch/riscv/lib/uaccess.S
@@ -0,0 +1,126 @@
+#include <linux/linkage.h>
+#include <asm-generic/export.h>
+#include <asm/asm.h>
+#include <asm/csr.h>
+
+ .macro fixup op reg addr lbl
+100:
+ \op \reg, \addr
+ .section __ex_table,"a"
+ .balign RISCV_SZPTR
+ RISCV_PTR 100b, \lbl
+ .previous
+ .endm
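+
+/*
+ * Each use of the macro above emits the access at a local label and records
+ * a (fault address, fixup target) pair in __ex_table. Expanded by hand,
+ * "fixup lbu, t2, (a1), 10f" becomes roughly:
+ *
+ *	100:	lbu t2, (a1)
+ *		.section __ex_table, "a"
+ *		.balign RISCV_SZPTR
+ *		RISCV_PTR 100b, 10f
+ *		.previous
+ *
+ * so a fault at label 100 makes fixup_exception() redirect epc to the error
+ * path at label 10.
+ */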
+
+ENTRY(__asm_copy_to_user)
+ENTRY(__asm_copy_from_user)
+
+ /* Enable access to user memory */
+ li t6, SR_SUM
+ csrs CSR_STATUS, t6
+
+ add a3, a1, a2
+ /* Use word-oriented copy only if low-order bits match */
+ andi t0, a0, SZREG-1
+ andi t1, a1, SZREG-1
+ bne t0, t1, 2f
+
+ addi t0, a1, SZREG-1
+ andi t1, a3, ~(SZREG-1)
+ andi t0, t0, ~(SZREG-1)
+ /*
+ * a3: terminal address of source region
+ * t0: lowest XLEN-aligned address in source
+ * t1: highest XLEN-aligned address in source
+ */
+ bgeu t0, t1, 2f
+ bltu a1, t0, 4f
+1:
+ fixup REG_L, t2, (a1), 10f
+ fixup REG_S, t2, (a0), 10f
+ addi a1, a1, SZREG
+ addi a0, a0, SZREG
+ bltu a1, t1, 1b
+2:
+ bltu a1, a3, 5f
+
+3:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+ li a0, 0
+ ret
+4: /* Edge case: unalignment */
+ fixup lbu, t2, (a1), 10f
+ fixup sb, t2, (a0), 10f
+ addi a1, a1, 1
+ addi a0, a0, 1
+ bltu a1, t0, 4b
+ j 1b
+5: /* Edge case: remainder */
+ fixup lbu, t2, (a1), 10f
+ fixup sb, t2, (a0), 10f
+ addi a1, a1, 1
+ addi a0, a0, 1
+ bltu a1, a3, 5b
+ j 3b
+ENDPROC(__asm_copy_to_user)
+ENDPROC(__asm_copy_from_user)
+EXPORT_SYMBOL(__asm_copy_to_user)
+EXPORT_SYMBOL(__asm_copy_from_user)
+
+
+ENTRY(__clear_user)
+
+ /* Enable access to user memory */
+ li t6, SR_SUM
+ csrs CSR_STATUS, t6
+
+ add a3, a0, a1
+ addi t0, a0, SZREG-1
+ andi t1, a3, ~(SZREG-1)
+ andi t0, t0, ~(SZREG-1)
+ /*
+ * a3: terminal address of target region
+ * t0: lowest doubleword-aligned address in target region
+ * t1: highest doubleword-aligned address in target region
+ */
+ bgeu t0, t1, 2f
+ bltu a0, t0, 4f
+1:
+ fixup REG_S, zero, (a0), 11f
+ addi a0, a0, SZREG
+ bltu a0, t1, 1b
+2:
+ bltu a0, a3, 5f
+
+3:
+ /* Disable access to user memory */
+ csrc CSR_STATUS, t6
+ li a0, 0
+ ret
+4: /* Edge case: unalignment */
+ fixup sb, zero, (a0), 11f
+ addi a0, a0, 1
+ bltu a0, t0, 4b
+ j 1b
+5: /* Edge case: remainder */
+ fixup sb, zero, (a0), 11f
+ addi a0, a0, 1
+ bltu a0, a3, 5b
+ j 3b
+ENDPROC(__clear_user)
+EXPORT_SYMBOL(__clear_user)
+
+ .section .fixup,"ax"
+ .balign 4
+ /* Fixup code for __copy_user(10) and __clear_user(11) */
+10:
+ /* Disable access to user memory */
+	csrc CSR_STATUS, t6
+ mv a0, a2
+ ret
+11:
+	csrc CSR_STATUS, t6
+ mv a0, a1
+ ret
+ .previous
diff --git a/arch/riscv/mm/Makefile b/arch/riscv/mm/Makefile
new file mode 100644
index 000000000..ac7a25298
--- /dev/null
+++ b/arch/riscv/mm/Makefile
@@ -0,0 +1,32 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+CFLAGS_init.o := -mcmodel=medany
+ifdef CONFIG_FTRACE
+CFLAGS_REMOVE_init.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_cacheflush.o = $(CC_FLAGS_FTRACE)
+endif
+
+KCOV_INSTRUMENT_init.o := n
+
+obj-y += init.o
+obj-y += extable.o
+obj-$(CONFIG_MMU) += fault.o pageattr.o
+obj-y += cacheflush.o
+obj-y += context.o
+
+ifeq ($(CONFIG_MMU),y)
+obj-$(CONFIG_SMP) += tlbflush.o
+endif
+obj-$(CONFIG_HUGETLB_PAGE) += hugetlbpage.o
+obj-$(CONFIG_PTDUMP_CORE) += ptdump.o
+obj-$(CONFIG_KASAN) += kasan_init.o
+
+ifdef CONFIG_KASAN
+KASAN_SANITIZE_kasan_init.o := n
+KASAN_SANITIZE_init.o := n
+ifdef CONFIG_DEBUG_VIRTUAL
+KASAN_SANITIZE_physaddr.o := n
+endif
+endif
+
+obj-$(CONFIG_DEBUG_VIRTUAL) += physaddr.o
diff --git a/arch/riscv/mm/cacheflush.c b/arch/riscv/mm/cacheflush.c
new file mode 100644
index 000000000..2ae1201cf
--- /dev/null
+++ b/arch/riscv/mm/cacheflush.c
@@ -0,0 +1,93 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <asm/cacheflush.h>
+
+#ifdef CONFIG_SMP
+
+#include <asm/sbi.h>
+
+static void ipi_remote_fence_i(void *info)
+{
+ return local_flush_icache_all();
+}
+
+void flush_icache_all(void)
+{
+ local_flush_icache_all();
+
+ if (IS_ENABLED(CONFIG_RISCV_SBI))
+ sbi_remote_fence_i(NULL);
+ else
+ on_each_cpu(ipi_remote_fence_i, NULL, 1);
+}
+EXPORT_SYMBOL(flush_icache_all);
+
+/*
+ * Performs an icache flush for the given MM context. RISC-V has no direct
+ * mechanism for instruction cache shootdowns, so instead we send an IPI that
+ * informs the remote harts they need to flush their local instruction caches.
+ * To avoid pathologically slow behavior in a common case (a bunch of
+ * single-hart processes on a many-hart machine, i.e. 'make -j') we avoid the
+ * IPIs for harts that are not currently executing an MM context and instead
+ * schedule a deferred local instruction cache flush to be performed before
+ * execution resumes on each hart.
+ */
+void flush_icache_mm(struct mm_struct *mm, bool local)
+{
+ unsigned int cpu;
+ cpumask_t others, *mask;
+
+ preempt_disable();
+
+ /* Mark every hart's icache as needing a flush for this MM. */
+ mask = &mm->context.icache_stale_mask;
+ cpumask_setall(mask);
+ /* Flush this hart's I$ now, and mark it as flushed. */
+ cpu = smp_processor_id();
+ cpumask_clear_cpu(cpu, mask);
+ local_flush_icache_all();
+
+ /*
+ * Flush the I$ of other harts concurrently executing, and mark them as
+ * flushed.
+ */
+ cpumask_andnot(&others, mm_cpumask(mm), cpumask_of(cpu));
+ local |= cpumask_empty(&others);
+ if (mm == current->active_mm && local) {
+ /*
+ * It's assumed that at least one strongly ordered operation is
+ * performed on this hart between setting a hart's cpumask bit
+ * and scheduling this MM context on that hart. Sending an SBI
+ * remote message will do this, but in the case where no
+ * messages are sent we still need to order this hart's writes
+ * with flush_icache_deferred().
+ */
+ smp_mb();
+ } else if (IS_ENABLED(CONFIG_RISCV_SBI)) {
+ cpumask_t hartid_mask;
+
+ riscv_cpuid_to_hartid_mask(&others, &hartid_mask);
+ sbi_remote_fence_i(cpumask_bits(&hartid_mask));
+ } else {
+ on_each_cpu_mask(&others, ipi_remote_fence_i, NULL, 1);
+ }
+
+ preempt_enable();
+}
+
+#endif /* CONFIG_SMP */
+
+#ifdef CONFIG_MMU
+void flush_icache_pte(pte_t pte)
+{
+ struct page *page = pte_page(pte);
+
+ if (!test_bit(PG_dcache_clean, &page->flags)) {
+ flush_icache_all();
+ set_bit(PG_dcache_clean, &page->flags);
+ }
+}
+#endif /* CONFIG_MMU */
diff --git a/arch/riscv/mm/context.c b/arch/riscv/mm/context.c
new file mode 100644
index 000000000..613ec81a8
--- /dev/null
+++ b/arch/riscv/mm/context.c
@@ -0,0 +1,67 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#include <linux/mm.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/mmu_context.h>
+
+/*
+ * When necessary, performs a deferred icache flush for the given MM context,
+ * on the local CPU. RISC-V has no direct mechanism for instruction cache
+ * shoot downs, so instead we send an IPI that informs the remote harts they
+ * need to flush their local instruction caches. To avoid pathologically slow
+ * behavior in a common case (a bunch of single-hart processes on a many-hart
+ * machine, ie 'make -j') we avoid the IPIs for harts that are not currently
+ * executing a MM context and instead schedule a deferred local instruction
+ * cache flush to be performed before execution resumes on each hart. This
+ * actually performs that local instruction cache flush, which implicitly only
+ * refers to the current hart.
+ */
+static inline void flush_icache_deferred(struct mm_struct *mm)
+{
+#ifdef CONFIG_SMP
+ unsigned int cpu = smp_processor_id();
+ cpumask_t *mask = &mm->context.icache_stale_mask;
+
+ if (cpumask_test_cpu(cpu, mask)) {
+ cpumask_clear_cpu(cpu, mask);
+ /*
+ * Ensure the remote hart's writes are visible to this hart.
+ * This pairs with a barrier in flush_icache_mm.
+ */
+ smp_mb();
+ local_flush_icache_all();
+ }
+
+#endif
+}
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task)
+{
+ unsigned int cpu;
+
+ if (unlikely(prev == next))
+ return;
+
+ /*
+ * Mark the current MM context as inactive, and the next as
+ * active. This is at least used by the icache flushing
+ * routines in order to determine who should be flushed.
+ */
+ cpu = smp_processor_id();
+
+ cpumask_clear_cpu(cpu, mm_cpumask(prev));
+ cpumask_set_cpu(cpu, mm_cpumask(next));
+
+#ifdef CONFIG_MMU
+ csr_write(CSR_SATP, virt_to_pfn(next->pgd) | SATP_MODE);
+ local_flush_tlb_all();
+#endif
+
+ flush_icache_deferred(next);
+}
diff --git a/arch/riscv/mm/extable.c b/arch/riscv/mm/extable.c
new file mode 100644
index 000000000..2fc729422
--- /dev/null
+++ b/arch/riscv/mm/extable.c
@@ -0,0 +1,24 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2013 Regents of the University of California
+ */
+
+
+#include <linux/extable.h>
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+int fixup_exception(struct pt_regs *regs)
+{
+ const struct exception_table_entry *fixup;
+
+ fixup = search_exception_tables(regs->epc);
+ if (fixup) {
+ regs->epc = fixup->fixup;
+ return 1;
+ }
+ return 0;
+}
diff --git a/arch/riscv/mm/fault.c b/arch/riscv/mm/fault.c
new file mode 100644
index 000000000..54b12943c
--- /dev/null
+++ b/arch/riscv/mm/fault.c
@@ -0,0 +1,305 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2009 Sunplus Core Technology Co., Ltd.
+ * Lennox Wu <lennox.wu@sunplusct.com>
+ * Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#include <linux/mm.h>
+#include <linux/kernel.h>
+#include <linux/interrupt.h>
+#include <linux/perf_event.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/ptrace.h>
+#include <asm/tlbflush.h>
+
+#include "../kernel/head.h"
+
+static inline void no_context(struct pt_regs *regs, unsigned long addr)
+{
+ /* Are we prepared to handle this kernel fault? */
+ if (fixup_exception(regs))
+ return;
+
+ /*
+ * Oops. The kernel tried to access some bad page. We'll have to
+ * terminate things with extreme prejudice.
+ */
+ bust_spinlocks(1);
+ pr_alert("Unable to handle kernel %s at virtual address " REG_FMT "\n",
+ (addr < PAGE_SIZE) ? "NULL pointer dereference" :
+ "paging request", addr);
+ die(regs, "Oops");
+ make_task_dead(SIGKILL);
+}
+
+static inline void mm_fault_error(struct pt_regs *regs, unsigned long addr, vm_fault_t fault)
+{
+ if (fault & VM_FAULT_OOM) {
+ /*
+ * We ran out of memory, call the OOM killer, and return the userspace
+		 * We ran out of memory, call the OOM killer, and return to userspace
+ */
+ if (!user_mode(regs)) {
+ no_context(regs, addr);
+ return;
+ }
+ pagefault_out_of_memory();
+ return;
+ } else if (fault & VM_FAULT_SIGBUS) {
+ /* Kernel mode? Handle exceptions or die */
+ if (!user_mode(regs)) {
+ no_context(regs, addr);
+ return;
+ }
+ do_trap(regs, SIGBUS, BUS_ADRERR, addr);
+ return;
+ }
+ BUG();
+}
+
+static inline void bad_area(struct pt_regs *regs, struct mm_struct *mm, int code, unsigned long addr)
+{
+ /*
+ * Something tried to access memory that isn't in our memory map.
+ * Fix it, but check if it's kernel or user first.
+ */
+ mmap_read_unlock(mm);
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs)) {
+ do_trap(regs, SIGSEGV, code, addr);
+ return;
+ }
+
+ no_context(regs, addr);
+}
+
+static inline void vmalloc_fault(struct pt_regs *regs, int code, unsigned long addr)
+{
+ pgd_t *pgd, *pgd_k;
+ pud_t *pud, *pud_k;
+ p4d_t *p4d, *p4d_k;
+ pmd_t *pmd, *pmd_k;
+ pte_t *pte_k;
+ int index;
+ unsigned long pfn;
+
+ /* User mode accesses just cause a SIGSEGV */
+ if (user_mode(regs))
+ return do_trap(regs, SIGSEGV, code, addr);
+
+ /*
+ * Synchronize this task's top level page-table
+ * with the 'reference' page table.
+ *
+ * Do _not_ use "tsk->active_mm->pgd" here.
+ * We might be inside an interrupt in the middle
+ * of a task switch.
+ */
+ index = pgd_index(addr);
+ pfn = csr_read(CSR_SATP) & SATP_PPN;
+ pgd = (pgd_t *)pfn_to_virt(pfn) + index;
+ pgd_k = init_mm.pgd + index;
+
+ if (!pgd_present(*pgd_k)) {
+ no_context(regs, addr);
+ return;
+ }
+ set_pgd(pgd, *pgd_k);
+
+ p4d = p4d_offset(pgd, addr);
+ p4d_k = p4d_offset(pgd_k, addr);
+ if (!p4d_present(*p4d_k)) {
+ no_context(regs, addr);
+ return;
+ }
+
+ pud = pud_offset(p4d, addr);
+ pud_k = pud_offset(p4d_k, addr);
+ if (!pud_present(*pud_k)) {
+ no_context(regs, addr);
+ return;
+ }
+
+ /*
+ * Since the vmalloc area is global, it is unnecessary
+ * to copy individual PTEs
+ */
+ pmd = pmd_offset(pud, addr);
+ pmd_k = pmd_offset(pud_k, addr);
+ if (!pmd_present(*pmd_k)) {
+ no_context(regs, addr);
+ return;
+ }
+ set_pmd(pmd, *pmd_k);
+
+ /*
+ * Make sure the actual PTE exists as well to
+ * catch kernel vmalloc-area accesses to non-mapped
+ * addresses. If we don't do this, this will just
+ * silently loop forever.
+ */
+ pte_k = pte_offset_kernel(pmd_k, addr);
+ if (!pte_present(*pte_k)) {
+ no_context(regs, addr);
+ return;
+ }
+
+ /*
+ * The kernel assumes that TLBs don't cache invalid
+ * entries, but in RISC-V, SFENCE.VMA specifies an
+ * ordering constraint, not a cache flush; it is
+ * necessary even after writing invalid entries.
+ */
+ local_flush_tlb_page(addr);
+}
+
+static inline bool access_error(unsigned long cause, struct vm_area_struct *vma)
+{
+ switch (cause) {
+ case EXC_INST_PAGE_FAULT:
+		if (!(vma->vm_flags & VM_EXEC))
+			return true;
+		break;
+	case EXC_LOAD_PAGE_FAULT:
+		/* Write implies read */
+		if (!(vma->vm_flags & (VM_READ | VM_WRITE)))
+			return true;
+		break;
+	case EXC_STORE_PAGE_FAULT:
+		if (!(vma->vm_flags & VM_WRITE))
+			return true;
+		break;
+ default:
+ panic("%s: unhandled cause %lu", __func__, cause);
+ }
+ return false;
+}
+
+/*
+ * This routine handles page faults. It determines the address and the
+ * problem, and then passes it off to one of the appropriate routines.
+ */
+asmlinkage void do_page_fault(struct pt_regs *regs)
+{
+ struct task_struct *tsk;
+ struct vm_area_struct *vma;
+ struct mm_struct *mm;
+ unsigned long addr, cause;
+ unsigned int flags = FAULT_FLAG_DEFAULT;
+ int code = SEGV_MAPERR;
+ vm_fault_t fault;
+
+ cause = regs->cause;
+ addr = regs->badaddr;
+
+ tsk = current;
+ mm = tsk->mm;
+
+ /*
+ * Fault-in kernel-space virtual memory on-demand.
+ * The 'reference' page table is init_mm.pgd.
+ *
+ * NOTE! We MUST NOT take any locks for this case. We may
+ * be in an interrupt or a critical region, and should
+ * only copy the information from the master page table,
+ * nothing more.
+ */
+ if (unlikely((addr >= VMALLOC_START) && (addr <= VMALLOC_END))) {
+ vmalloc_fault(regs, code, addr);
+ return;
+ }
+
+ /* Enable interrupts if they were enabled in the parent context. */
+ if (likely(regs->status & SR_PIE))
+ local_irq_enable();
+
+ /*
+ * If we're in an interrupt, have no user context, or are running
+ * in an atomic region, then we must not take the fault.
+ */
+ if (unlikely(faulthandler_disabled() || !mm)) {
+ no_context(regs, addr);
+ return;
+ }
+
+ if (user_mode(regs))
+ flags |= FAULT_FLAG_USER;
+
+ perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, addr);
+
+ if (cause == EXC_STORE_PAGE_FAULT)
+ flags |= FAULT_FLAG_WRITE;
+ else if (cause == EXC_INST_PAGE_FAULT)
+ flags |= FAULT_FLAG_INSTRUCTION;
+retry:
+ mmap_read_lock(mm);
+ vma = find_vma(mm, addr);
+ if (unlikely(!vma)) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
+ if (likely(vma->vm_start <= addr))
+ goto good_area;
+ if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
+ if (unlikely(expand_stack(vma, addr))) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
+
+ /*
+ * Ok, we have a good vm_area for this memory access, so
+ * we can handle it.
+ */
+good_area:
+ code = SEGV_ACCERR;
+
+ if (unlikely(access_error(cause, vma))) {
+ bad_area(regs, mm, code, addr);
+ return;
+ }
+
+ /*
+ * If for any reason at all we could not handle the fault,
+ * make sure we exit gracefully rather than endlessly redo
+ * the fault.
+ */
+ fault = handle_mm_fault(vma, addr, flags, regs);
+
+ /*
+ * If we need to retry but a fatal signal is pending, handle the
+ * signal first. We do not need to release the mmap_lock because it
+ * would already be released in __lock_page_or_retry in mm/filemap.c.
+ */
+ if (fault_signal_pending(fault, regs))
+ return;
+
+ if (unlikely((fault & VM_FAULT_RETRY) && (flags & FAULT_FLAG_ALLOW_RETRY))) {
+ flags |= FAULT_FLAG_TRIED;
+
+ /*
+ * No need to mmap_read_unlock(mm) as we would
+ * have already released it in __lock_page_or_retry
+ * in mm/filemap.c.
+ */
+ goto retry;
+ }
+
+ mmap_read_unlock(mm);
+
+ if (unlikely(fault & VM_FAULT_ERROR)) {
+ mm_fault_error(regs, addr, fault);
+ return;
+ }
+ return;
+}
diff --git a/arch/riscv/mm/hugetlbpage.c b/arch/riscv/mm/hugetlbpage.c
new file mode 100644
index 000000000..932dadfdc
--- /dev/null
+++ b/arch/riscv/mm/hugetlbpage.c
@@ -0,0 +1,34 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/hugetlb.h>
+#include <linux/err.h>
+
+int pud_huge(pud_t pud)
+{
+ return pud_leaf(pud);
+}
+
+int pmd_huge(pmd_t pmd)
+{
+ return pmd_leaf(pmd);
+}
+
+bool __init arch_hugetlb_valid_size(unsigned long size)
+{
+ if (size == HPAGE_SIZE)
+ return true;
+ else if (IS_ENABLED(CONFIG_64BIT) && size == PUD_SIZE)
+ return true;
+ else
+ return false;
+}
+
+#ifdef CONFIG_CONTIG_ALLOC
+static __init int gigantic_pages_init(void)
+{
+ /* With CONTIG_ALLOC, we can allocate gigantic pages at runtime */
+ if (IS_ENABLED(CONFIG_64BIT))
+ hugetlb_add_hstate(PUD_SHIFT - PAGE_SHIFT);
+ return 0;
+}
+arch_initcall(gigantic_pages_init);
+#endif
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
new file mode 100644
index 000000000..6c2f38aac
--- /dev/null
+++ b/arch/riscv/mm/init.c
@@ -0,0 +1,687 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/memblock.h>
+#include <linux/initrd.h>
+#include <linux/swap.h>
+#include <linux/sizes.h>
+#include <linux/of_fdt.h>
+#include <linux/libfdt.h>
+#include <linux/set_memory.h>
+#include <linux/dma-map-ops.h>
+
+#include <asm/fixmap.h>
+#include <asm/tlbflush.h>
+#include <asm/sections.h>
+#include <asm/soc.h>
+#include <asm/io.h>
+#include <asm/ptdump.h>
+
+#include "../kernel/head.h"
+
+unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)]
+ __page_aligned_bss;
+EXPORT_SYMBOL(empty_zero_page);
+
+extern char _start[];
+#define DTB_EARLY_BASE_VA PGDIR_SIZE
+void *dtb_early_va __initdata;
+uintptr_t dtb_early_pa __initdata;
+
+struct pt_alloc_ops {
+ pte_t *(*get_pte_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pte)(uintptr_t va);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t *(*get_pmd_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pmd)(uintptr_t va);
+#endif
+};
+
+static phys_addr_t dma32_phys_limit __ro_after_init;
+
+static void __init zone_sizes_init(void)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES] = { 0, };
+
+#ifdef CONFIG_ZONE_DMA32
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(dma32_phys_limit);
+#endif
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
+
+ free_area_init(max_zone_pfns);
+}
+
+static void setup_zero_page(void)
+{
+ memset((void *)empty_zero_page, 0, PAGE_SIZE);
+}
+
+#if defined(CONFIG_MMU) && defined(CONFIG_DEBUG_VM)
+static inline void print_mlk(char *name, unsigned long b, unsigned long t)
+{
+ pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld kB)\n", name, b, t,
+ (((t) - (b)) >> 10));
+}
+
+static inline void print_mlm(char *name, unsigned long b, unsigned long t)
+{
+ pr_notice("%12s : 0x%08lx - 0x%08lx (%4ld MB)\n", name, b, t,
+ (((t) - (b)) >> 20));
+}
+
+static void print_vm_layout(void)
+{
+ pr_notice("Virtual kernel memory layout:\n");
+ print_mlk("fixmap", (unsigned long)FIXADDR_START,
+ (unsigned long)FIXADDR_TOP);
+ print_mlm("pci io", (unsigned long)PCI_IO_START,
+ (unsigned long)PCI_IO_END);
+ print_mlm("vmemmap", (unsigned long)VMEMMAP_START,
+ (unsigned long)VMEMMAP_END);
+ print_mlm("vmalloc", (unsigned long)VMALLOC_START,
+ (unsigned long)VMALLOC_END);
+ print_mlm("lowmem", (unsigned long)PAGE_OFFSET,
+ (unsigned long)high_memory);
+}
+#else
+static void print_vm_layout(void) { }
+#endif /* CONFIG_DEBUG_VM */
+
+void __init mem_init(void)
+{
+#ifdef CONFIG_FLATMEM
+ BUG_ON(!mem_map);
+#endif /* CONFIG_FLATMEM */
+
+ high_memory = (void *)(__va(PFN_PHYS(max_low_pfn)));
+ memblock_free_all();
+
+ mem_init_print_info(NULL);
+ print_vm_layout();
+}
+
+#ifdef CONFIG_BLK_DEV_INITRD
+static void __init setup_initrd(void)
+{
+ phys_addr_t start;
+ unsigned long size;
+
+ /* Ignore the virtual address computed during device tree parsing */
+ initrd_start = initrd_end = 0;
+
+ if (!phys_initrd_size)
+ return;
+ /*
+ * Round the memory region to page boundaries, as per free_initrd_mem().
+ * This allows us to detect whether the pages overlapping the initrd
+ * are in use, but more importantly, it reserves the entire set of
+ * pages, as we don't want these pages allocated for other purposes.
+ */
+ start = round_down(phys_initrd_start, PAGE_SIZE);
+ size = phys_initrd_size + (phys_initrd_start - start);
+ size = round_up(size, PAGE_SIZE);
+
+ if (!memblock_is_region_memory(start, size)) {
+ pr_err("INITRD: 0x%08llx+0x%08lx is not a memory region",
+ (u64)start, size);
+ goto disable;
+ }
+
+ if (memblock_is_region_reserved(start, size)) {
+ pr_err("INITRD: 0x%08llx+0x%08lx overlaps in-use memory region\n",
+ (u64)start, size);
+ goto disable;
+ }
+
+ memblock_reserve(start, size);
+ /* Now convert initrd to virtual addresses */
+ initrd_start = (unsigned long)__va(phys_initrd_start);
+ initrd_end = initrd_start + phys_initrd_size;
+ initrd_below_start_ok = 1;
+
+ pr_info("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *)(initrd_start), size);
+ return;
+disable:
+ pr_cont(" - disabling initrd\n");
+ initrd_start = 0;
+ initrd_end = 0;
+}
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+void __init setup_bootmem(void)
+{
+ phys_addr_t mem_start = 0;
+ phys_addr_t start, dram_end, end = 0;
+ phys_addr_t vmlinux_end = __pa_symbol(&_end);
+ phys_addr_t vmlinux_start = __pa_symbol(&_start);
+ phys_addr_t max_mapped_addr = __pa(~(ulong)0);
+ u64 i;
+
+ /* Find the memory region containing the kernel */
+ for_each_mem_range(i, &start, &end) {
+ phys_addr_t size = end - start;
+ if (!mem_start)
+ mem_start = start;
+ if (start <= vmlinux_start && vmlinux_end <= end)
+ BUG_ON(size == 0);
+ }
+
+ /*
+ * The maximal physical memory size is -PAGE_OFFSET.
+ * Make sure that any memory beyond mem_start + (-PAGE_OFFSET) is
+ * removed, as it is unusable by the kernel.
+ */
+ memblock_enforce_memory_limit(-PAGE_OFFSET);
+
+ /* Reserve from the start of the kernel to the end of the kernel */
+ memblock_reserve(vmlinux_start, vmlinux_end - vmlinux_start);
+
+ dram_end = memblock_end_of_DRAM();
+
+ /*
+ * The memblock allocator is not aware that the last 4K of the
+ * addressable memory cannot be mapped because of the IS_ERR_VALUE
+ * macro. Make sure that the last 4K is not usable by memblock
+ * if the end of DRAM is equal to the maximum addressable memory.
+ */
+ if (max_mapped_addr == (dram_end - 1))
+ memblock_set_current_limit(max_mapped_addr - 4096);
+
+ max_pfn = PFN_DOWN(dram_end);
+ max_low_pfn = max_pfn;
+ dma32_phys_limit = min(4UL * SZ_1G, (unsigned long)PFN_PHYS(max_low_pfn));
+ set_max_mapnr(max_low_pfn);
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ setup_initrd();
+#endif /* CONFIG_BLK_DEV_INITRD */
+
+ /*
+ * Avoid using early_init_fdt_reserve_self() since __pa() does
+ * not work for DTB pointers that are fixmap addresses.
+ */
+ memblock_reserve(dtb_early_pa, fdt_totalsize(dtb_early_va));
+
+ dma_contiguous_reserve(dma32_phys_limit);
+ memblock_allow_resize();
+ memblock_dump_all();
+}
+
+#ifdef CONFIG_MMU
+static struct pt_alloc_ops pt_ops;
+
+unsigned long va_pa_offset;
+EXPORT_SYMBOL(va_pa_offset);
+unsigned long pfn_base;
+EXPORT_SYMBOL(pfn_base);
+
+pgd_t swapper_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+pgd_t trampoline_pg_dir[PTRS_PER_PGD] __page_aligned_bss;
+pte_t fixmap_pte[PTRS_PER_PTE] __page_aligned_bss;
+
+pgd_t early_pg_dir[PTRS_PER_PGD] __initdata __aligned(PAGE_SIZE);
+
+void __set_fixmap(enum fixed_addresses idx, phys_addr_t phys, pgprot_t prot)
+{
+ unsigned long addr = __fix_to_virt(idx);
+ pte_t *ptep;
+
+ BUG_ON(idx <= FIX_HOLE || idx >= __end_of_fixed_addresses);
+
+ ptep = &fixmap_pte[pte_index(addr)];
+
+ if (pgprot_val(prot))
+ set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
+ else
+ pte_clear(&init_mm, addr, ptep);
+ local_flush_tlb_page(addr);
+}
+
+static inline pte_t *__init get_pte_virt_early(phys_addr_t pa)
+{
+ return (pte_t *)((uintptr_t)pa);
+}
+
+static inline pte_t *__init get_pte_virt_fixmap(phys_addr_t pa)
+{
+ clear_fixmap(FIX_PTE);
+ return (pte_t *)set_fixmap_offset(FIX_PTE, pa);
+}
+
+static inline pte_t *get_pte_virt_late(phys_addr_t pa)
+{
+ return (pte_t *) __va(pa);
+}
+
+static inline phys_addr_t __init alloc_pte_early(uintptr_t va)
+{
+ /*
+ * We only create PMD or PGD early mappings so we
+ * should never reach here with MMU disabled.
+ */
+ BUG();
+}
+
+static inline phys_addr_t __init alloc_pte_fixmap(uintptr_t va)
+{
+ return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static phys_addr_t alloc_pte_late(uintptr_t va)
+{
+ unsigned long vaddr;
+
+ vaddr = __get_free_page(GFP_KERNEL);
+ if (!vaddr || !pgtable_pte_page_ctor(virt_to_page(vaddr)))
+ BUG();
+ return __pa(vaddr);
+}
+
+static void __init create_pte_mapping(pte_t *ptep,
+ uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot)
+{
+ uintptr_t pte_idx = pte_index(va);
+
+ BUG_ON(sz != PAGE_SIZE);
+
+ if (pte_none(ptep[pte_idx]))
+ ptep[pte_idx] = pfn_pte(PFN_DOWN(pa), prot);
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+pmd_t trampoline_pmd[PTRS_PER_PMD] __page_aligned_bss;
+pmd_t fixmap_pmd[PTRS_PER_PMD] __page_aligned_bss;
+pmd_t early_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+pmd_t early_dtb_pmd[PTRS_PER_PMD] __initdata __aligned(PAGE_SIZE);
+
+static pmd_t *__init get_pmd_virt_early(phys_addr_t pa)
+{
+ /* Before MMU is enabled */
+ return (pmd_t *)((uintptr_t)pa);
+}
+
+static pmd_t *__init get_pmd_virt_fixmap(phys_addr_t pa)
+{
+ clear_fixmap(FIX_PMD);
+ return (pmd_t *)set_fixmap_offset(FIX_PMD, pa);
+}
+
+static pmd_t *get_pmd_virt_late(phys_addr_t pa)
+{
+ return (pmd_t *) __va(pa);
+}
+
+static phys_addr_t __init alloc_pmd_early(uintptr_t va)
+{
+ BUG_ON((va - PAGE_OFFSET) >> PGDIR_SHIFT);
+
+ return (uintptr_t)early_pmd;
+}
+
+static phys_addr_t __init alloc_pmd_fixmap(uintptr_t va)
+{
+ return memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+}
+
+static phys_addr_t alloc_pmd_late(uintptr_t va)
+{
+ unsigned long vaddr;
+
+ vaddr = __get_free_page(GFP_KERNEL);
+ BUG_ON(!vaddr);
+ return __pa(vaddr);
+}
+
+static void __init create_pmd_mapping(pmd_t *pmdp,
+ uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot)
+{
+ pte_t *ptep;
+ phys_addr_t pte_phys;
+ uintptr_t pmd_idx = pmd_index(va);
+
+ if (sz == PMD_SIZE) {
+ if (pmd_none(pmdp[pmd_idx]))
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pa), prot);
+ return;
+ }
+
+ if (pmd_none(pmdp[pmd_idx])) {
+ pte_phys = pt_ops.alloc_pte(va);
+ pmdp[pmd_idx] = pfn_pmd(PFN_DOWN(pte_phys), PAGE_TABLE);
+ ptep = pt_ops.get_pte_virt(pte_phys);
+ memset(ptep, 0, PAGE_SIZE);
+ } else {
+ pte_phys = PFN_PHYS(_pmd_pfn(pmdp[pmd_idx]));
+ ptep = pt_ops.get_pte_virt(pte_phys);
+ }
+
+ create_pte_mapping(ptep, va, pa, sz, prot);
+}
+
+#define pgd_next_t pmd_t
+#define alloc_pgd_next(__va) pt_ops.alloc_pmd(__va)
+#define get_pgd_next_virt(__pa) pt_ops.get_pmd_virt(__pa)
+#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
+ create_pmd_mapping(__nextp, __va, __pa, __sz, __prot)
+#define fixmap_pgd_next fixmap_pmd
+#else
+#define pgd_next_t pte_t
+#define alloc_pgd_next(__va) pt_ops.alloc_pte(__va)
+#define get_pgd_next_virt(__pa) pt_ops.get_pte_virt(__pa)
+#define create_pgd_next_mapping(__nextp, __va, __pa, __sz, __prot) \
+ create_pte_mapping(__nextp, __va, __pa, __sz, __prot)
+#define fixmap_pgd_next fixmap_pte
+#endif
+
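+/*
+ * create_pgd_mapping() is written against the abstract "next" level
+ * selected above: with a folded PMD (Sv32), the level below the PGD
+ * is the PTE table, so the pgd_next macros dispatch straight to the
+ * PTE helpers instead of the PMD ones.
+ */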
+void __init create_pgd_mapping(pgd_t *pgdp,
+ uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot)
+{
+ pgd_next_t *nextp;
+ phys_addr_t next_phys;
+ uintptr_t pgd_idx = pgd_index(va);
+
+ if (sz == PGDIR_SIZE) {
+ if (pgd_val(pgdp[pgd_idx]) == 0)
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(pa), prot);
+ return;
+ }
+
+ if (pgd_val(pgdp[pgd_idx]) == 0) {
+ next_phys = alloc_pgd_next(va);
+ pgdp[pgd_idx] = pfn_pgd(PFN_DOWN(next_phys), PAGE_TABLE);
+ nextp = get_pgd_next_virt(next_phys);
+ memset(nextp, 0, PAGE_SIZE);
+ } else {
+ next_phys = PFN_PHYS(_pgd_pfn(pgdp[pgd_idx]));
+ nextp = get_pgd_next_virt(next_phys);
+ }
+
+ create_pgd_next_mapping(nextp, va, pa, sz, prot);
+}
+
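+/*
+ * A memory bank is mapped with PMD_SIZE pages (2MB with Sv39, 4MB
+ * with Sv32, where the PMD is folded into the PGD) only when both
+ * its base and size are PMD-aligned; otherwise it falls back to 4K
+ * pages.
+ */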
+static uintptr_t __init best_map_size(phys_addr_t base, phys_addr_t size)
+{
+ /* Upgrade to PMD_SIZE mappings whenever possible */
+ if ((base & (PMD_SIZE - 1)) || (size & (PMD_SIZE - 1)))
+ return PAGE_SIZE;
+
+ return PMD_SIZE;
+}
+
+/*
+ * setup_vm() is called from head.S with MMU-off.
+ *
+ * Following requirements should be honoured for setup_vm() to work
+ * correctly:
+ * 1) It should use PC-relative addressing for accessing kernel symbols.
+ * To achieve this we always use GCC cmodel=medany.
+ * 2) The compiler instrumentation for FTRACE will not work for setup_vm()
+ * so disable compiler instrumentation when FTRACE is enabled.
+ *
+ * Currently, the above requirements are honoured by using custom CFLAGS
+ * for init.o in mm/Makefile.
+ */
+
+#ifndef __riscv_cmodel_medany
+#error "setup_vm() is called from head.S before relocate so it should not use absolute addressing."
+#endif
+
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+ uintptr_t va, pa, end_va;
+ uintptr_t load_pa = (uintptr_t)(&_start);
+ uintptr_t load_sz = (uintptr_t)(&_end) - load_pa;
+ uintptr_t map_size;
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t fix_bmap_spmd, fix_bmap_epmd;
+#endif
+
+ va_pa_offset = PAGE_OFFSET - load_pa;
+ pfn_base = PFN_DOWN(load_pa);
+
+ /*
+ * Enforce boot alignment requirements of RV32 and
+ * RV64 by only allowing PMD or PGD mappings.
+ */
+ map_size = PMD_SIZE;
+
+ /* Sanity check alignment and size */
+ BUG_ON((PAGE_OFFSET % PGDIR_SIZE) != 0);
+ BUG_ON((load_pa % map_size) != 0);
+
+ pt_ops.alloc_pte = alloc_pte_early;
+ pt_ops.get_pte_virt = get_pte_virt_early;
+#ifndef __PAGETABLE_PMD_FOLDED
+ pt_ops.alloc_pmd = alloc_pmd_early;
+ pt_ops.get_pmd_virt = get_pmd_virt_early;
+#endif
+ /* Setup early PGD for fixmap */
+ create_pgd_mapping(early_pg_dir, FIXADDR_START,
+ (uintptr_t)fixmap_pgd_next, PGDIR_SIZE, PAGE_TABLE);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ /* Setup fixmap PMD */
+ create_pmd_mapping(fixmap_pmd, FIXADDR_START,
+ (uintptr_t)fixmap_pte, PMD_SIZE, PAGE_TABLE);
+ /* Setup trampoline PGD and PMD */
+ create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+ (uintptr_t)trampoline_pmd, PGDIR_SIZE, PAGE_TABLE);
+ create_pmd_mapping(trampoline_pmd, PAGE_OFFSET,
+ load_pa, PMD_SIZE, PAGE_KERNEL_EXEC);
+#else
+ /* Setup trampoline PGD */
+ create_pgd_mapping(trampoline_pg_dir, PAGE_OFFSET,
+ load_pa, PGDIR_SIZE, PAGE_KERNEL_EXEC);
+#endif
+
+ /*
+ * Set up an early PGD covering the entire kernel, which allows us
+ * to reach paging_init(). We map all memory banks later
+ * in setup_vm_final() below.
+ */
+ end_va = PAGE_OFFSET + load_sz;
+ for (va = PAGE_OFFSET; va < end_va; va += map_size)
+ create_pgd_mapping(early_pg_dir, va,
+ load_pa + (va - PAGE_OFFSET),
+ map_size, PAGE_KERNEL_EXEC);
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ /* Setup early PMD for DTB */
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+ (uintptr_t)early_dtb_pmd, PGDIR_SIZE, PAGE_TABLE);
+ /* Create two consecutive PMD mappings for FDT early scan */
+ pa = dtb_pa & ~(PMD_SIZE - 1);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA,
+ pa, PMD_SIZE, PAGE_KERNEL);
+ create_pmd_mapping(early_dtb_pmd, DTB_EARLY_BASE_VA + PMD_SIZE,
+ pa + PMD_SIZE, PMD_SIZE, PAGE_KERNEL);
+ dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PMD_SIZE - 1));
+#else
+ /* Create two consecutive PGD mappings for FDT early scan */
+ pa = dtb_pa & ~(PGDIR_SIZE - 1);
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA,
+ pa, PGDIR_SIZE, PAGE_KERNEL);
+ create_pgd_mapping(early_pg_dir, DTB_EARLY_BASE_VA + PGDIR_SIZE,
+ pa + PGDIR_SIZE, PGDIR_SIZE, PAGE_KERNEL);
+ dtb_early_va = (void *)DTB_EARLY_BASE_VA + (dtb_pa & (PGDIR_SIZE - 1));
+#endif
+ dtb_early_pa = dtb_pa;
+
+ /*
+ * The boot-time fixmap can only handle PMD_SIZE mappings, so the
+ * boot-time ioremap range cannot span multiple PMDs.
+ */
+ BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
+ != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ /*
+ * The early ioremap fixmap is already created as it lies within the
+ * first 2MB of the fixmap region. We always map PMD_SIZE, so both
+ * FIX_BTMAP_END and FIX_BTMAP_BEGIN should lie in the same PMD.
+ * Verify that and warn the user if not.
+ */
+ fix_bmap_spmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_BEGIN))];
+ fix_bmap_epmd = fixmap_pmd[pmd_index(__fix_to_virt(FIX_BTMAP_END))];
+ if (pmd_val(fix_bmap_spmd) != pmd_val(fix_bmap_epmd)) {
+ WARN_ON(1);
+ pr_warn("fixmap btmap start [%08lx] != end [%08lx]\n",
+ pmd_val(fix_bmap_spmd), pmd_val(fix_bmap_epmd));
+ pr_warn("fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
+ fix_to_virt(FIX_BTMAP_BEGIN));
+ pr_warn("fix_to_virt(FIX_BTMAP_END): %08lx\n",
+ fix_to_virt(FIX_BTMAP_END));
+
+ pr_warn("FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
+ pr_warn("FIX_BTMAP_BEGIN: %d\n", FIX_BTMAP_BEGIN);
+ }
+#endif
+}
+
+static void __init setup_vm_final(void)
+{
+ uintptr_t va, map_size;
+ phys_addr_t pa, start, end;
+ u64 i;
+
+ /*
+ * The MMU is enabled at this point, but the page table setup is not
+ * complete yet, so the fixmap-based page table allocation functions
+ * must be used.
+ */
+ pt_ops.alloc_pte = alloc_pte_fixmap;
+ pt_ops.get_pte_virt = get_pte_virt_fixmap;
+#ifndef __PAGETABLE_PMD_FOLDED
+ pt_ops.alloc_pmd = alloc_pmd_fixmap;
+ pt_ops.get_pmd_virt = get_pmd_virt_fixmap;
+#endif
+ /* Setup swapper PGD for fixmap */
+ create_pgd_mapping(swapper_pg_dir, FIXADDR_START,
+ __pa_symbol(fixmap_pgd_next),
+ PGDIR_SIZE, PAGE_TABLE);
+
+ /* Map all memory banks */
+ for_each_mem_range(i, &start, &end) {
+ if (start >= end)
+ break;
+ if (start <= __pa(PAGE_OFFSET) &&
+ __pa(PAGE_OFFSET) < end)
+ start = __pa(PAGE_OFFSET);
+
+ map_size = best_map_size(start, end - start);
+ for (pa = start; pa < end; pa += map_size) {
+ va = (uintptr_t)__va(pa);
+ create_pgd_mapping(swapper_pg_dir, va, pa,
+ map_size, PAGE_KERNEL_EXEC);
+ }
+ }
+
+ /* Clear fixmap PTE and PMD mappings */
+ clear_fixmap(FIX_PTE);
+ clear_fixmap(FIX_PMD);
+
+ /* Move to swapper page table */
+ csr_write(CSR_SATP, PFN_DOWN(__pa_symbol(swapper_pg_dir)) | SATP_MODE);
+ local_flush_tlb_all();
+
+ /* generic page allocation functions must be used to setup page table */
+ pt_ops.alloc_pte = alloc_pte_late;
+ pt_ops.get_pte_virt = get_pte_virt_late;
+#ifndef __PAGETABLE_PMD_FOLDED
+ pt_ops.alloc_pmd = alloc_pmd_late;
+ pt_ops.get_pmd_virt = get_pmd_virt_late;
+#endif
+}
+#else
+asmlinkage void __init setup_vm(uintptr_t dtb_pa)
+{
+#ifdef CONFIG_BUILTIN_DTB
+ dtb_early_va = soc_lookup_builtin_dtb();
+ if (!dtb_early_va) {
+ /* Fallback to first available DTS */
+ dtb_early_va = (void *) __dtb_start;
+ }
+#else
+ dtb_early_va = (void *)dtb_pa;
+#endif
+ dtb_early_pa = dtb_pa;
+}
+
+static inline void setup_vm_final(void)
+{
+}
+#endif /* CONFIG_MMU */
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+void mark_rodata_ro(void)
+{
+ unsigned long text_start = (unsigned long)_text;
+ unsigned long text_end = (unsigned long)_etext;
+ unsigned long rodata_start = (unsigned long)__start_rodata;
+ unsigned long data_start = (unsigned long)_data;
+ unsigned long max_low = (unsigned long)(__va(PFN_PHYS(max_low_pfn)));
+
+ set_memory_ro(text_start, (text_end - text_start) >> PAGE_SHIFT);
+ set_memory_ro(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(rodata_start, (data_start - rodata_start) >> PAGE_SHIFT);
+ set_memory_nx(data_start, (max_low - data_start) >> PAGE_SHIFT);
+
+ debug_checkwx();
+}
+#endif
+
+static void __init resource_init(void)
+{
+ struct memblock_region *region;
+
+ for_each_mem_region(region) {
+ struct resource *res;
+
+ res = memblock_alloc(sizeof(struct resource), SMP_CACHE_BYTES);
+ if (!res)
+ panic("%s: Failed to allocate %zu bytes\n", __func__,
+ sizeof(struct resource));
+
+ if (memblock_is_nomap(region)) {
+ res->name = "reserved";
+ res->flags = IORESOURCE_MEM;
+ } else {
+ res->name = "System RAM";
+ res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
+ }
+ res->start = __pfn_to_phys(memblock_region_memory_base_pfn(region));
+ res->end = __pfn_to_phys(memblock_region_memory_end_pfn(region)) - 1;
+
+ request_resource(&iomem_resource, res);
+ }
+}
+
+void __init paging_init(void)
+{
+ setup_vm_final();
+ setup_zero_page();
+}
+
+void __init misc_mem_init(void)
+{
+ sparse_init();
+ zone_sizes_init();
+ resource_init();
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
+ struct vmem_altmap *altmap)
+{
+ return vmemmap_populate_basepages(start, end, node, NULL);
+}
+#endif
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
new file mode 100644
index 000000000..2db442701
--- /dev/null
+++ b/arch/riscv/mm/kasan_init.c
@@ -0,0 +1,115 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2019 Andes Technology Corporation
+
+#include <linux/pfn.h>
+#include <linux/init_task.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+#include <linux/memblock.h>
+#include <linux/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/fixmap.h>
+
+extern pgd_t early_pg_dir[PTRS_PER_PGD];
+asmlinkage void __init kasan_early_init(void)
+{
+ uintptr_t i;
+ pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);
+
+ BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
+ KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
+
+ for (i = 0; i < PTRS_PER_PTE; ++i)
+ set_pte(kasan_early_shadow_pte + i,
+ pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL));
+
+ for (i = 0; i < PTRS_PER_PMD; ++i)
+ set_pmd(kasan_early_shadow_pmd + i,
+ pfn_pmd(PFN_DOWN
+ (__pa((uintptr_t) kasan_early_shadow_pte)),
+ __pgprot(_PAGE_TABLE)));
+
+ for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+ i += PGDIR_SIZE, ++pgd)
+ set_pgd(pgd,
+ pfn_pgd(PFN_DOWN
+ (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+ /* init for swapper_pg_dir */
+ pgd = pgd_offset_k(KASAN_SHADOW_START);
+
+ for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
+ i += PGDIR_SIZE, ++pgd)
+ set_pgd(pgd,
+ pfn_pgd(PFN_DOWN
+ (__pa(((uintptr_t) kasan_early_shadow_pmd))),
+ __pgprot(_PAGE_TABLE)));
+
+ local_flush_tlb_all();
+}
+
+static void __init populate(void *start, void *end)
+{
+ unsigned long i, offset;
+ unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+ unsigned long vend = PAGE_ALIGN((unsigned long)end);
+ unsigned long n_pages = (vend - vaddr) / PAGE_SIZE;
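+ /*
+ * Round up to whole PTE/PMD tables. Adding PTRS_PER_* (rather
+ * than PTRS_PER_* - 1) over-allocates one table when the count
+ * is already a multiple, which is harmless here.
+ */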
+ unsigned long n_ptes =
+ ((n_pages + PTRS_PER_PTE) & -PTRS_PER_PTE) / PTRS_PER_PTE;
+ unsigned long n_pmds =
+ ((n_ptes + PTRS_PER_PMD) & -PTRS_PER_PMD) / PTRS_PER_PMD;
+
+ pte_t *pte =
+ memblock_alloc(n_ptes * PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
+ pmd_t *pmd =
+ memblock_alloc(n_pmds * PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);
+ pgd_t *pgd = pgd_offset_k(vaddr);
+
+ for (i = 0; i < n_pages; i++) {
+ phys_addr_t phys = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
+ set_pte(&pte[i], pfn_pte(PHYS_PFN(phys), PAGE_KERNEL));
+ }
+
+ for (i = 0, offset = 0; i < n_ptes; i++, offset += PTRS_PER_PTE)
+ set_pmd(&pmd[i],
+ pfn_pmd(PFN_DOWN(__pa(&pte[offset])),
+ __pgprot(_PAGE_TABLE)));
+
+ for (i = 0, offset = 0; i < n_pmds; i++, offset += PTRS_PER_PMD)
+ set_pgd(&pgd[i],
+ pfn_pgd(PFN_DOWN(__pa(&pmd[offset])),
+ __pgprot(_PAGE_TABLE)));
+
+ local_flush_tlb_all();
+ memset(start, 0, end - start);
+}
+
+void __init kasan_init(void)
+{
+ phys_addr_t _start, _end;
+ u64 i;
+
+ kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
+ (void *)kasan_mem_to_shadow((void *)
+ VMALLOC_END));
+
+ for_each_mem_range(i, &_start, &_end) {
+ void *start = (void *)__va(_start);
+ void *end = (void *)__va(_end);
+
+ if (start >= end)
+ break;
+
+ populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
+ }
+
+ for (i = 0; i < PTRS_PER_PTE; i++)
+ set_pte(&kasan_early_shadow_pte[i],
+ mk_pte(virt_to_page(kasan_early_shadow_page),
+ __pgprot(_PAGE_PRESENT | _PAGE_READ |
+ _PAGE_ACCESSED)));
+
+ memset(kasan_early_shadow_page, 0, PAGE_SIZE);
+ init_task.kasan_depth = 0;
+}
diff --git a/arch/riscv/mm/pageattr.c b/arch/riscv/mm/pageattr.c
new file mode 100644
index 000000000..09f6be19b
--- /dev/null
+++ b/arch/riscv/mm/pageattr.c
@@ -0,0 +1,198 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#include <linux/pagewalk.h>
+#include <linux/pgtable.h>
+#include <asm/tlbflush.h>
+#include <asm/bitops.h>
+#include <asm/set_memory.h>
+
+struct pageattr_masks {
+ pgprot_t set_mask;
+ pgprot_t clear_mask;
+};
+
+static unsigned long set_pageattr_masks(unsigned long val, struct mm_walk *walk)
+{
+ struct pageattr_masks *masks = walk->private;
+ unsigned long new_val = val;
+
+ new_val &= ~(pgprot_val(masks->clear_mask));
+ new_val |= (pgprot_val(masks->set_mask));
+
+ return new_val;
+}
+
+static int pageattr_pgd_entry(pgd_t *pgd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pgd_t val = READ_ONCE(*pgd);
+
+ if (pgd_leaf(val)) {
+ val = __pgd(set_pageattr_masks(pgd_val(val), walk));
+ set_pgd(pgd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_p4d_entry(p4d_t *p4d, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ p4d_t val = READ_ONCE(*p4d);
+
+ if (p4d_leaf(val)) {
+ val = __p4d(set_pageattr_masks(p4d_val(val), walk));
+ set_p4d(p4d, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pud_entry(pud_t *pud, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pud_t val = READ_ONCE(*pud);
+
+ if (pud_leaf(val)) {
+ val = __pud(set_pageattr_masks(pud_val(val), walk));
+ set_pud(pud, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pmd_entry(pmd_t *pmd, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pmd_t val = READ_ONCE(*pmd);
+
+ if (pmd_leaf(val)) {
+ val = __pmd(set_pageattr_masks(pmd_val(val), walk));
+ set_pmd(pmd, val);
+ }
+
+ return 0;
+}
+
+static int pageattr_pte_entry(pte_t *pte, unsigned long addr,
+ unsigned long next, struct mm_walk *walk)
+{
+ pte_t val = READ_ONCE(*pte);
+
+ val = __pte(set_pageattr_masks(pte_val(val), walk));
+ set_pte(pte, val);
+
+ return 0;
+}
+
+static int pageattr_pte_hole(unsigned long addr, unsigned long next,
+ int depth, struct mm_walk *walk)
+{
+ /* Nothing to do here */
+ return 0;
+}
+
+static const struct mm_walk_ops pageattr_ops = {
+ .pgd_entry = pageattr_pgd_entry,
+ .p4d_entry = pageattr_p4d_entry,
+ .pud_entry = pageattr_pud_entry,
+ .pmd_entry = pageattr_pmd_entry,
+ .pte_entry = pageattr_pte_entry,
+ .pte_hole = pageattr_pte_hole,
+};
+
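+/*
+ * Apply the set/clear masks to every mapping in the numpages-sized
+ * range starting at addr: the walk updates protections at whatever
+ * level the leaf entry is found (a pgd/p4d/pud/pmd leaf or a pte),
+ * and holes are skipped.
+ */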
+static int __set_memory(unsigned long addr, int numpages, pgprot_t set_mask,
+ pgprot_t clear_mask)
+{
+ int ret;
+ unsigned long start = addr;
+ unsigned long end = start + PAGE_SIZE * numpages;
+ struct pageattr_masks masks = {
+ .set_mask = set_mask,
+ .clear_mask = clear_mask
+ };
+
+ if (!numpages)
+ return 0;
+
+ mmap_write_lock(&init_mm);
+ ret = walk_page_range_novma(&init_mm, start, end, &pageattr_ops, NULL,
+ &masks);
+ mmap_write_unlock(&init_mm);
+
+ flush_tlb_kernel_range(start, end);
+
+ return ret;
+}
+
+int set_memory_ro(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, __pgprot(_PAGE_READ),
+ __pgprot(_PAGE_WRITE));
+}
+
+int set_memory_rw(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, __pgprot(_PAGE_READ | _PAGE_WRITE),
+ __pgprot(0));
+}
+
+int set_memory_x(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, __pgprot(_PAGE_EXEC), __pgprot(0));
+}
+
+int set_memory_nx(unsigned long addr, int numpages)
+{
+ return __set_memory(addr, numpages, __pgprot(0), __pgprot(_PAGE_EXEC));
+}
+
+int set_direct_map_invalid_noflush(struct page *page)
+{
+ int ret;
+ unsigned long start = (unsigned long)page_address(page);
+ unsigned long end = start + PAGE_SIZE;
+ struct pageattr_masks masks = {
+ .set_mask = __pgprot(0),
+ .clear_mask = __pgprot(_PAGE_PRESENT)
+ };
+
+ mmap_read_lock(&init_mm);
+ ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+ mmap_read_unlock(&init_mm);
+
+ return ret;
+}
+
+int set_direct_map_default_noflush(struct page *page)
+{
+ int ret;
+ unsigned long start = (unsigned long)page_address(page);
+ unsigned long end = start + PAGE_SIZE;
+ struct pageattr_masks masks = {
+ .set_mask = PAGE_KERNEL,
+ .clear_mask = __pgprot(0)
+ };
+
+ mmap_read_lock(&init_mm);
+ ret = walk_page_range(&init_mm, start, end, &pageattr_ops, &masks);
+ mmap_read_unlock(&init_mm);
+
+ return ret;
+}
+
+void __kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ if (enable)
+ __set_memory((unsigned long)page_address(page), numpages,
+ __pgprot(_PAGE_PRESENT), __pgprot(0));
+ else
+ __set_memory((unsigned long)page_address(page), numpages,
+ __pgprot(0), __pgprot(_PAGE_PRESENT));
+}
diff --git a/arch/riscv/mm/physaddr.c b/arch/riscv/mm/physaddr.c
new file mode 100644
index 000000000..e8e4dcd39
--- /dev/null
+++ b/arch/riscv/mm/physaddr.c
@@ -0,0 +1,37 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/types.h>
+#include <linux/mmdebug.h>
+#include <linux/mm.h>
+#include <asm/page.h>
+#include <asm/sections.h>
+
+phys_addr_t __virt_to_phys(unsigned long x)
+{
+ phys_addr_t y = x - PAGE_OFFSET;
+
+ /*
+ * Boundary checking against the kernel linear mapping space.
+ */
+ WARN(y >= KERN_VIRT_SIZE,
+ "virt_to_phys used for non-linear address: %pK (%pS)\n",
+ (void *)x, (void *)x);
+
+ return __va_to_pa_nodebug(x);
+}
+EXPORT_SYMBOL(__virt_to_phys);
+
+phys_addr_t __phys_addr_symbol(unsigned long x)
+{
+ unsigned long kernel_start = (unsigned long)PAGE_OFFSET;
+ unsigned long kernel_end = (unsigned long)_end;
+
+ /*
+ * Boundary checking against the kernel image mapping.
+ * __pa_symbol should only be used on kernel symbol addresses.
+ */
+ VIRTUAL_BUG_ON(x < kernel_start || x > kernel_end);
+
+ return __va_to_pa_nodebug(x);
+}
+EXPORT_SYMBOL(__phys_addr_symbol);
diff --git a/arch/riscv/mm/ptdump.c b/arch/riscv/mm/ptdump.c
new file mode 100644
index 000000000..ace74dec7
--- /dev/null
+++ b/arch/riscv/mm/ptdump.c
@@ -0,0 +1,353 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#include <linux/efi.h>
+#include <linux/init.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+#include <linux/ptdump.h>
+
+#include <asm/ptdump.h>
+#include <linux/pgtable.h>
+#include <asm/kasan.h>
+
+#define pt_dump_seq_printf(m, fmt, args...) \
+({ \
+ if (m) \
+ seq_printf(m, fmt, ##args); \
+})
+
+#define pt_dump_seq_puts(m, fmt) \
+({ \
+ if (m) \
+ seq_printf(m, fmt); \
+})
+
+/*
+ * The page dumper groups page table entries of the same type into a single
+ * description. It uses pg_state to track the range information while
+ * iterating over the PTE entries. When continuity is broken, it then
+ * dumps out a description of the range.
+ */
+struct pg_state {
+ struct ptdump_state ptdump;
+ struct seq_file *seq;
+ const struct addr_marker *marker;
+ unsigned long start_address;
+ unsigned long start_pa;
+ unsigned long last_pa;
+ int level;
+ u64 current_prot;
+ bool check_wx;
+ unsigned long wx_pages;
+};
+
+/* Address marker */
+struct addr_marker {
+ unsigned long start_address;
+ const char *name;
+};
+
+/* Private information for debugfs */
+struct ptd_mm_info {
+ struct mm_struct *mm;
+ const struct addr_marker *markers;
+ unsigned long base_addr;
+ unsigned long end;
+};
+
+static struct addr_marker address_markers[] = {
+#ifdef CONFIG_KASAN
+ {KASAN_SHADOW_START, "Kasan shadow start"},
+ {KASAN_SHADOW_END, "Kasan shadow end"},
+#endif
+ {FIXADDR_START, "Fixmap start"},
+ {FIXADDR_TOP, "Fixmap end"},
+ {PCI_IO_START, "PCI I/O start"},
+ {PCI_IO_END, "PCI I/O end"},
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+ {VMEMMAP_START, "vmemmap start"},
+ {VMEMMAP_END, "vmemmap end"},
+#endif
+ {VMALLOC_START, "vmalloc() area"},
+ {VMALLOC_END, "vmalloc() end"},
+ {PAGE_OFFSET, "Linear mapping"},
+ {-1, NULL},
+};
+
+static struct ptd_mm_info kernel_ptd_info = {
+ .mm = &init_mm,
+ .markers = address_markers,
+ .base_addr = KERN_VIRT_START,
+ .end = ULONG_MAX,
+};
+
+#ifdef CONFIG_EFI
+static struct addr_marker efi_addr_markers[] = {
+ { 0, "UEFI runtime start" },
+ { SZ_1G, "UEFI runtime end" },
+ { -1, NULL }
+};
+
+static struct ptd_mm_info efi_ptd_info = {
+ .mm = &efi_mm,
+ .markers = efi_addr_markers,
+ .base_addr = 0,
+ .end = SZ_2G,
+};
+#endif
+
+/* Page Table Entry */
+struct prot_bits {
+ u64 mask;
+ u64 val;
+ const char *set;
+ const char *clear;
+};
+
+static const struct prot_bits pte_bits[] = {
+ {
+ .mask = _PAGE_SOFT,
+ .val = _PAGE_SOFT,
+ .set = "RSW",
+ .clear = " ",
+ }, {
+ .mask = _PAGE_DIRTY,
+ .val = _PAGE_DIRTY,
+ .set = "D",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_ACCESSED,
+ .val = _PAGE_ACCESSED,
+ .set = "A",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_GLOBAL,
+ .val = _PAGE_GLOBAL,
+ .set = "G",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_USER,
+ .val = _PAGE_USER,
+ .set = "U",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_EXEC,
+ .val = _PAGE_EXEC,
+ .set = "X",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_WRITE,
+ .val = _PAGE_WRITE,
+ .set = "W",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_READ,
+ .val = _PAGE_READ,
+ .set = "R",
+ .clear = ".",
+ }, {
+ .mask = _PAGE_PRESENT,
+ .val = _PAGE_PRESENT,
+ .set = "V",
+ .clear = ".",
+ }
+};
+
+/* Page Level */
+struct pg_level {
+ const char *name;
+ u64 mask;
+};
+
+static struct pg_level pg_level[] = {
+ { /* pgd */
+ .name = "PGD",
+ }, { /* p4d */
+ .name = (CONFIG_PGTABLE_LEVELS > 4) ? "P4D" : "PGD",
+ }, { /* pud */
+ .name = (CONFIG_PGTABLE_LEVELS > 3) ? "PUD" : "PGD",
+ }, { /* pmd */
+ .name = (CONFIG_PGTABLE_LEVELS > 2) ? "PMD" : "PGD",
+ }, { /* pte */
+ .name = "PTE",
+ },
+};
+
+static void dump_prot(struct pg_state *st)
+{
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(pte_bits); i++) {
+ const char *s;
+
+ if ((st->current_prot & pte_bits[i].mask) == pte_bits[i].val)
+ s = pte_bits[i].set;
+ else
+ s = pte_bits[i].clear;
+
+ if (s)
+ pt_dump_seq_printf(st->seq, " %s", s);
+ }
+}
+
+#ifdef CONFIG_64BIT
+#define ADDR_FORMAT "0x%016lx"
+#else
+#define ADDR_FORMAT "0x%08lx"
+#endif
+static void dump_addr(struct pg_state *st, unsigned long addr)
+{
+ static const char units[] = "KMGTPE";
+ const char *unit = units;
+ unsigned long delta;
+
+ pt_dump_seq_printf(st->seq, ADDR_FORMAT "-" ADDR_FORMAT " ",
+ st->start_address, addr);
+
+ pt_dump_seq_printf(st->seq, " " ADDR_FORMAT " ", st->start_pa);
+ delta = (addr - st->start_address) >> 10;
+
+ while (!(delta & 1023) && unit[1]) {
+ delta >>= 10;
+ unit++;
+ }
+
+ pt_dump_seq_printf(st->seq, "%9lu%c %s", delta, *unit,
+ pg_level[st->level].name);
+}
+
+static void note_prot_wx(struct pg_state *st, unsigned long addr)
+{
+ if (!st->check_wx)
+ return;
+
+ if ((st->current_prot & (_PAGE_WRITE | _PAGE_EXEC)) !=
+ (_PAGE_WRITE | _PAGE_EXEC))
+ return;
+
+ WARN_ONCE(1, "riscv/mm: Found insecure W+X mapping at address %p/%pS\n",
+ (void *)st->start_address, (void *)st->start_address);
+
+ st->wx_pages += (addr - st->start_address) / PAGE_SIZE;
+}
+
+static void note_page(struct ptdump_state *pt_st, unsigned long addr,
+ int level, u64 val)
+{
+ struct pg_state *st = container_of(pt_st, struct pg_state, ptdump);
+ u64 pa = PFN_PHYS(pte_pfn(__pte(val)));
+ u64 prot = 0;
+
+ if (level >= 0)
+ prot = val & pg_level[level].mask;
+
+ if (st->level == -1) {
+ st->level = level;
+ st->current_prot = prot;
+ st->start_address = addr;
+ st->start_pa = pa;
+ st->last_pa = pa;
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n", st->marker->name);
+ } else if (prot != st->current_prot ||
+ level != st->level || addr >= st->marker[1].start_address) {
+ if (st->current_prot) {
+ note_prot_wx(st, addr);
+ dump_addr(st, addr);
+ dump_prot(st);
+ pt_dump_seq_puts(st->seq, "\n");
+ }
+
+ while (addr >= st->marker[1].start_address) {
+ st->marker++;
+ pt_dump_seq_printf(st->seq, "---[ %s ]---\n",
+ st->marker->name);
+ }
+
+ st->start_address = addr;
+ st->start_pa = pa;
+ st->last_pa = pa;
+ st->current_prot = prot;
+ st->level = level;
+ } else {
+ st->last_pa = pa;
+ }
+}
+
+static void ptdump_walk(struct seq_file *s, struct ptd_mm_info *pinfo)
+{
+ struct pg_state st = {
+ .seq = s,
+ .marker = pinfo->markers,
+ .level = -1,
+ .ptdump = {
+ .note_page = note_page,
+ .range = (struct ptdump_range[]) {
+ {pinfo->base_addr, pinfo->end},
+ {0, 0}
+ }
+ }
+ };
+
+ ptdump_walk_pgd(&st.ptdump, pinfo->mm, NULL);
+}
+
+void ptdump_check_wx(void)
+{
+ struct pg_state st = {
+ .seq = NULL,
+ .marker = (struct addr_marker[]) {
+ {0, NULL},
+ {-1, NULL},
+ },
+ .level = -1,
+ .check_wx = true,
+ .ptdump = {
+ .note_page = note_page,
+ .range = (struct ptdump_range[]) {
+ {KERN_VIRT_START, ULONG_MAX},
+ {0, 0}
+ }
+ }
+ };
+
+ ptdump_walk_pgd(&st.ptdump, &init_mm, NULL);
+
+ if (st.wx_pages)
+ pr_warn("Checked W+X mappings: failed, %lu W+X pages found\n",
+ st.wx_pages);
+ else
+ pr_info("Checked W+X mappings: passed, no W+X pages found\n");
+}
+
+static int ptdump_show(struct seq_file *m, void *v)
+{
+ ptdump_walk(m, m->private);
+
+ return 0;
+}
+
+DEFINE_SHOW_ATTRIBUTE(ptdump);
+
+static int ptdump_init(void)
+{
+ unsigned int i, j;
+
+ for (i = 0; i < ARRAY_SIZE(pg_level); i++)
+ for (j = 0; j < ARRAY_SIZE(pte_bits); j++)
+ pg_level[i].mask |= pte_bits[j].mask;
+
+ debugfs_create_file("kernel_page_tables", 0400, NULL, &kernel_ptd_info,
+ &ptdump_fops);
+#ifdef CONFIG_EFI
+ if (efi_enabled(EFI_RUNTIME_SERVICES))
+ debugfs_create_file("efi_page_tables", 0400, NULL, &efi_ptd_info,
+ &ptdump_fops);
+#endif
+
+ return 0;
+}
+
+device_initcall(ptdump_init);
diff --git a/arch/riscv/mm/tlbflush.c b/arch/riscv/mm/tlbflush.c
new file mode 100644
index 000000000..720b443c4
--- /dev/null
+++ b/arch/riscv/mm/tlbflush.c
@@ -0,0 +1,56 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/sched.h>
+#include <asm/sbi.h>
+
+void flush_tlb_all(void)
+{
+ sbi_remote_sfence_vma(NULL, 0, -1);
+}
+
+/*
+ * This function must not be called with a NULL cmask; the kernel
+ * may panic if it is.
+ */
+static void __sbi_tlb_flush_range(struct cpumask *cmask, unsigned long start,
+ unsigned long size)
+{
+ struct cpumask hmask;
+ unsigned int cpuid;
+
+ if (cpumask_empty(cmask))
+ return;
+
+ cpuid = get_cpu();
+
+ if (cpumask_any_but(cmask, cpuid) >= nr_cpu_ids) {
+ /* local cpu is the only cpu present in cpumask */
+ if (size <= PAGE_SIZE)
+ local_flush_tlb_page(start);
+ else
+ local_flush_tlb_all();
+ } else {
+ riscv_cpuid_to_hartid_mask(cmask, &hmask);
+ sbi_remote_sfence_vma(cpumask_bits(&hmask), start, size);
+ }
+
+ put_cpu();
+}
+
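+/*
+ * A (start, size) pair of (0, -1) denotes the whole address space,
+ * so flush_tlb_mm() below performs a full flush on every hart that
+ * has used the mm.
+ */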
+void flush_tlb_mm(struct mm_struct *mm)
+{
+ __sbi_tlb_flush_range(mm_cpumask(mm), 0, -1);
+}
+
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), addr, PAGE_SIZE);
+}
+
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end)
+{
+ __sbi_tlb_flush_range(mm_cpumask(vma->vm_mm), start, end - start);
+}
diff --git a/arch/riscv/net/Makefile b/arch/riscv/net/Makefile
new file mode 100644
index 000000000..9a1e5f0a9
--- /dev/null
+++ b/arch/riscv/net/Makefile
@@ -0,0 +1,9 @@
+# SPDX-License-Identifier: GPL-2.0-only
+
+obj-$(CONFIG_BPF_JIT) += bpf_jit_core.o
+
+ifeq ($(CONFIG_ARCH_RV64I),y)
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp64.o
+else
+ obj-$(CONFIG_BPF_JIT) += bpf_jit_comp32.o
+endif
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
new file mode 100644
index 000000000..ef336fe16
--- /dev/null
+++ b/arch/riscv/net/bpf_jit.h
@@ -0,0 +1,992 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Common functionality for RV32 and RV64 BPF JIT compilers
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#ifndef _BPF_JIT_H
+#define _BPF_JIT_H
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include <asm/cacheflush.h>
+
+static inline bool rvc_enabled(void)
+{
+ return IS_ENABLED(CONFIG_RISCV_ISA_C);
+}
+
+enum {
+ RV_REG_ZERO = 0, /* The constant value 0 */
+ RV_REG_RA = 1, /* Return address */
+ RV_REG_SP = 2, /* Stack pointer */
+ RV_REG_GP = 3, /* Global pointer */
+ RV_REG_TP = 4, /* Thread pointer */
+ RV_REG_T0 = 5, /* Temporaries */
+ RV_REG_T1 = 6,
+ RV_REG_T2 = 7,
+ RV_REG_FP = 8, /* Saved register/frame pointer */
+ RV_REG_S1 = 9, /* Saved register */
+ RV_REG_A0 = 10, /* Function argument/return values */
+ RV_REG_A1 = 11, /* Function arguments */
+ RV_REG_A2 = 12,
+ RV_REG_A3 = 13,
+ RV_REG_A4 = 14,
+ RV_REG_A5 = 15,
+ RV_REG_A6 = 16,
+ RV_REG_A7 = 17,
+ RV_REG_S2 = 18, /* Saved registers */
+ RV_REG_S3 = 19,
+ RV_REG_S4 = 20,
+ RV_REG_S5 = 21,
+ RV_REG_S6 = 22,
+ RV_REG_S7 = 23,
+ RV_REG_S8 = 24,
+ RV_REG_S9 = 25,
+ RV_REG_S10 = 26,
+ RV_REG_S11 = 27,
+ RV_REG_T3 = 28, /* Temporaries */
+ RV_REG_T4 = 29,
+ RV_REG_T5 = 30,
+ RV_REG_T6 = 31,
+};
+
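+/*
+ * Most compressed instructions can only encode the eight registers
+ * x8-x15 (FP, S1 and A0-A5) in their 3-bit register fields; is_creg()
+ * checks for exactly that set.
+ */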
+static inline bool is_creg(u8 reg)
+{
+ return (1 << reg) & (BIT(RV_REG_FP) |
+ BIT(RV_REG_S1) |
+ BIT(RV_REG_A0) |
+ BIT(RV_REG_A1) |
+ BIT(RV_REG_A2) |
+ BIT(RV_REG_A3) |
+ BIT(RV_REG_A4) |
+ BIT(RV_REG_A5));
+}
+
+struct rv_jit_context {
+ struct bpf_prog *prog;
+ u16 *insns; /* RV insns */
+ int ninsns;
+ int prologue_len;
+ int epilogue_offset;
+ int *offset; /* BPF to RV */
+ unsigned long flags;
+ int stack_size;
+};
+
+/* Convert from ninsns to bytes. */
+static inline int ninsns_rvoff(int ninsns)
+{
+ return ninsns << 1;
+}
+
+struct rv_jit_data {
+ struct bpf_binary_header *header;
+ u8 *image;
+ struct rv_jit_context ctx;
+};
+
+static inline void bpf_fill_ill_insns(void *area, unsigned int size)
+{
+ memset(area, 0, size);
+}
+
+static inline void bpf_flush_icache(void *start, void *end)
+{
+ flush_icache_range((unsigned long)start, (unsigned long)end);
+}
+
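+/*
+ * ctx->insns is an array of 16-bit parcels, so a full-size instruction
+ * occupies two slots (low half first, matching the little-endian
+ * parcel order) and a compressed instruction occupies one. ninsns
+ * therefore counts 16-bit units, which is why ninsns_rvoff() above
+ * multiplies by two.
+ */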
+/* Emit a 4-byte riscv instruction. */
+static inline void emit(const u32 insn, struct rv_jit_context *ctx)
+{
+ if (ctx->insns) {
+ ctx->insns[ctx->ninsns] = insn;
+ ctx->insns[ctx->ninsns + 1] = (insn >> 16);
+ }
+
+ ctx->ninsns += 2;
+}
+
+/* Emit a 2-byte riscv compressed instruction. */
+static inline void emitc(const u16 insn, struct rv_jit_context *ctx)
+{
+ BUILD_BUG_ON(!rvc_enabled());
+
+ if (ctx->insns)
+ ctx->insns[ctx->ninsns] = insn;
+
+ ctx->ninsns++;
+}
+
+static inline int epilogue_offset(struct rv_jit_context *ctx)
+{
+ int to = ctx->epilogue_offset, from = ctx->ninsns;
+
+ return ninsns_rvoff(to - from);
+}
+
+/* Return -1 or inverted cond. */
+static inline int invert_bpf_cond(u8 cond)
+{
+ switch (cond) {
+ case BPF_JEQ:
+ return BPF_JNE;
+ case BPF_JGT:
+ return BPF_JLE;
+ case BPF_JLT:
+ return BPF_JGE;
+ case BPF_JGE:
+ return BPF_JLT;
+ case BPF_JLE:
+ return BPF_JGT;
+ case BPF_JNE:
+ return BPF_JEQ;
+ case BPF_JSGT:
+ return BPF_JSLE;
+ case BPF_JSLT:
+ return BPF_JSGE;
+ case BPF_JSGE:
+ return BPF_JSLT;
+ case BPF_JSLE:
+ return BPF_JSGT;
+ }
+ return -1;
+}
+
+static inline bool is_6b_int(long val)
+{
+ return -(1L << 5) <= val && val < (1L << 5);
+}
+
+static inline bool is_7b_uint(unsigned long val)
+{
+ return val < (1UL << 7);
+}
+
+static inline bool is_8b_uint(unsigned long val)
+{
+ return val < (1UL << 8);
+}
+
+static inline bool is_9b_uint(unsigned long val)
+{
+ return val < (1UL << 9);
+}
+
+static inline bool is_10b_int(long val)
+{
+ return -(1L << 9) <= val && val < (1L << 9);
+}
+
+static inline bool is_10b_uint(unsigned long val)
+{
+ return val < (1UL << 10);
+}
+
+static inline bool is_12b_int(long val)
+{
+ return -(1L << 11) <= val && val < (1L << 11);
+}
+
+static inline int is_12b_check(int off, int insn)
+{
+ if (!is_12b_int(off)) {
+ pr_err("bpf-jit: insn=%d 12b < offset=%d not supported yet!\n",
+ insn, (int)off);
+ return -1;
+ }
+ return 0;
+}
+
+static inline bool is_13b_int(long val)
+{
+ return -(1L << 12) <= val && val < (1L << 12);
+}
+
+static inline bool is_21b_int(long val)
+{
+ return -(1L << 20) <= val && val < (1L << 20);
+}
+
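+/*
+ * ctx->offset[i] holds the RV offset (in 16-bit units) just past BPF
+ * insn i, so the start of insn i is offset[i - 1], or the prologue
+ * length for the first instruction.
+ */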
+static inline int rv_offset(int insn, int off, struct rv_jit_context *ctx)
+{
+ int from, to;
+
+ off++; /* BPF branch is from PC+1, RV is from PC */
+ from = (insn > 0) ? ctx->offset[insn - 1] : ctx->prologue_len;
+ to = (insn + off > 0) ? ctx->offset[insn + off - 1] : ctx->prologue_len;
+ return ninsns_rvoff(to - from);
+}
+
+/* Instruction formats. */
+
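+/*
+ * The helpers below pack the standard base-ISA encodings, e.g. the
+ * R-type format:
+ *
+ *  31     25 24   20 19   15 14     12 11   7 6      0
+ * | funct7  |  rs2  |  rs1  | funct3  |  rd  | opcode |
+ */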
+static inline u32 rv_r_insn(u8 funct7, u8 rs2, u8 rs1, u8 funct3, u8 rd,
+ u8 opcode)
+{
+ return (funct7 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (rd << 7) | opcode;
+}
+
+static inline u32 rv_i_insn(u16 imm11_0, u8 rs1, u8 funct3, u8 rd, u8 opcode)
+{
+ return (imm11_0 << 20) | (rs1 << 15) | (funct3 << 12) | (rd << 7) |
+ opcode;
+}
+
+static inline u32 rv_s_insn(u16 imm11_0, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
+{
+ u8 imm11_5 = imm11_0 >> 5, imm4_0 = imm11_0 & 0x1f;
+
+ return (imm11_5 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (imm4_0 << 7) | opcode;
+}
+
+static inline u32 rv_b_insn(u16 imm12_1, u8 rs2, u8 rs1, u8 funct3, u8 opcode)
+{
+ u8 imm12 = ((imm12_1 & 0x800) >> 5) | ((imm12_1 & 0x3f0) >> 4);
+ u8 imm4_1 = ((imm12_1 & 0xf) << 1) | ((imm12_1 & 0x400) >> 10);
+
+ return (imm12 << 25) | (rs2 << 20) | (rs1 << 15) | (funct3 << 12) |
+ (imm4_1 << 7) | opcode;
+}
+
+static inline u32 rv_u_insn(u32 imm31_12, u8 rd, u8 opcode)
+{
+ return (imm31_12 << 12) | (rd << 7) | opcode;
+}
+
+static inline u32 rv_j_insn(u32 imm20_1, u8 rd, u8 opcode)
+{
+ u32 imm;
+
+ imm = (imm20_1 & 0x80000) | ((imm20_1 & 0x3ff) << 9) |
+ ((imm20_1 & 0x400) >> 2) | ((imm20_1 & 0x7f800) >> 11);
+
+ return (imm << 12) | (rd << 7) | opcode;
+}
+
+static inline u32 rv_amo_insn(u8 funct5, u8 aq, u8 rl, u8 rs2, u8 rs1,
+ u8 funct3, u8 rd, u8 opcode)
+{
+ u8 funct7 = (funct5 << 2) | (aq << 1) | rl;
+
+ return rv_r_insn(funct7, rs2, rs1, funct3, rd, opcode);
+}
+
+/* RISC-V compressed instruction formats. */
+
+static inline u16 rv_cr_insn(u8 funct4, u8 rd, u8 rs2, u8 op)
+{
+ return (funct4 << 12) | (rd << 7) | (rs2 << 2) | op;
+}
+
+static inline u16 rv_ci_insn(u8 funct3, u32 imm6, u8 rd, u8 op)
+{
+ u32 imm;
+
+ imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
+ return (funct3 << 13) | (rd << 7) | op | imm;
+}
+
+static inline u16 rv_css_insn(u8 funct3, u32 uimm, u8 rs2, u8 op)
+{
+ return (funct3 << 13) | (uimm << 7) | (rs2 << 2) | op;
+}
+
+static inline u16 rv_ciw_insn(u8 funct3, u32 uimm, u8 rd, u8 op)
+{
+ return (funct3 << 13) | (uimm << 5) | ((rd & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cl_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rd,
+ u8 op)
+{
+ return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
+ (imm_lo << 5) | ((rd & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cs_insn(u8 funct3, u32 imm_hi, u8 rs1, u32 imm_lo, u8 rs2,
+ u8 op)
+{
+ return (funct3 << 13) | (imm_hi << 10) | ((rs1 & 0x7) << 7) |
+ (imm_lo << 5) | ((rs2 & 0x7) << 2) | op;
+}
+
+static inline u16 rv_ca_insn(u8 funct6, u8 rd, u8 funct2, u8 rs2, u8 op)
+{
+ return (funct6 << 10) | ((rd & 0x7) << 7) | (funct2 << 5) |
+ ((rs2 & 0x7) << 2) | op;
+}
+
+static inline u16 rv_cb_insn(u8 funct3, u32 imm6, u8 funct2, u8 rd, u8 op)
+{
+ u32 imm;
+
+ imm = ((imm6 & 0x20) << 7) | ((imm6 & 0x1f) << 2);
+ return (funct3 << 13) | (funct2 << 10) | ((rd & 0x7) << 7) | op | imm;
+}
+
+/* Instructions shared by both RV32 and RV64. */
+
+static inline u32 rv_addi(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x13);
+}
+
+static inline u32 rv_andi(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 7, rd, 0x13);
+}
+
+static inline u32 rv_ori(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 6, rd, 0x13);
+}
+
+static inline u32 rv_xori(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 4, rd, 0x13);
+}
+
+static inline u32 rv_slli(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x13);
+}
+
+static inline u32 rv_srli(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x13);
+}
+
+static inline u32 rv_srai(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x13);
+}
+
+static inline u32 rv_lui(u8 rd, u32 imm31_12)
+{
+ return rv_u_insn(imm31_12, rd, 0x37);
+}
+
+static inline u32 rv_auipc(u8 rd, u32 imm31_12)
+{
+ return rv_u_insn(imm31_12, rd, 0x17);
+}
+
+static inline u32 rv_add(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 0, rd, 0x33);
+}
+
+static inline u32 rv_sub(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x33);
+}
+
+static inline u32 rv_sltu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 3, rd, 0x33);
+}
+
+static inline u32 rv_and(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 7, rd, 0x33);
+}
+
+static inline u32 rv_or(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 6, rd, 0x33);
+}
+
+static inline u32 rv_xor(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 4, rd, 0x33);
+}
+
+static inline u32 rv_sll(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 1, rd, 0x33);
+}
+
+static inline u32 rv_srl(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 5, rd, 0x33);
+}
+
+static inline u32 rv_sra(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x33);
+}
+
+static inline u32 rv_mul(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 0, rd, 0x33);
+}
+
+static inline u32 rv_mulhu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 3, rd, 0x33);
+}
+
+static inline u32 rv_divu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 5, rd, 0x33);
+}
+
+static inline u32 rv_remu(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 7, rd, 0x33);
+}
+
+static inline u32 rv_jal(u8 rd, u32 imm20_1)
+{
+ return rv_j_insn(imm20_1, rd, 0x6f);
+}
+
+static inline u32 rv_jalr(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x67);
+}
+
+static inline u32 rv_beq(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 0, 0x63);
+}
+
+static inline u32 rv_bne(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 1, 0x63);
+}
+
+static inline u32 rv_bltu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 6, 0x63);
+}
+
+static inline u32 rv_bgtu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_bltu(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_bgeu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 7, 0x63);
+}
+
+static inline u32 rv_bleu(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_bgeu(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_blt(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 4, 0x63);
+}
+
+static inline u32 rv_bgt(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_blt(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_bge(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_b_insn(imm12_1, rs2, rs1, 5, 0x63);
+}
+
+static inline u32 rv_ble(u8 rs1, u8 rs2, u16 imm12_1)
+{
+ return rv_bge(rs2, rs1, imm12_1);
+}
+
+static inline u32 rv_lw(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 2, rd, 0x03);
+}
+
+static inline u32 rv_lbu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 4, rd, 0x03);
+}
+
+static inline u32 rv_lhu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x03);
+}
+
+static inline u32 rv_sb(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 0, 0x23);
+}
+
+static inline u32 rv_sh(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 1, 0x23);
+}
+
+static inline u32 rv_sw(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 2, 0x23);
+}
+
+static inline u32 rv_amoadd_w(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0, aq, rl, rs2, rs1, 2, rd, 0x2f);
+}
+
+/* RVC instructions. */
+
+static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
+{
+ u32 imm;
+
+ imm = ((imm10 & 0x30) << 2) | ((imm10 & 0x3c0) >> 4) |
+ ((imm10 & 0x4) >> 1) | ((imm10 & 0x8) >> 3);
+ return rv_ciw_insn(0x0, imm, rd, 0x0);
+}
+
+static inline u16 rvc_lw(u8 rd, u32 imm7, u8 rs1)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm7 & 0x38) >> 3;
+ imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
+ return rv_cl_insn(0x2, imm_hi, rs1, imm_lo, rd, 0x0);
+}
+
+static inline u16 rvc_sw(u8 rs1, u32 imm7, u8 rs2)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm7 & 0x38) >> 3;
+ imm_lo = ((imm7 & 0x4) >> 1) | ((imm7 & 0x40) >> 6);
+ return rv_cs_insn(0x6, imm_hi, rs1, imm_lo, rs2, 0x0);
+}
+
+static inline u16 rvc_addi(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_li(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0x2, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_addi16sp(u32 imm10)
+{
+ u32 imm;
+
+ imm = ((imm10 & 0x200) >> 4) | (imm10 & 0x10) | ((imm10 & 0x40) >> 3) |
+ ((imm10 & 0x180) >> 6) | ((imm10 & 0x20) >> 5);
+ return rv_ci_insn(0x3, imm, RV_REG_SP, 0x1);
+}
+
+static inline u16 rvc_lui(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0x3, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_srli(u8 rd, u32 imm6)
+{
+ return rv_cb_insn(0x4, imm6, 0, rd, 0x1);
+}
+
+static inline u16 rvc_srai(u8 rd, u32 imm6)
+{
+ return rv_cb_insn(0x4, imm6, 0x1, rd, 0x1);
+}
+
+static inline u16 rvc_andi(u8 rd, u32 imm6)
+{
+ return rv_cb_insn(0x4, imm6, 0x2, rd, 0x1);
+}
+
+static inline u16 rvc_sub(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0, rs, 0x1);
+}
+
+static inline u16 rvc_xor(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0x1, rs, 0x1);
+}
+
+static inline u16 rvc_or(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0x2, rs, 0x1);
+}
+
+static inline u16 rvc_and(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x23, rd, 0x3, rs, 0x1);
+}
+
+static inline u16 rvc_slli(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0, imm6, rd, 0x2);
+}
+
+static inline u16 rvc_lwsp(u8 rd, u32 imm8)
+{
+ u32 imm;
+
+ imm = ((imm8 & 0xc0) >> 6) | (imm8 & 0x3c);
+ return rv_ci_insn(0x2, imm, rd, 0x2);
+}
+
+static inline u16 rvc_jr(u8 rs1)
+{
+ return rv_cr_insn(0x8, rs1, RV_REG_ZERO, 0x2);
+}
+
+static inline u16 rvc_mv(u8 rd, u8 rs)
+{
+ return rv_cr_insn(0x8, rd, rs, 0x2);
+}
+
+static inline u16 rvc_jalr(u8 rs1)
+{
+ return rv_cr_insn(0x9, rs1, RV_REG_ZERO, 0x2);
+}
+
+static inline u16 rvc_add(u8 rd, u8 rs)
+{
+ return rv_cr_insn(0x9, rd, rs, 0x2);
+}
+
+static inline u16 rvc_swsp(u32 imm8, u8 rs2)
+{
+ u32 imm;
+
+ imm = (imm8 & 0x3c) | ((imm8 & 0xc0) >> 6);
+ return rv_css_insn(0x6, imm, rs2, 0x2);
+}
+
+/*
+ * RV64-only instructions.
+ *
+ * These instructions are not available on RV32. Wrap them in a #if to
+ * ensure that the RV32 JIT doesn't emit any of them.
+ */
+
+#if __riscv_xlen == 64
+
+static inline u32 rv_addiw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 0, rd, 0x1b);
+}
+
+static inline u32 rv_slliw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 1, rd, 0x1b);
+}
+
+static inline u32 rv_srliw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(imm11_0, rs1, 5, rd, 0x1b);
+}
+
+static inline u32 rv_sraiw(u8 rd, u8 rs1, u16 imm11_0)
+{
+ return rv_i_insn(0x400 | imm11_0, rs1, 5, rd, 0x1b);
+}
+
+static inline u32 rv_addw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 0, rd, 0x3b);
+}
+
+static inline u32 rv_subw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 0, rd, 0x3b);
+}
+
+static inline u32 rv_sllw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 1, rd, 0x3b);
+}
+
+static inline u32 rv_srlw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0, rs2, rs1, 5, rd, 0x3b);
+}
+
+static inline u32 rv_sraw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(0x20, rs2, rs1, 5, rd, 0x3b);
+}
+
+static inline u32 rv_mulw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 0, rd, 0x3b);
+}
+
+static inline u32 rv_divuw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 5, rd, 0x3b);
+}
+
+static inline u32 rv_remuw(u8 rd, u8 rs1, u8 rs2)
+{
+ return rv_r_insn(1, rs2, rs1, 7, rd, 0x3b);
+}
+
+static inline u32 rv_ld(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 3, rd, 0x03);
+}
+
+static inline u32 rv_lwu(u8 rd, u16 imm11_0, u8 rs1)
+{
+ return rv_i_insn(imm11_0, rs1, 6, rd, 0x03);
+}
+
+static inline u32 rv_sd(u8 rs1, u16 imm11_0, u8 rs2)
+{
+ return rv_s_insn(imm11_0, rs2, rs1, 3, 0x23);
+}
+
+static inline u32 rv_amoadd_d(u8 rd, u8 rs2, u8 rs1, u8 aq, u8 rl)
+{
+ return rv_amo_insn(0, aq, rl, rs2, rs1, 3, rd, 0x2f);
+}
+
+/* RV64-only RVC instructions. */
+
+static inline u16 rvc_ld(u8 rd, u32 imm8, u8 rs1)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm8 & 0x38) >> 3;
+ imm_lo = (imm8 & 0xc0) >> 6;
+ return rv_cl_insn(0x3, imm_hi, rs1, imm_lo, rd, 0x0);
+}
+
+static inline u16 rvc_sd(u8 rs1, u32 imm8, u8 rs2)
+{
+ u32 imm_hi, imm_lo;
+
+ imm_hi = (imm8 & 0x38) >> 3;
+ imm_lo = (imm8 & 0xc0) >> 6;
+ return rv_cs_insn(0x7, imm_hi, rs1, imm_lo, rs2, 0x0);
+}
+
+static inline u16 rvc_subw(u8 rd, u8 rs)
+{
+ return rv_ca_insn(0x27, rd, 0, rs, 0x1);
+}
+
+static inline u16 rvc_addiw(u8 rd, u32 imm6)
+{
+ return rv_ci_insn(0x1, imm6, rd, 0x1);
+}
+
+static inline u16 rvc_ldsp(u8 rd, u32 imm9)
+{
+ u32 imm;
+
+ imm = ((imm9 & 0x1c0) >> 6) | (imm9 & 0x38);
+ return rv_ci_insn(0x3, imm, rd, 0x2);
+}
+
+static inline u16 rvc_sdsp(u32 imm9, u8 rs2)
+{
+ u32 imm;
+
+ imm = (imm9 & 0x38) | ((imm9 & 0x1c0) >> 6);
+ return rv_css_insn(0x7, imm, rs2, 0x2);
+}
+
+#endif /* __riscv_xlen == 64 */
+
+/* Helper functions that emit RVC instructions when possible. */
+
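+/*
+ * Each helper checks the operand constraints of the corresponding
+ * compressed encoding (register class, immediate range, alignment)
+ * and falls back to the full-size instruction when they are not met,
+ * so callers may use these unconditionally.
+ */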
+static inline void emit_jalr(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd == RV_REG_RA && rs && !imm)
+ emitc(rvc_jalr(rs), ctx);
+ else if (rvc_enabled() && !rd && rs && !imm)
+ emitc(rvc_jr(rs), ctx);
+ else
+ emit(rv_jalr(rd, rs, imm), ctx);
+}
+
+static inline void emit_mv(u8 rd, u8 rs, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rs)
+ emitc(rvc_mv(rd, rs), ctx);
+ else
+ emit(rv_addi(rd, rs, 0), ctx);
+}
+
+static inline void emit_add(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd == rs1 && rs2)
+ emitc(rvc_add(rd, rs2), ctx);
+ else
+ emit(rv_add(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_addi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd == RV_REG_SP && rd == rs && is_10b_int(imm) && imm && !(imm & 0xf))
+ emitc(rvc_addi16sp(imm), ctx);
+ else if (rvc_enabled() && is_creg(rd) && rs == RV_REG_SP && is_10b_uint(imm) &&
+ !(imm & 0x3) && imm)
+ emitc(rvc_addi4spn(rd, imm), ctx);
+ else if (rvc_enabled() && rd && rd == rs && imm && is_6b_int(imm))
+ emitc(rvc_addi(rd, imm), ctx);
+ else
+ emit(rv_addi(rd, rs, imm), ctx);
+}
+
+static inline void emit_li(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && is_6b_int(imm))
+ emitc(rvc_li(rd, imm), ctx);
+ else
+ emit(rv_addi(rd, RV_REG_ZERO, imm), ctx);
+}
+
+static inline void emit_lui(u8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd != RV_REG_SP && is_6b_int(imm) && imm)
+ emitc(rvc_lui(rd, imm), ctx);
+ else
+ emit(rv_lui(rd, imm), ctx);
+}
+
+static inline void emit_slli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd == rs && imm && (u32)imm < __riscv_xlen)
+ emitc(rvc_slli(rd, imm), ctx);
+ else
+ emit(rv_slli(rd, rs, imm), ctx);
+}
+
+static inline void emit_andi(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs && is_6b_int(imm))
+ emitc(rvc_andi(rd, imm), ctx);
+ else
+ emit(rv_andi(rd, rs, imm), ctx);
+}
+
+static inline void emit_srli(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
+ emitc(rvc_srli(rd, imm), ctx);
+ else
+ emit(rv_srli(rd, rs, imm), ctx);
+}
+
+static inline void emit_srai(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs && imm && (u32)imm < __riscv_xlen)
+ emitc(rvc_srai(rd, imm), ctx);
+ else
+ emit(rv_srai(rd, rs, imm), ctx);
+}
+
+static inline void emit_sub(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_sub(rd, rs2), ctx);
+ else
+ emit(rv_sub(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_or(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_or(rd, rs2), ctx);
+ else
+ emit(rv_or(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_and(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_and(rd, rs2), ctx);
+ else
+ emit(rv_and(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_xor(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_xor(rd, rs2), ctx);
+ else
+ emit(rv_xor(rd, rs1, rs2), ctx);
+}
+
+static inline void emit_lw(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_8b_uint(off) && !(off & 0x3))
+ emitc(rvc_lwsp(rd, off), ctx);
+ else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_7b_uint(off) && !(off & 0x3))
+ emitc(rvc_lw(rd, off, rs1), ctx);
+ else
+ emit(rv_lw(rd, off, rs1), ctx);
+}
+
+static inline void emit_sw(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && is_8b_uint(off) && !(off & 0x3))
+ emitc(rvc_swsp(off, rs2), ctx);
+ else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_7b_uint(off) && !(off & 0x3))
+ emitc(rvc_sw(rs1, off, rs2), ctx);
+ else
+ emit(rv_sw(rs1, off, rs2), ctx);
+}
+
+/* RV64-only helper functions. */
+#if __riscv_xlen == 64
+
+static inline void emit_addiw(u8 rd, u8 rs, s32 imm, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rd && rd == rs && is_6b_int(imm))
+ emitc(rvc_addiw(rd, imm), ctx);
+ else
+ emit(rv_addiw(rd, rs, imm), ctx);
+}
+
+static inline void emit_ld(u8 rd, s32 off, u8 rs1, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && rd && is_9b_uint(off) && !(off & 0x7))
+ emitc(rvc_ldsp(rd, off), ctx);
+ else if (rvc_enabled() && is_creg(rd) && is_creg(rs1) && is_8b_uint(off) && !(off & 0x7))
+ emitc(rvc_ld(rd, off, rs1), ctx);
+ else
+ emit(rv_ld(rd, off, rs1), ctx);
+}
+
+static inline void emit_sd(u8 rs1, s32 off, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && rs1 == RV_REG_SP && is_9b_uint(off) && !(off & 0x7))
+ emitc(rvc_sdsp(off, rs2), ctx);
+ else if (rvc_enabled() && is_creg(rs1) && is_creg(rs2) && is_8b_uint(off) && !(off & 0x7))
+ emitc(rvc_sd(rs1, off, rs2), ctx);
+ else
+ emit(rv_sd(rs1, off, rs2), ctx);
+}
+
+static inline void emit_subw(u8 rd, u8 rs1, u8 rs2, struct rv_jit_context *ctx)
+{
+ if (rvc_enabled() && is_creg(rd) && rd == rs1 && is_creg(rs2))
+ emitc(rvc_subw(rd, rs2), ctx);
+ else
+ emit(rv_subw(rd, rs1, rs2), ctx);
+}
+
+#endif /* __riscv_xlen == 64 */
+
+void bpf_jit_build_prologue(struct rv_jit_context *ctx);
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx);
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass);
+
+#endif /* _BPF_JIT_H */
diff --git a/arch/riscv/net/bpf_jit_comp32.c b/arch/riscv/net/bpf_jit_comp32.c
new file mode 100644
index 000000000..f300f93ba
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_comp32.c
@@ -0,0 +1,1347 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * BPF JIT compiler for RV32G
+ *
+ * Copyright (c) 2020 Luke Nelson <luke.r.nels@gmail.com>
+ * Copyright (c) 2020 Xi Wang <xi.wang@gmail.com>
+ *
+ * The code is based on the BPF JIT compiler for RV64G by Björn Töpel and
+ * the BPF JIT compiler for 32-bit ARM by Shubham Bansal and Mircea Gherzan.
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+/*
+ * Stack layout during BPF program execution:
+ *
+ * high
+ * RV32 fp => +----------+
+ * | saved ra |
+ * | saved fp | RV32 callee-saved registers
+ * | ... |
+ * +----------+ <= (fp - 4 * NR_SAVED_REGISTERS)
+ * | hi(R6) |
+ * | lo(R6) |
+ * | hi(R7) | JIT scratch space for BPF registers
+ * | lo(R7) |
+ * | ... |
+ * BPF_REG_FP => +----------+ <= (fp - 4 * NR_SAVED_REGISTERS
+ * | | - 4 * BPF_JIT_SCRATCH_REGS)
+ * | |
+ * | ... | BPF program stack
+ * | |
+ * RV32 sp => +----------+
+ * | |
+ * | ... | Function call stack
+ * | |
+ * +----------+
+ * low
+ */
+
+enum {
+ /* Stack layout - word indices into the JIT scratch area, used as the k argument to STACK_OFFSET() below. */
+ BPF_R6_HI,
+ BPF_R6_LO,
+ BPF_R7_HI,
+ BPF_R7_LO,
+ BPF_R8_HI,
+ BPF_R8_LO,
+ BPF_R9_HI,
+ BPF_R9_LO,
+ BPF_AX_HI,
+ BPF_AX_LO,
+ /* Stack space for BPF_REG_6 through BPF_REG_9 and BPF_REG_AX. */
+ BPF_JIT_SCRATCH_REGS,
+};
+
+/* Number of callee-saved registers stored to stack: ra, fp, s1-s7. */
+#define NR_SAVED_REGISTERS 9
+
+/* Offset from fp for BPF registers stored on stack. */
+#define STACK_OFFSET(k) (-4 - (4 * NR_SAVED_REGISTERS) - (4 * (k)))
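+
+/*
+ * Example: STACK_OFFSET(BPF_R6_HI) == -4 - 4 * 9 - 0 == -40, so
+ * hi(R6) lives at fp - 40 and lo(R6) one word below it at fp - 44.
+ */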
+
+#define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
+#define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
+
+#define RV_REG_TCC RV_REG_T6
+#define RV_REG_TCC_SAVED RV_REG_S7
+
+static const s8 bpf2rv32[][2] = {
+ /* Return value from in-kernel function, and exit value from eBPF. */
+ [BPF_REG_0] = {RV_REG_S2, RV_REG_S1},
+ /* Arguments from eBPF program to in-kernel function. */
+ [BPF_REG_1] = {RV_REG_A1, RV_REG_A0},
+ [BPF_REG_2] = {RV_REG_A3, RV_REG_A2},
+ [BPF_REG_3] = {RV_REG_A5, RV_REG_A4},
+ [BPF_REG_4] = {RV_REG_A7, RV_REG_A6},
+ [BPF_REG_5] = {RV_REG_S4, RV_REG_S3},
+ /*
+ * Callee-saved registers that in-kernel function will preserve.
+ * Stored on the stack.
+ */
+ [BPF_REG_6] = {STACK_OFFSET(BPF_R6_HI), STACK_OFFSET(BPF_R6_LO)},
+ [BPF_REG_7] = {STACK_OFFSET(BPF_R7_HI), STACK_OFFSET(BPF_R7_LO)},
+ [BPF_REG_8] = {STACK_OFFSET(BPF_R8_HI), STACK_OFFSET(BPF_R8_LO)},
+ [BPF_REG_9] = {STACK_OFFSET(BPF_R9_HI), STACK_OFFSET(BPF_R9_LO)},
+ /* Read-only frame pointer to access BPF stack. */
+ [BPF_REG_FP] = {RV_REG_S6, RV_REG_S5},
+ /* Temporary register for blinding constants. Stored on the stack. */
+ [BPF_REG_AX] = {STACK_OFFSET(BPF_AX_HI), STACK_OFFSET(BPF_AX_LO)},
+ /*
+ * Temporary registers used by the JIT to operate on registers stored
+ * on the stack. Save t0 and t1 to be used as temporaries in generated
+ * code.
+ */
+ [TMP_REG_1] = {RV_REG_T3, RV_REG_T2},
+ [TMP_REG_2] = {RV_REG_T5, RV_REG_T4},
+};
+
+static s8 hi(const s8 *r)
+{
+ return r[0];
+}
+
+static s8 lo(const s8 *r)
+{
+ return r[1];
+}
+
+static void emit_imm(const s8 rd, s32 imm, struct rv_jit_context *ctx)
+{
+ u32 upper = (imm + (1 << 11)) >> 12;
+ u32 lower = imm & 0xfff;
+
+ if (upper) {
+ emit(rv_lui(rd, upper), ctx);
+ emit(rv_addi(rd, rd, lower), ctx);
+ } else {
+ emit(rv_addi(rd, RV_REG_ZERO, lower), ctx);
+ }
+}
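+
+/*
+ * Example: for imm = 0x12345800, upper = 0x12346 and lower = 0x800;
+ * the hardware sign-extends the addi immediate to -2048, so
+ * lui rd, 0x12346 followed by addi rd, rd, 0x800 yields
+ * 0x12346000 - 0x800 == 0x12345800.
+ */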
+
+static void emit_imm32(const s8 *rd, s32 imm, struct rv_jit_context *ctx)
+{
+ /* Emit immediate into lower bits. */
+ emit_imm(lo(rd), imm, ctx);
+
+ /* Sign-extend into upper bits. */
+ if (imm >= 0)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ else
+ emit(rv_addi(hi(rd), RV_REG_ZERO, -1), ctx);
+}
+
+static void emit_imm64(const s8 *rd, s32 imm_hi, s32 imm_lo,
+ struct rv_jit_context *ctx)
+{
+ emit_imm(lo(rd), imm_lo, ctx);
+ emit_imm(hi(rd), imm_hi, ctx);
+}
+
+static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
+{
+ int stack_adjust = ctx->stack_size;
+ const s8 *r0 = bpf2rv32[BPF_REG_0];
+
+ /* Set return value if not tail call. */
+ if (!is_tail_call) {
+ emit(rv_addi(RV_REG_A0, lo(r0), 0), ctx);
+ emit(rv_addi(RV_REG_A1, hi(r0), 0), ctx);
+ }
+
+ /* Restore callee-saved registers. */
+ emit(rv_lw(RV_REG_RA, stack_adjust - 4, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_FP, stack_adjust - 8, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S1, stack_adjust - 12, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S2, stack_adjust - 16, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S3, stack_adjust - 20, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S4, stack_adjust - 24, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S5, stack_adjust - 28, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S6, stack_adjust - 32, RV_REG_SP), ctx);
+ emit(rv_lw(RV_REG_S7, stack_adjust - 36, RV_REG_SP), ctx);
+
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, stack_adjust), ctx);
+
+ if (is_tail_call) {
+ /*
+ * goto *(t0 + 4);
+ * Skips the first instruction of the prologue, which initializes
+ * the tail-call counter. Assumes t0 contains the address of the
+ * target program; see emit_bpf_tail_call.
+ */
+ emit(rv_jalr(RV_REG_ZERO, RV_REG_T0, 4), ctx);
+ } else {
+ emit(rv_jalr(RV_REG_ZERO, RV_REG_RA, 0), ctx);
+ }
+}
+
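+/*
+ * A bpf2rv32 entry is either an RV register number (>= 0) or a
+ * negative, fp-relative stack offset produced by STACK_OFFSET().
+ */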
+static bool is_stacked(s8 reg)
+{
+ return reg < 0;
+}
+
+static const s8 *bpf_get_reg64(const s8 *reg, const s8 *tmp,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(hi(reg))) {
+ emit(rv_lw(hi(tmp), hi(reg), RV_REG_FP), ctx);
+ emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
+ reg = tmp;
+ }
+ return reg;
+}
+
+static void bpf_put_reg64(const s8 *reg, const s8 *src,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(hi(reg))) {
+ emit(rv_sw(RV_REG_FP, hi(reg), hi(src)), ctx);
+ emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
+ }
+}
+
+static const s8 *bpf_get_reg32(const s8 *reg, const s8 *tmp,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(lo(reg))) {
+ emit(rv_lw(lo(tmp), lo(reg), RV_REG_FP), ctx);
+ reg = tmp;
+ }
+ return reg;
+}
+
+static void bpf_put_reg32(const s8 *reg, const s8 *src,
+ struct rv_jit_context *ctx)
+{
+ if (is_stacked(lo(reg))) {
+ emit(rv_sw(RV_REG_FP, lo(reg), lo(src)), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_sw(RV_REG_FP, hi(reg), RV_REG_ZERO), ctx);
+ } else if (!ctx->prog->aux->verifier_zext) {
+ emit(rv_addi(hi(reg), RV_REG_ZERO, 0), ctx);
+ }
+}
+
+static void emit_jump_and_link(u8 rd, s32 rvoff, bool force_jalr,
+ struct rv_jit_context *ctx)
+{
+ s32 upper, lower;
+
+ if (rvoff && is_21b_int(rvoff) && !force_jalr) {
+ emit(rv_jal(rd, rvoff >> 1), ctx);
+ return;
+ }
+
+ upper = (rvoff + (1 << 11)) >> 12;
+ lower = rvoff & 0xfff;
+ emit(rv_auipc(RV_REG_T1, upper), ctx);
+ emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+}
+
+static void emit_alu_i64(const s8 *dst, s32 imm,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit_imm32(rd, imm, ctx);
+ break;
+ case BPF_AND:
+ if (is_12b_int(imm)) {
+ emit(rv_andi(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ if (imm >= 0)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_OR:
+ if (is_12b_int(imm)) {
+ emit(rv_ori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ if (imm < 0)
+ emit(rv_ori(hi(rd), RV_REG_ZERO, -1), ctx);
+ break;
+ case BPF_XOR:
+ if (is_12b_int(imm)) {
+ emit(rv_xori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ if (imm < 0)
+ emit(rv_xori(hi(rd), hi(rd), -1), ctx);
+ break;
+ case BPF_LSH:
+ if (imm >= 32) {
+ emit(rv_slli(hi(rd), lo(rd), imm - 32), ctx);
+ emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
+ } else if (imm == 0) {
+ /* Do nothing. */
+ } else {
+ emit(rv_srli(RV_REG_T0, lo(rd), 32 - imm), ctx);
+ emit(rv_slli(hi(rd), hi(rd), imm), ctx);
+ emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
+ emit(rv_slli(lo(rd), lo(rd), imm), ctx);
+ }
+ break;
+ case BPF_RSH:
+ if (imm >= 32) {
+ emit(rv_srli(lo(rd), hi(rd), imm - 32), ctx);
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ } else if (imm == 0) {
+ /* Do nothing. */
+ } else {
+ emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
+ emit(rv_srli(lo(rd), lo(rd), imm), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_srli(hi(rd), hi(rd), imm), ctx);
+ }
+ break;
+ case BPF_ARSH:
+ if (imm >= 32) {
+ emit(rv_srai(lo(rd), hi(rd), imm - 32), ctx);
+ emit(rv_srai(hi(rd), hi(rd), 31), ctx);
+ } else if (imm == 0) {
+ /* Do nothing. */
+ } else {
+ emit(rv_slli(RV_REG_T0, hi(rd), 32 - imm), ctx);
+ emit(rv_srli(lo(rd), lo(rd), imm), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_srai(hi(rd), hi(rd), imm), ctx);
+ }
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+static void emit_alu_i32(const s8 *dst, s32 imm,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit_imm(lo(rd), imm, ctx);
+ break;
+ case BPF_ADD:
+ if (is_12b_int(imm)) {
+ emit(rv_addi(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_add(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_SUB:
+ if (is_12b_int(-imm)) {
+ emit(rv_addi(lo(rd), lo(rd), -imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_sub(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_AND:
+ if (is_12b_int(imm)) {
+ emit(rv_andi(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_and(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_OR:
+ if (is_12b_int(imm)) {
+ emit(rv_ori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_or(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_XOR:
+ if (is_12b_int(imm)) {
+ emit(rv_xori(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_xor(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_LSH:
+ if (is_12b_int(imm)) {
+ emit(rv_slli(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_sll(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_RSH:
+ if (is_12b_int(imm)) {
+ emit(rv_srli(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_srl(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_ARSH:
+ if (is_12b_int(imm)) {
+ emit(rv_srai(lo(rd), lo(rd), imm), ctx);
+ } else {
+ emit_imm(RV_REG_T0, imm, ctx);
+ emit(rv_sra(lo(rd), lo(rd), RV_REG_T0), ctx);
+ }
+ break;
+ }
+
+ bpf_put_reg32(dst, rd, ctx);
+}
+
+static void emit_alu_r64(const s8 *dst, const s8 *src,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit(rv_addi(lo(rd), lo(rs), 0), ctx);
+ emit(rv_addi(hi(rd), hi(rs), 0), ctx);
+ break;
+ case BPF_ADD:
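+ /*
+ * If rd == rs, the low-word add would clobber the value needed
+ * for the carry check (sltu of a register against itself is
+ * always 0), so double via a 1-bit shift instead; otherwise
+ * sltu yields the carry out of the low-word add.
+ */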
+ if (rd == rs) {
+ emit(rv_srli(RV_REG_T0, lo(rd), 31), ctx);
+ emit(rv_slli(hi(rd), hi(rd), 1), ctx);
+ emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
+ emit(rv_slli(lo(rd), lo(rd), 1), ctx);
+ } else {
+ emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), hi(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
+ }
+ break;
+ case BPF_SUB:
+ emit(rv_sub(RV_REG_T1, hi(rd), hi(rs)), ctx);
+ emit(rv_sltu(RV_REG_T0, lo(rd), lo(rs)), ctx);
+ emit(rv_sub(hi(rd), RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_AND:
+ emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_and(hi(rd), hi(rd), hi(rs)), ctx);
+ break;
+ case BPF_OR:
+ emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_or(hi(rd), hi(rd), hi(rs)), ctx);
+ break;
+ case BPF_XOR:
+ emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_xor(hi(rd), hi(rd), hi(rs)), ctx);
+ break;
+ case BPF_MUL:
+ emit(rv_mul(RV_REG_T0, hi(rs), lo(rd)), ctx);
+ emit(rv_mul(hi(rd), hi(rd), lo(rs)), ctx);
+ emit(rv_mulhu(RV_REG_T1, lo(rd), lo(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), RV_REG_T0), ctx);
+ emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_add(hi(rd), hi(rd), RV_REG_T1), ctx);
+ break;
+ case BPF_LSH:
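+ /*
+ * Variable 64-bit shift; branch/jump offsets are in 2-byte
+ * units. blt +8 skips the >= 32 path (3 instructions) when the
+ * shift amount is below 32, and jal +16 skips the seven
+ * instructions of the < 32 path.
+ */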
+ emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
+ emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
+ emit(rv_sll(hi(rd), lo(rd), RV_REG_T0), ctx);
+ emit(rv_addi(lo(rd), RV_REG_ZERO, 0), ctx);
+ emit(rv_jal(RV_REG_ZERO, 16), ctx);
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
+ emit(rv_srli(RV_REG_T0, lo(rd), 1), ctx);
+ emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
+ emit(rv_srl(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
+ emit(rv_sll(hi(rd), hi(rd), lo(rs)), ctx);
+ emit(rv_or(hi(rd), RV_REG_T0, hi(rd)), ctx);
+ emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_RSH:
+ emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
+ emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
+ emit(rv_srl(lo(rd), hi(rd), RV_REG_T0), ctx);
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ emit(rv_jal(RV_REG_ZERO, 16), ctx);
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
+ emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
+ emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
+ emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
+ emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_srl(hi(rd), hi(rd), lo(rs)), ctx);
+ break;
+ case BPF_ARSH:
+ emit(rv_addi(RV_REG_T0, lo(rs), -32), ctx);
+ emit(rv_blt(RV_REG_T0, RV_REG_ZERO, 8), ctx);
+ emit(rv_sra(lo(rd), hi(rd), RV_REG_T0), ctx);
+ emit(rv_srai(hi(rd), hi(rd), 31), ctx);
+ emit(rv_jal(RV_REG_ZERO, 16), ctx);
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 31), ctx);
+ emit(rv_slli(RV_REG_T0, hi(rd), 1), ctx);
+ emit(rv_sub(RV_REG_T1, RV_REG_T1, lo(rs)), ctx);
+ emit(rv_sll(RV_REG_T0, RV_REG_T0, RV_REG_T1), ctx);
+ emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
+ emit(rv_or(lo(rd), RV_REG_T0, lo(rd)), ctx);
+ emit(rv_sra(hi(rd), hi(rd), lo(rs)), ctx);
+ break;
+ case BPF_NEG:
+ emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
+ emit(rv_sltu(RV_REG_T0, RV_REG_ZERO, lo(rd)), ctx);
+ emit(rv_sub(hi(rd), RV_REG_ZERO, hi(rd)), ctx);
+ emit(rv_sub(hi(rd), hi(rd), RV_REG_T0), ctx);
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+static void emit_alu_r32(const s8 *dst, const s8 *src,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg32(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg32(src, tmp2, ctx);
+
+ switch (op) {
+ case BPF_MOV:
+ emit(rv_addi(lo(rd), lo(rs), 0), ctx);
+ break;
+ case BPF_ADD:
+ emit(rv_add(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_SUB:
+ emit(rv_sub(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_AND:
+ emit(rv_and(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_OR:
+ emit(rv_or(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_XOR:
+ emit(rv_xor(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_MUL:
+ emit(rv_mul(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_DIV:
+ emit(rv_divu(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_MOD:
+ emit(rv_remu(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_LSH:
+ emit(rv_sll(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_RSH:
+ emit(rv_srl(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_ARSH:
+ emit(rv_sra(lo(rd), lo(rd), lo(rs)), ctx);
+ break;
+ case BPF_NEG:
+ emit(rv_sub(lo(rd), RV_REG_ZERO, lo(rd)), ctx);
+ break;
+ }
+
+ bpf_put_reg32(dst, rd, ctx);
+}
+
+static int emit_branch_r64(const s8 *src1, const s8 *src2, s32 rvoff,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ int e, s = ctx->ninsns;
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+
+ const s8 *rs1 = bpf_get_reg64(src1, tmp1, ctx);
+ const s8 *rs2 = bpf_get_reg64(src2, tmp2, ctx);
+
+ /*
+ * NO_JUMP skips over the rest of the instructions and the
+ * emit_jump_and_link, meaning the BPF branch is not taken.
+ * JUMP skips directly to the emit_jump_and_link, meaning
+ * the BPF branch is taken.
+ *
+ * The fallthrough case results in the BPF branch being taken.
+ */
+#define NO_JUMP(idx) (6 + (2 * (idx)))
+#define JUMP(idx) (2 + (2 * (idx)))
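+
+/*
+ * Offsets are in 2-byte units, and emit_jump_and_link below always
+ * emits an auipc+jalr pair (force_jalr), i.e. two instructions:
+ * hence JUMP(idx) = 2 + 2 * idx and NO_JUMP(idx) = 6 + 2 * idx,
+ * where idx counts the conditional branches still to follow.
+ */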
+
+ switch (op) {
+ case BPF_JEQ:
+ emit(rv_bne(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bne(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JGT:
+ emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JLT:
+ emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JGE:
+ emit(rv_bgtu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bltu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JLE:
+ emit(rv_bltu(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgtu(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JNE:
+ emit(rv_bne(hi(rs1), hi(rs2), JUMP(1)), ctx);
+ emit(rv_beq(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSGT:
+ emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bleu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSLT:
+ emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgeu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSGE:
+ emit(rv_bgt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_blt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bltu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSLE:
+ emit(rv_blt(hi(rs1), hi(rs2), JUMP(2)), ctx);
+ emit(rv_bgt(hi(rs1), hi(rs2), NO_JUMP(1)), ctx);
+ emit(rv_bgtu(lo(rs1), lo(rs2), NO_JUMP(0)), ctx);
+ break;
+ case BPF_JSET:
+ emit(rv_and(RV_REG_T0, hi(rs1), hi(rs2)), ctx);
+ emit(rv_bne(RV_REG_T0, RV_REG_ZERO, JUMP(2)), ctx);
+ emit(rv_and(RV_REG_T0, lo(rs1), lo(rs2)), ctx);
+ emit(rv_beq(RV_REG_T0, RV_REG_ZERO, NO_JUMP(0)), ctx);
+ break;
+ }
+
+#undef NO_JUMP
+#undef JUMP
+
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ rvoff -= ninsns_rvoff(e - s);
+ emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ return 0;
+}
+
+static int emit_bcc(u8 op, u8 rd, u8 rs, int rvoff, struct rv_jit_context *ctx)
+{
+ int e, s = ctx->ninsns;
+ bool far = false;
+ int off;
+
+ if (op == BPF_JSET) {
+ /*
+ * BPF_JSET is a special case: it has no inverse so we always
+ * treat it as a far branch.
+ */
+ far = true;
+ } else if (!is_13b_int(rvoff)) {
+ op = invert_bpf_cond(op);
+ far = true;
+ }
+
+ /*
+ * For a far branch, the condition is negated and the branch jumps
+ * over itself plus the two instructions emitted by
+ * emit_jump_and_link; off is in 2-byte units, so 6 covers those
+ * three instructions. For a near branch, just use rvoff.
+ */
+ off = far ? 6 : (rvoff >> 1);
+
+ switch (op) {
+ case BPF_JEQ:
+ emit(rv_beq(rd, rs, off), ctx);
+ break;
+ case BPF_JGT:
+ emit(rv_bgtu(rd, rs, off), ctx);
+ break;
+ case BPF_JLT:
+ emit(rv_bltu(rd, rs, off), ctx);
+ break;
+ case BPF_JGE:
+ emit(rv_bgeu(rd, rs, off), ctx);
+ break;
+ case BPF_JLE:
+ emit(rv_bleu(rd, rs, off), ctx);
+ break;
+ case BPF_JNE:
+ emit(rv_bne(rd, rs, off), ctx);
+ break;
+ case BPF_JSGT:
+ emit(rv_bgt(rd, rs, off), ctx);
+ break;
+ case BPF_JSLT:
+ emit(rv_blt(rd, rs, off), ctx);
+ break;
+ case BPF_JSGE:
+ emit(rv_bge(rd, rs, off), ctx);
+ break;
+ case BPF_JSLE:
+ emit(rv_ble(rd, rs, off), ctx);
+ break;
+ case BPF_JSET:
+ emit(rv_and(RV_REG_T0, rd, rs), ctx);
+ emit(rv_beq(RV_REG_T0, RV_REG_ZERO, off), ctx);
+ break;
+ }
+
+ if (far) {
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ rvoff -= ninsns_rvoff(e - s);
+ emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ }
+ return 0;
+}
+
+static int emit_branch_r32(const s8 *src1, const s8 *src2, s32 rvoff,
+ struct rv_jit_context *ctx, const u8 op)
+{
+ int e, s = ctx->ninsns;
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+
+ const s8 *rs1 = bpf_get_reg32(src1, tmp1, ctx);
+ const s8 *rs2 = bpf_get_reg32(src2, tmp2, ctx);
+
+ e = ctx->ninsns;
+ /* Adjust for extra insns. */
+ rvoff -= ninsns_rvoff(e - s);
+
+ if (emit_bcc(op, lo(rs1), lo(rs2), rvoff, ctx))
+ return -1;
+
+ return 0;
+}
+
+static void emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
+{
+ const s8 *r0 = bpf2rv32[BPF_REG_0];
+ const s8 *r5 = bpf2rv32[BPF_REG_5];
+ u32 upper = ((u32)addr + (1 << 11)) >> 12;
+ u32 lower = addr & 0xfff;
+
+ /* R1-R4 are already in the correct registers; push R5 to the stack. */
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, -16), ctx);
+ emit(rv_sw(RV_REG_SP, 0, lo(r5)), ctx);
+ emit(rv_sw(RV_REG_SP, 4, hi(r5)), ctx);
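+ /*
+ * Note: the frame is 16 bytes although only 8 are used for R5,
+ * presumably to keep sp 16-byte aligned per the calling convention.
+ */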
+
+ /* Backup TCC. */
+ emit(rv_addi(RV_REG_TCC_SAVED, RV_REG_TCC, 0), ctx);
+
+ /*
+ * Use lui/jalr pair to jump to absolute address. Don't use emit_imm as
+ * the number of emitted instructions should not depend on the value of
+ * addr.
+ */
+ emit(rv_lui(RV_REG_T1, upper), ctx);
+ emit(rv_jalr(RV_REG_RA, RV_REG_T1, lower), ctx);
+
+ /* Restore TCC. */
+ emit(rv_addi(RV_REG_TCC, RV_REG_TCC_SAVED, 0), ctx);
+
+ /* Set return value and restore stack. */
+ emit(rv_addi(lo(r0), RV_REG_A0, 0), ctx);
+ emit(rv_addi(hi(r0), RV_REG_A1, 0), ctx);
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, 16), ctx);
+}
+
+static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
+{
+ /*
+ * R1 -> &ctx
+ * R2 -> &array
+ * R3 -> index
+ */
+ int tc_ninsn, off, start_insn = ctx->ninsns;
+ const s8 *arr_reg = bpf2rv32[BPF_REG_2];
+ const s8 *idx_reg = bpf2rv32[BPF_REG_3];
+
+ tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
+ ctx->offset[0];
+
+ /* max_entries = array->map.max_entries; */
+ off = offsetof(struct bpf_array, map.max_entries);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lw(RV_REG_T1, off, lo(arr_reg)), ctx);
+
+ /*
+ * if (index >= max_entries)
+ * goto out;
+ */
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
+ emit_bcc(BPF_JGE, lo(idx_reg), RV_REG_T1, off, ctx);
+
+ /*
+ * temp_tcc = tcc - 1;
+ * if (tcc < 0)
+ * goto out;
+ */
+ emit(rv_addi(RV_REG_T1, RV_REG_TCC, -1), ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
+ emit_bcc(BPF_JSLT, RV_REG_TCC, RV_REG_ZERO, off, ctx);
+
+ /*
+ * prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ */
+ emit(rv_slli(RV_REG_T0, lo(idx_reg), 2), ctx);
+ emit(rv_add(RV_REG_T0, RV_REG_T0, lo(arr_reg)), ctx);
+ off = offsetof(struct bpf_array, ptrs);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
+ emit_bcc(BPF_JEQ, RV_REG_T0, RV_REG_ZERO, off, ctx);
+
+ /*
+ * tcc = temp_tcc;
+ * goto *(prog->bpf_func + 4);
+ */
+ off = offsetof(struct bpf_prog, bpf_func);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lw(RV_REG_T0, off, RV_REG_T0), ctx);
+ emit(rv_addi(RV_REG_TCC, RV_REG_T1, 0), ctx);
+ /* Epilogue jumps to *(t0 + 4). */
+ __build_epilogue(true, ctx);
+ return 0;
+}
+
+static int emit_load_r64(const s8 *dst, const s8 *src, s16 off,
+ struct rv_jit_context *ctx, const u8 size)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ emit_imm(RV_REG_T0, off, ctx);
+ emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rs)), ctx);
+
+ switch (size) {
+ case BPF_B:
+ emit(rv_lbu(lo(rd), 0, RV_REG_T0), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_H:
+ emit(rv_lhu(lo(rd), 0, RV_REG_T0), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_W:
+ emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case BPF_DW:
+ emit(rv_lw(lo(rd), 0, RV_REG_T0), ctx);
+ emit(rv_lw(hi(rd), 4, RV_REG_T0), ctx);
+ break;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ return 0;
+}
+
+static int emit_store_r64(const s8 *dst, const s8 *src, s16 off,
+ struct rv_jit_context *ctx, const u8 size,
+ const u8 mode)
+{
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+ const s8 *rs = bpf_get_reg64(src, tmp2, ctx);
+
+ if (mode == BPF_XADD && size != BPF_W)
+ return -1;
+
+ emit_imm(RV_REG_T0, off, ctx);
+ emit(rv_add(RV_REG_T0, RV_REG_T0, lo(rd)), ctx);
+
+ switch (size) {
+ case BPF_B:
+ emit(rv_sb(RV_REG_T0, 0, lo(rs)), ctx);
+ break;
+ case BPF_H:
+ emit(rv_sh(RV_REG_T0, 0, lo(rs)), ctx);
+ break;
+ case BPF_W:
+ switch (mode) {
+ case BPF_MEM:
+ emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
+ break;
+ case BPF_XADD:
+ emit(rv_amoadd_w(RV_REG_ZERO, lo(rs), RV_REG_T0, 0, 0),
+ ctx);
+ break;
+ }
+ break;
+ case BPF_DW:
+ emit(rv_sw(RV_REG_T0, 0, lo(rs)), ctx);
+ emit(rv_sw(RV_REG_T0, 4, hi(rs)), ctx);
+ break;
+ }
+
+ return 0;
+}
+
+static void emit_rev16(const s8 rd, struct rv_jit_context *ctx)
+{
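+ /*
+ * Byte-swap the low 16 bits; after slli 16 they occupy bits
+ * 31:16, and the add acts as an or since its operands have no
+ * overlapping bytes. Example: 0x....AABB becomes 0x0000BBAA.
+ */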
+ emit(rv_slli(rd, rd, 16), ctx);
+ emit(rv_slli(RV_REG_T1, rd, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_add(RV_REG_T1, rd, RV_REG_T1), ctx);
+ emit(rv_srli(rd, RV_REG_T1, 16), ctx);
+}
+
+static void emit_rev32(const s8 rd, struct rv_jit_context *ctx)
+{
+ emit(rv_addi(RV_REG_T1, RV_REG_ZERO, 0), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_slli(RV_REG_T1, RV_REG_T1, 8), ctx);
+ emit(rv_srli(rd, rd, 8), ctx);
+ emit(rv_andi(RV_REG_T0, rd, 255), ctx);
+ emit(rv_add(RV_REG_T1, RV_REG_T1, RV_REG_T0), ctx);
+ emit(rv_addi(rd, RV_REG_T1, 0), ctx);
+}
+
+static void emit_zext64(const s8 *dst, struct rv_jit_context *ctx)
+{
+ const s8 *rd;
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+
+ rd = bpf_get_reg64(dst, tmp1, ctx);
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ bpf_put_reg64(dst, rd, ctx);
+}
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass)
+{
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
+ BPF_CLASS(insn->code) == BPF_JMP;
+ int s, e, rvoff, i = insn - ctx->prog->insnsi;
+ u8 code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+
+ const s8 *dst = bpf2rv32[insn->dst_reg];
+ const s8 *src = bpf2rv32[insn->src_reg];
+ const s8 *tmp1 = bpf2rv32[TMP_REG_1];
+ const s8 *tmp2 = bpf2rv32[TMP_REG_2];
+
+ switch (code) {
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ if (BPF_SRC(code) == BPF_K) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+ emit_alu_r64(dst, src, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU64 | BPF_NEG:
+ emit_alu_r64(dst, tmp2, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ goto notsupported;
+
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ emit_alu_i64(dst, imm, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_X:
+ if (imm == 1) {
+ /* Special mov32 for zext. */
+ emit_zext64(dst, ctx);
+ break;
+ }
+ fallthrough;
+
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU | BPF_XOR | BPF_X:
+
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU | BPF_MUL | BPF_K:
+
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU | BPF_DIV | BPF_K:
+
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU | BPF_MOD | BPF_K:
+
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ if (BPF_SRC(code) == BPF_K) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+ emit_alu_r32(dst, src, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ /*
+ * mul, div and mod are handled in the BPF_X case, since RISC-V
+ * has no I-type equivalents for them.
+ */
+ emit_alu_i32(dst, imm, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_NEG:
+ /*
+ * src is ignored; tmp2 serves as a dummy register, since it
+ * is not on the stack.
+ */
+ emit_alu_r32(dst, tmp2, ctx, BPF_OP(code));
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ {
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (imm) {
+ case 16:
+ emit(rv_slli(lo(rd), lo(rd), 16), ctx);
+ emit(rv_srli(lo(rd), lo(rd), 16), ctx);
+ fallthrough;
+ case 32:
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case 64:
+ /* Do nothing. */
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ break;
+ }
+
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
+ {
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ switch (imm) {
+ case 16:
+ emit_rev16(lo(rd), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case 32:
+ emit_rev32(lo(rd), ctx);
+ if (!ctx->prog->aux->verifier_zext)
+ emit(rv_addi(hi(rd), RV_REG_ZERO, 0), ctx);
+ break;
+ case 64:
+ /* Swap upper and lower halves. */
+ emit(rv_addi(RV_REG_T0, lo(rd), 0), ctx);
+ emit(rv_addi(lo(rd), hi(rd), 0), ctx);
+ emit(rv_addi(hi(rd), RV_REG_T0, 0), ctx);
+
+ /* Swap each half. */
+ emit_rev32(lo(rd), ctx);
+ emit_rev32(hi(rd), ctx);
+ break;
+ default:
+ pr_err("bpf-jit: BPF_END imm %d invalid\n", imm);
+ return -1;
+ }
+
+ bpf_put_reg64(dst, rd, ctx);
+ break;
+ }
+
+ case BPF_JMP | BPF_JA:
+ rvoff = rv_offset(i, off, ctx);
+ emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ break;
+
+ case BPF_JMP | BPF_CALL:
+ {
+ bool fixed;
+ int ret;
+ u64 addr;
+
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
+ &fixed);
+ if (ret < 0)
+ return ret;
+ emit_call(fixed, addr, ctx);
+ break;
+ }
+
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(i, ctx))
+ return -1;
+ break;
+
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ rvoff = rv_offset(i, off, ctx);
+ if (BPF_SRC(code) == BPF_K) {
+ s = ctx->ninsns;
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ e = ctx->ninsns;
+ rvoff -= ninsns_rvoff(e - s);
+ }
+
+ if (is64)
+ emit_branch_r64(dst, src, rvoff, ctx, BPF_OP(code));
+ else
+ emit_branch_r32(dst, src, rvoff, ctx, BPF_OP(code));
+ break;
+
+ case BPF_JMP | BPF_EXIT:
+ if (i == ctx->prog->len - 1)
+ break;
+
+ rvoff = epilogue_offset(ctx);
+ emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ break;
+
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
+ struct bpf_insn insn1 = insn[1];
+ s32 imm_lo = imm;
+ s32 imm_hi = insn1.imm;
+ const s8 *rd = bpf_get_reg64(dst, tmp1, ctx);
+
+ emit_imm64(rd, imm_hi, imm_lo, ctx);
+ bpf_put_reg64(dst, rd, ctx);
+ return 1;
+ }
+
+ case BPF_LDX | BPF_MEM | BPF_B:
+ case BPF_LDX | BPF_MEM | BPF_H:
+ case BPF_LDX | BPF_MEM | BPF_W:
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ if (emit_load_r64(dst, src, off, ctx, BPF_SIZE(code)))
+ return -1;
+ break;
+
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
+ break;
+
+ case BPF_ST | BPF_MEM | BPF_B:
+ case BPF_ST | BPF_MEM | BPF_H:
+ case BPF_ST | BPF_MEM | BPF_W:
+ case BPF_ST | BPF_MEM | BPF_DW:
+
+ case BPF_STX | BPF_MEM | BPF_B:
+ case BPF_STX | BPF_MEM | BPF_H:
+ case BPF_STX | BPF_MEM | BPF_W:
+ case BPF_STX | BPF_MEM | BPF_DW:
+ case BPF_STX | BPF_XADD | BPF_W:
+ if (BPF_CLASS(code) == BPF_ST) {
+ emit_imm32(tmp2, imm, ctx);
+ src = tmp2;
+ }
+
+ if (emit_store_r64(dst, src, off, ctx, BPF_SIZE(code),
+ BPF_MODE(code)))
+ return -1;
+ break;
+
+ /* No hardware support for 8-byte atomics in RV32. */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ /* Fallthrough. */
+
+notsupported:
+ pr_info_once("bpf-jit: not supported: opcode %02x\n", code);
+ return -EFAULT;
+
+ default:
+ pr_err("bpf-jit: unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void bpf_jit_build_prologue(struct rv_jit_context *ctx)
+{
+ const s8 *fp = bpf2rv32[BPF_REG_FP];
+ const s8 *r1 = bpf2rv32[BPF_REG_1];
+ int stack_adjust = 0;
+ int bpf_stack_adjust =
+ round_up(ctx->prog->aux->stack_depth, STACK_ALIGN);
+
+ /* Make space for callee-saved registers. */
+ stack_adjust += NR_SAVED_REGISTERS * sizeof(u32);
+ /* Make space for BPF registers on stack. */
+ stack_adjust += BPF_JIT_SCRATCH_REGS * sizeof(u32);
+ /* Make space for BPF stack. */
+ stack_adjust += bpf_stack_adjust;
+ /* Round up for stack alignment. */
+ stack_adjust = round_up(stack_adjust, STACK_ALIGN);
+
+ /*
+ * The first instruction sets the tail-call-counter (TCC) register.
+ * This instruction is skipped by tail calls.
+ */
+ emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
+
+ emit(rv_addi(RV_REG_SP, RV_REG_SP, -stack_adjust), ctx);
+
+ /* Save callee-save registers. */
+ emit(rv_sw(RV_REG_SP, stack_adjust - 4, RV_REG_RA), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 8, RV_REG_FP), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 12, RV_REG_S1), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 16, RV_REG_S2), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 20, RV_REG_S3), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 24, RV_REG_S4), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 28, RV_REG_S5), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 32, RV_REG_S6), ctx);
+ emit(rv_sw(RV_REG_SP, stack_adjust - 36, RV_REG_S7), ctx);
+
+ /* Set fp: used as the base address for stacked BPF registers. */
+ emit(rv_addi(RV_REG_FP, RV_REG_SP, stack_adjust), ctx);
+
+ /* Set up BPF frame pointer. */
+ emit(rv_addi(lo(fp), RV_REG_SP, bpf_stack_adjust), ctx);
+ emit(rv_addi(hi(fp), RV_REG_ZERO, 0), ctx);
+
+ /* Set up BPF context pointer. */
+ emit(rv_addi(lo(r1), RV_REG_A0, 0), ctx);
+ emit(rv_addi(hi(r1), RV_REG_ZERO, 0), ctx);
+
+ ctx->stack_size = stack_adjust;
+}
+
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
+{
+ __build_epilogue(false, ctx);
+}
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
new file mode 100644
index 000000000..0cc3dd9d3
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -0,0 +1,1143 @@
+// SPDX-License-Identifier: GPL-2.0
+/* BPF JIT compiler for RV64G
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+#define RV_REG_TCC RV_REG_A6
+#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if the program makes calls */
+
+static const int regmap[] = {
+ [BPF_REG_0] = RV_REG_A5,
+ [BPF_REG_1] = RV_REG_A0,
+ [BPF_REG_2] = RV_REG_A1,
+ [BPF_REG_3] = RV_REG_A2,
+ [BPF_REG_4] = RV_REG_A3,
+ [BPF_REG_5] = RV_REG_A4,
+ [BPF_REG_6] = RV_REG_S1,
+ [BPF_REG_7] = RV_REG_S2,
+ [BPF_REG_8] = RV_REG_S3,
+ [BPF_REG_9] = RV_REG_S4,
+ [BPF_REG_FP] = RV_REG_S5,
+ [BPF_REG_AX] = RV_REG_T0,
+};
+
+enum {
+ RV_CTX_F_SEEN_TAIL_CALL = 0,
+ RV_CTX_F_SEEN_CALL = RV_REG_RA,
+ RV_CTX_F_SEEN_S1 = RV_REG_S1,
+ RV_CTX_F_SEEN_S2 = RV_REG_S2,
+ RV_CTX_F_SEEN_S3 = RV_REG_S3,
+ RV_CTX_F_SEEN_S4 = RV_REG_S4,
+ RV_CTX_F_SEEN_S5 = RV_REG_S5,
+ RV_CTX_F_SEEN_S6 = RV_REG_S6,
+};
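+
+/*
+ * The flag values reuse RV register numbers, so a single ctx->flags
+ * bitmask tracks both "a call was seen" (bit ra) and which
+ * callee-saved registers the program uses.
+ */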
+
+static u8 bpf_to_rv_reg(int bpf_reg, struct rv_jit_context *ctx)
+{
+ u8 reg = regmap[bpf_reg];
+
+ switch (reg) {
+ case RV_CTX_F_SEEN_S1:
+ case RV_CTX_F_SEEN_S2:
+ case RV_CTX_F_SEEN_S3:
+ case RV_CTX_F_SEEN_S4:
+ case RV_CTX_F_SEEN_S5:
+ case RV_CTX_F_SEEN_S6:
+ __set_bit(reg, &ctx->flags);
+ }
+ return reg;
+};
+
+static bool seen_reg(int reg, struct rv_jit_context *ctx)
+{
+ switch (reg) {
+ case RV_CTX_F_SEEN_CALL:
+ case RV_CTX_F_SEEN_S1:
+ case RV_CTX_F_SEEN_S2:
+ case RV_CTX_F_SEEN_S3:
+ case RV_CTX_F_SEEN_S4:
+ case RV_CTX_F_SEEN_S5:
+ case RV_CTX_F_SEEN_S6:
+ return test_bit(reg, &ctx->flags);
+ }
+ return false;
+}
+
+static void mark_fp(struct rv_jit_context *ctx)
+{
+ __set_bit(RV_CTX_F_SEEN_S5, &ctx->flags);
+}
+
+static void mark_call(struct rv_jit_context *ctx)
+{
+ __set_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
+}
+
+static bool seen_call(struct rv_jit_context *ctx)
+{
+ return test_bit(RV_CTX_F_SEEN_CALL, &ctx->flags);
+}
+
+static void mark_tail_call(struct rv_jit_context *ctx)
+{
+ __set_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
+}
+
+static bool seen_tail_call(struct rv_jit_context *ctx)
+{
+ return test_bit(RV_CTX_F_SEEN_TAIL_CALL, &ctx->flags);
+}
+
+static u8 rv_tail_call_reg(struct rv_jit_context *ctx)
+{
+ mark_tail_call(ctx);
+
+ if (seen_call(ctx)) {
+ __set_bit(RV_CTX_F_SEEN_S6, &ctx->flags);
+ return RV_REG_S6;
+ }
+ return RV_REG_A6;
+}
+
+static bool is_32b_int(s64 val)
+{
+ return -(1L << 31) <= val && val < (1L << 31);
+}
+
+static bool in_auipc_jalr_range(s64 val)
+{
+ /*
+ * auipc+jalr can reach any signed PC-relative offset in the range
+ * [-2^31 - 2^11, 2^31 - 2^11).
+ */
+ return (-(1L << 31) - (1L << 11)) <= val &&
+ val < ((1L << 31) - (1L << 11));
+}
+
+static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
+{
+ /* Note that the immediate from the add is sign-extended,
+ * which means that we need to compensate by adding 2^12 when
+ * the 12th bit is set. A simpler way of doing this, and
+ * getting rid of the check, is to just add 2^11 before the
+ * shift. The "Loading a 32-Bit constant" example from the
+ * "Computer Organization and Design, RISC-V edition" book by
+ * Patterson/Hennessy highlights this fact.
+ *
+ * This also means that we need to process LSB to MSB.
+ */
+ s64 upper = (val + (1 << 11)) >> 12;
+ /* Sign-extend lower 12 bits to 64 bits since immediates for li, addiw,
+ * and addi are signed and RVC checks will perform signed comparisons.
+ */
+ s64 lower = ((val & 0xfff) << 52) >> 52;
+ int shift;
+
+ if (is_32b_int(val)) {
+ if (upper)
+ emit_lui(rd, upper, ctx);
+
+ if (!upper) {
+ emit_li(rd, lower, ctx);
+ return;
+ }
+
+ emit_addiw(rd, rd, lower, ctx);
+ return;
+ }
+
+ shift = __ffs(upper);
+ upper >>= shift;
+ shift += 12;
+
+ emit_imm(rd, upper, ctx);
+
+ emit_slli(rd, rd, shift, ctx);
+ if (lower)
+ emit_addi(rd, rd, lower, ctx);
+}
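+
+/*
+ * Example: 0x4000000800 is not a 32-bit value, so emit_imm()
+ * recurses on upper = 0x4000001 (lui 0x4000; addiw rd, rd, 1),
+ * then emits slli rd, rd, 12 and addi rd, rd, -2048 to restore
+ * the low 12 bits.
+ */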
+
+static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
+{
+ int stack_adjust = ctx->stack_size, store_offset = stack_adjust - 8;
+
+ if (seen_reg(RV_REG_RA, ctx)) {
+ emit_ld(RV_REG_RA, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+ emit_ld(RV_REG_FP, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ if (seen_reg(RV_REG_S1, ctx)) {
+ emit_ld(RV_REG_S1, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S2, ctx)) {
+ emit_ld(RV_REG_S2, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S3, ctx)) {
+ emit_ld(RV_REG_S3, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S4, ctx)) {
+ emit_ld(RV_REG_S4, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S5, ctx)) {
+ emit_ld(RV_REG_S5, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S6, ctx)) {
+ emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
+
+ emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
+ /* Set return value. */
+ if (!is_tail_call)
+ emit_addiw(RV_REG_A0, RV_REG_A5, 0, ctx);
+ emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
+ is_tail_call ? 4 : 0, /* skip TCC init */
+ ctx);
+}
+
+static void emit_bcc(u8 cond, u8 rd, u8 rs, int rvoff,
+ struct rv_jit_context *ctx)
+{
+ switch (cond) {
+ case BPF_JEQ:
+ emit(rv_beq(rd, rs, rvoff >> 1), ctx);
+ return;
+ case BPF_JGT:
+ emit(rv_bltu(rs, rd, rvoff >> 1), ctx);
+ return;
+ case BPF_JLT:
+ emit(rv_bltu(rd, rs, rvoff >> 1), ctx);
+ return;
+ case BPF_JGE:
+ emit(rv_bgeu(rd, rs, rvoff >> 1), ctx);
+ return;
+ case BPF_JLE:
+ emit(rv_bgeu(rs, rd, rvoff >> 1), ctx);
+ return;
+ case BPF_JNE:
+ emit(rv_bne(rd, rs, rvoff >> 1), ctx);
+ return;
+ case BPF_JSGT:
+ emit(rv_blt(rs, rd, rvoff >> 1), ctx);
+ return;
+ case BPF_JSLT:
+ emit(rv_blt(rd, rs, rvoff >> 1), ctx);
+ return;
+ case BPF_JSGE:
+ emit(rv_bge(rd, rs, rvoff >> 1), ctx);
+ return;
+ case BPF_JSLE:
+ emit(rv_bge(rs, rd, rvoff >> 1), ctx);
+ }
+}
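+
+/*
+ * Note: rvoff passed to emit_bcc() is a byte offset; the rv_b*()
+ * encoders above take it in 2-byte units, hence the >> 1.
+ */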
+
+static void emit_branch(u8 cond, u8 rd, u8 rs, int rvoff,
+ struct rv_jit_context *ctx)
+{
+ s64 upper, lower;
+
+ if (is_13b_int(rvoff)) {
+ emit_bcc(cond, rd, rs, rvoff, ctx);
+ return;
+ }
+
+ /* Adjust for jal */
+ rvoff -= 4;
+
+ /* Transform, e.g.:
+ * bne rd,rs,foo
+ * to
+ * beq rd,rs,<.L1>
+ * (auipc foo)
+ * jal(r) foo
+ * .L1
+ */
+ cond = invert_bpf_cond(cond);
+ if (is_21b_int(rvoff)) {
+ emit_bcc(cond, rd, rs, 8, ctx);
+ emit(rv_jal(RV_REG_ZERO, rvoff >> 1), ctx);
+ return;
+ }
+
+ /* 32-bit offset: no need for an additional rvoff adjustment, since we
+ * get that from the auipc at PC', where PC = PC' + 4.
+ */
+ upper = (rvoff + (1 << 11)) >> 12;
+ lower = rvoff & 0xfff;
+
+ emit_bcc(cond, rd, rs, 12, ctx);
+ emit(rv_auipc(RV_REG_T1, upper), ctx);
+ emit(rv_jalr(RV_REG_ZERO, RV_REG_T1, lower), ctx);
+}
+
+static void emit_zext_32(u8 reg, struct rv_jit_context *ctx)
+{
+ emit_slli(reg, reg, 32, ctx);
+ emit_srli(reg, reg, 32, ctx);
+}
+
+static int emit_bpf_tail_call(int insn, struct rv_jit_context *ctx)
+{
+ int tc_ninsn, off, start_insn = ctx->ninsns;
+ u8 tcc = rv_tail_call_reg(ctx);
+
+ /* a0: &ctx
+ * a1: &array
+ * a2: index
+ *
+ * if (index >= array->map.max_entries)
+ * goto out;
+ */
+ tc_ninsn = insn ? ctx->offset[insn] - ctx->offset[insn - 1] :
+ ctx->offset[0];
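+ /* The BPF tail-call index is 32-bit; clear the upper half of a2. */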
+ emit_zext_32(RV_REG_A2, ctx);
+
+ off = offsetof(struct bpf_array, map.max_entries);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit(rv_lwu(RV_REG_T1, off, RV_REG_A1), ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
+ emit_branch(BPF_JGE, RV_REG_A2, RV_REG_T1, off, ctx);
+
+ /* if (TCC-- < 0)
+ * goto out;
+ */
+ emit_addi(RV_REG_T1, tcc, -1, ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
+ emit_branch(BPF_JSLT, tcc, RV_REG_ZERO, off, ctx);
+
+ /* prog = array->ptrs[index];
+ * if (!prog)
+ * goto out;
+ */
+ emit_slli(RV_REG_T2, RV_REG_A2, 3, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_A1, ctx);
+ off = offsetof(struct bpf_array, ptrs);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit_ld(RV_REG_T2, off, RV_REG_T2, ctx);
+ off = ninsns_rvoff(tc_ninsn - (ctx->ninsns - start_insn));
+ emit_branch(BPF_JEQ, RV_REG_T2, RV_REG_ZERO, off, ctx);
+
+ /* goto *(prog->bpf_func + 4); */
+ off = offsetof(struct bpf_prog, bpf_func);
+ if (is_12b_check(off, insn))
+ return -1;
+ emit_ld(RV_REG_T3, off, RV_REG_T2, ctx);
+ emit_mv(RV_REG_TCC, RV_REG_T1, ctx);
+ __build_epilogue(true, ctx);
+ return 0;
+}
+
+static void init_regs(u8 *rd, u8 *rs, const struct bpf_insn *insn,
+ struct rv_jit_context *ctx)
+{
+ u8 code = insn->code;
+
+ switch (code) {
+ case BPF_JMP | BPF_JA:
+ case BPF_JMP | BPF_CALL:
+ case BPF_JMP | BPF_EXIT:
+ case BPF_JMP | BPF_TAIL_CALL:
+ break;
+ default:
+ *rd = bpf_to_rv_reg(insn->dst_reg, ctx);
+ }
+
+ if (code & (BPF_ALU | BPF_X) || code & (BPF_ALU64 | BPF_X) ||
+ code & (BPF_JMP | BPF_X) || code & (BPF_JMP32 | BPF_X) ||
+ code & BPF_LDX || code & BPF_STX)
+ *rs = bpf_to_rv_reg(insn->src_reg, ctx);
+}
+
+static void emit_zext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
+{
+ emit_mv(RV_REG_T2, *rd, ctx);
+ emit_zext_32(RV_REG_T2, ctx);
+ emit_mv(RV_REG_T1, *rs, ctx);
+ emit_zext_32(RV_REG_T1, ctx);
+ *rd = RV_REG_T2;
+ *rs = RV_REG_T1;
+}
+
+static void emit_sext_32_rd_rs(u8 *rd, u8 *rs, struct rv_jit_context *ctx)
+{
+ emit_addiw(RV_REG_T2, *rd, 0, ctx);
+ emit_addiw(RV_REG_T1, *rs, 0, ctx);
+ *rd = RV_REG_T2;
+ *rs = RV_REG_T1;
+}
+
+static void emit_zext_32_rd_t1(u8 *rd, struct rv_jit_context *ctx)
+{
+ emit_mv(RV_REG_T2, *rd, ctx);
+ emit_zext_32(RV_REG_T2, ctx);
+ emit_zext_32(RV_REG_T1, ctx);
+ *rd = RV_REG_T2;
+}
+
+static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
+{
+ emit_addiw(RV_REG_T2, *rd, 0, ctx);
+ *rd = RV_REG_T2;
+}
+
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
+ struct rv_jit_context *ctx)
+{
+ s64 upper, lower;
+
+ if (rvoff && fixed_addr && is_21b_int(rvoff)) {
+ emit(rv_jal(rd, rvoff >> 1), ctx);
+ return 0;
+ } else if (in_auipc_jalr_range(rvoff)) {
+ upper = (rvoff + (1 << 11)) >> 12;
+ lower = rvoff & 0xfff;
+ emit(rv_auipc(RV_REG_T1, upper), ctx);
+ emit(rv_jalr(rd, RV_REG_T1, lower), ctx);
+ return 0;
+ }
+
+ pr_err("bpf-jit: target offset 0x%llx is out of range\n", rvoff);
+ return -ERANGE;
+}
+
+static bool is_signed_bpf_cond(u8 cond)
+{
+ return cond == BPF_JSGT || cond == BPF_JSLT ||
+ cond == BPF_JSGE || cond == BPF_JSLE;
+}
+
+static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
+{
+ s64 off = 0;
+ u64 ip;
+
+ if (addr && ctx->insns) {
+ ip = (u64)(long)(ctx->insns + ctx->ninsns);
+ off = addr - ip;
+ }
+
+ return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
+}
+
+int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
+ bool extra_pass)
+{
+ bool is64 = BPF_CLASS(insn->code) == BPF_ALU64 ||
+ BPF_CLASS(insn->code) == BPF_JMP;
+ int s, e, rvoff, ret, i = insn - ctx->prog->insnsi;
+ struct bpf_prog_aux *aux = ctx->prog->aux;
+ u8 rd = -1, rs = -1, code = insn->code;
+ s16 off = insn->off;
+ s32 imm = insn->imm;
+
+ init_regs(&rd, &rs, insn, ctx);
+
+ switch (code) {
+ /* dst = src */
+ case BPF_ALU | BPF_MOV | BPF_X:
+ case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (imm == 1) {
+ /* Special mov32 for zext */
+ emit_zext_32(rd, ctx);
+ break;
+ }
+ emit_mv(rd, rs, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = dst OP src */
+ case BPF_ALU | BPF_ADD | BPF_X:
+ case BPF_ALU64 | BPF_ADD | BPF_X:
+ emit_add(rd, rd, rs, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_X:
+ case BPF_ALU64 | BPF_SUB | BPF_X:
+ if (is64)
+ emit_sub(rd, rd, rs, ctx);
+ else
+ emit_subw(rd, rd, rs, ctx);
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_X:
+ case BPF_ALU64 | BPF_AND | BPF_X:
+ emit_and(rd, rd, rs, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_X:
+ case BPF_ALU64 | BPF_OR | BPF_X:
+ emit_or(rd, rd, rs, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_X:
+ case BPF_ALU64 | BPF_XOR | BPF_X:
+ emit_xor(rd, rd, rs, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_X:
+ case BPF_ALU64 | BPF_MUL | BPF_X:
+ emit(is64 ? rv_mul(rd, rd, rs) : rv_mulw(rd, rd, rs), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_X:
+ case BPF_ALU64 | BPF_DIV | BPF_X:
+ emit(is64 ? rv_divu(rd, rd, rs) : rv_divuw(rd, rd, rs), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_X:
+ case BPF_ALU64 | BPF_MOD | BPF_X:
+ emit(is64 ? rv_remu(rd, rd, rs) : rv_remuw(rd, rd, rs), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_X:
+ case BPF_ALU64 | BPF_LSH | BPF_X:
+ emit(is64 ? rv_sll(rd, rd, rs) : rv_sllw(rd, rd, rs), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_X:
+ case BPF_ALU64 | BPF_RSH | BPF_X:
+ emit(is64 ? rv_srl(rd, rd, rs) : rv_srlw(rd, rd, rs), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_X:
+ case BPF_ALU64 | BPF_ARSH | BPF_X:
+ emit(is64 ? rv_sra(rd, rd, rs) : rv_sraw(rd, rd, rs), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = -dst */
+ case BPF_ALU | BPF_NEG:
+ case BPF_ALU64 | BPF_NEG:
+ emit_sub(rd, RV_REG_ZERO, rd, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = BSWAP##imm(dst) */
+ case BPF_ALU | BPF_END | BPF_FROM_LE:
+ switch (imm) {
+ case 16:
+ emit_slli(rd, rd, 48, ctx);
+ emit_srli(rd, rd, 48, ctx);
+ break;
+ case 32:
+ if (!aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case 64:
+ /* Do nothing */
+ break;
+ }
+ break;
+
+ case BPF_ALU | BPF_END | BPF_FROM_BE:
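+ /*
+ * Unrolled byte swap: each round moves the lowest byte of rd
+ * into the accumulator t2 and shifts both. The gotos exit the
+ * unrolled loop early, so only 2 (imm == 16) or 4 (imm == 32)
+ * bytes are processed; out_be handles the final byte.
+ */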
+ emit_li(RV_REG_T2, 0, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+ if (imm == 16)
+ goto out_be;
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+ if (imm == 32)
+ goto out_be;
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+ emit_slli(RV_REG_T2, RV_REG_T2, 8, ctx);
+ emit_srli(rd, rd, 8, ctx);
+out_be:
+ emit_andi(RV_REG_T1, rd, 0xff, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, RV_REG_T1, ctx);
+
+ emit_mv(rd, RV_REG_T2, ctx);
+ break;
+
+ /* dst = imm */
+ case BPF_ALU | BPF_MOV | BPF_K:
+ case BPF_ALU64 | BPF_MOV | BPF_K:
+ emit_imm(rd, imm, ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* dst = dst OP imm */
+ case BPF_ALU | BPF_ADD | BPF_K:
+ case BPF_ALU64 | BPF_ADD | BPF_K:
+ if (is_12b_int(imm)) {
+ emit_addi(rd, rd, imm, ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit_add(rd, rd, RV_REG_T1, ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_SUB | BPF_K:
+ case BPF_ALU64 | BPF_SUB | BPF_K:
+ if (is_12b_int(-imm)) {
+ emit_addi(rd, rd, -imm, ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit_sub(rd, rd, RV_REG_T1, ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_AND | BPF_K:
+ case BPF_ALU64 | BPF_AND | BPF_K:
+ if (is_12b_int(imm)) {
+ emit_andi(rd, rd, imm, ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit_and(rd, rd, RV_REG_T1, ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_OR | BPF_K:
+ case BPF_ALU64 | BPF_OR | BPF_K:
+ if (is_12b_int(imm)) {
+ emit(rv_ori(rd, rd, imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit_or(rd, rd, RV_REG_T1, ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_XOR | BPF_K:
+ case BPF_ALU64 | BPF_XOR | BPF_K:
+ if (is_12b_int(imm)) {
+ emit(rv_xori(rd, rd, imm), ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit_xor(rd, rd, RV_REG_T1, ctx);
+ }
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MUL | BPF_K:
+ case BPF_ALU64 | BPF_MUL | BPF_K:
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_mul(rd, rd, RV_REG_T1) :
+ rv_mulw(rd, rd, RV_REG_T1), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_DIV | BPF_K:
+ case BPF_ALU64 | BPF_DIV | BPF_K:
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_divu(rd, rd, RV_REG_T1) :
+ rv_divuw(rd, rd, RV_REG_T1), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_MOD | BPF_K:
+ case BPF_ALU64 | BPF_MOD | BPF_K:
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit(is64 ? rv_remu(rd, rd, RV_REG_T1) :
+ rv_remuw(rd, rd, RV_REG_T1), ctx);
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_LSH | BPF_K:
+ case BPF_ALU64 | BPF_LSH | BPF_K:
+ emit_slli(rd, rd, imm, ctx);
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_RSH | BPF_K:
+ case BPF_ALU64 | BPF_RSH | BPF_K:
+ if (is64)
+ emit_srli(rd, rd, imm, ctx);
+ else
+ emit(rv_srliw(rd, rd, imm), ctx);
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+ case BPF_ALU | BPF_ARSH | BPF_K:
+ case BPF_ALU64 | BPF_ARSH | BPF_K:
+ if (is64)
+ emit_srai(rd, rd, imm, ctx);
+ else
+ emit(rv_sraiw(rd, rd, imm), ctx);
+
+ if (!is64 && !aux->verifier_zext)
+ emit_zext_32(rd, ctx);
+ break;
+
+ /* JUMP off */
+ case BPF_JMP | BPF_JA:
+ rvoff = rv_offset(i, off, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ if (ret)
+ return ret;
+ break;
+
+ /* IF (dst COND src) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_X:
+ case BPF_JMP32 | BPF_JEQ | BPF_X:
+ case BPF_JMP | BPF_JGT | BPF_X:
+ case BPF_JMP32 | BPF_JGT | BPF_X:
+ case BPF_JMP | BPF_JLT | BPF_X:
+ case BPF_JMP32 | BPF_JLT | BPF_X:
+ case BPF_JMP | BPF_JGE | BPF_X:
+ case BPF_JMP32 | BPF_JGE | BPF_X:
+ case BPF_JMP | BPF_JLE | BPF_X:
+ case BPF_JMP32 | BPF_JLE | BPF_X:
+ case BPF_JMP | BPF_JNE | BPF_X:
+ case BPF_JMP32 | BPF_JNE | BPF_X:
+ case BPF_JMP | BPF_JSGT | BPF_X:
+ case BPF_JMP32 | BPF_JSGT | BPF_X:
+ case BPF_JMP | BPF_JSLT | BPF_X:
+ case BPF_JMP32 | BPF_JSLT | BPF_X:
+ case BPF_JMP | BPF_JSGE | BPF_X:
+ case BPF_JMP32 | BPF_JSGE | BPF_X:
+ case BPF_JMP | BPF_JSLE | BPF_X:
+ case BPF_JMP32 | BPF_JSLE | BPF_X:
+ case BPF_JMP | BPF_JSET | BPF_X:
+ case BPF_JMP32 | BPF_JSET | BPF_X:
+ rvoff = rv_offset(i, off, ctx);
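+ /* For 32-bit (BPF_JMP32) compares, sign- or zero-extend
+ * rd/rs first so the 64-bit compare instructions only see
+ * the low 32 bits.
+ */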
+ if (!is64) {
+ s = ctx->ninsns;
+ if (is_signed_bpf_cond(BPF_OP(code)))
+ emit_sext_32_rd_rs(&rd, &rs, ctx);
+ else
+ emit_zext_32_rd_rs(&rd, &rs, ctx);
+ e = ctx->ninsns;
+
+ /* Adjust for extra insns */
+ rvoff -= ninsns_rvoff(e - s);
+ }
+
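+ /* RISC-V has no test-and-branch instruction, so JSET is
+ * emitted as an and into t1 followed by a branch against
+ * zero.
+ */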
+ if (BPF_OP(code) == BPF_JSET) {
+ /* Adjust for the and insn emitted below */
+ rvoff -= 4;
+ emit_and(RV_REG_T1, rd, rs, ctx);
+ emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff,
+ ctx);
+ } else {
+ emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
+ }
+ break;
+
+ /* IF (dst COND imm) JUMP off */
+ case BPF_JMP | BPF_JEQ | BPF_K:
+ case BPF_JMP32 | BPF_JEQ | BPF_K:
+ case BPF_JMP | BPF_JGT | BPF_K:
+ case BPF_JMP32 | BPF_JGT | BPF_K:
+ case BPF_JMP | BPF_JLT | BPF_K:
+ case BPF_JMP32 | BPF_JLT | BPF_K:
+ case BPF_JMP | BPF_JGE | BPF_K:
+ case BPF_JMP32 | BPF_JGE | BPF_K:
+ case BPF_JMP | BPF_JLE | BPF_K:
+ case BPF_JMP32 | BPF_JLE | BPF_K:
+ case BPF_JMP | BPF_JNE | BPF_K:
+ case BPF_JMP32 | BPF_JNE | BPF_K:
+ case BPF_JMP | BPF_JSGT | BPF_K:
+ case BPF_JMP32 | BPF_JSGT | BPF_K:
+ case BPF_JMP | BPF_JSLT | BPF_K:
+ case BPF_JMP32 | BPF_JSLT | BPF_K:
+ case BPF_JMP | BPF_JSGE | BPF_K:
+ case BPF_JMP32 | BPF_JSGE | BPF_K:
+ case BPF_JMP | BPF_JSLE | BPF_K:
+ case BPF_JMP32 | BPF_JSLE | BPF_K:
+ rvoff = rv_offset(i, off, ctx);
+ s = ctx->ninsns;
+ if (imm) {
+ emit_imm(RV_REG_T1, imm, ctx);
+ rs = RV_REG_T1;
+ } else {
+ /* If imm is 0, simply use zero register. */
+ rs = RV_REG_ZERO;
+ }
+ if (!is64) {
+ if (is_signed_bpf_cond(BPF_OP(code)))
+ emit_sext_32_rd(&rd, ctx);
+ else
+ emit_zext_32_rd_t1(&rd, ctx);
+ }
+ e = ctx->ninsns;
+
+ /* Adjust for extra insns */
+ rvoff -= ninsns_rvoff(e - s);
+ emit_branch(BPF_OP(code), rd, rs, rvoff, ctx);
+ break;
+
+ case BPF_JMP | BPF_JSET | BPF_K:
+ case BPF_JMP32 | BPF_JSET | BPF_K:
+ rvoff = rv_offset(i, off, ctx);
+ s = ctx->ninsns;
+ if (is_12b_int(imm)) {
+ emit_andi(RV_REG_T1, rd, imm, ctx);
+ } else {
+ emit_imm(RV_REG_T1, imm, ctx);
+ emit_and(RV_REG_T1, rd, RV_REG_T1, ctx);
+ }
+ /* For jset32, we should clear the upper 32 bits of t1, but
+ * sign-extension is sufficient here and saves one instruction,
+ * as t1 is used only in comparison against zero.
+ */
+ if (!is64 && imm < 0)
+ emit_addiw(RV_REG_T1, RV_REG_T1, 0, ctx);
+ e = ctx->ninsns;
+ rvoff -= ninsns_rvoff(e - s);
+ emit_branch(BPF_JNE, RV_REG_T1, RV_REG_ZERO, rvoff, ctx);
+ break;
+
+ /* function call */
+ case BPF_JMP | BPF_CALL:
+ {
+ bool fixed_addr;
+ u64 addr;
+
+ mark_call(ctx);
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+ &addr, &fixed_addr);
+ if (ret < 0)
+ return ret;
+
+ ret = emit_call(addr, fixed_addr, ctx);
+ if (ret)
+ return ret;
+
+ if (insn->src_reg != BPF_PSEUDO_CALL)
+ emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
+ break;
+ }
+ /* tail call */
+ case BPF_JMP | BPF_TAIL_CALL:
+ if (emit_bpf_tail_call(i, ctx))
+ return -1;
+ break;
+
+ /* function return */
+ case BPF_JMP | BPF_EXIT:
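+ /* The last instruction falls through to the epilogue, so
+ * no jump is needed there.
+ */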
+ if (i == ctx->prog->len - 1)
+ break;
+
+ rvoff = epilogue_offset(ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
+ if (ret)
+ return ret;
+ break;
+
+ /* dst = imm64 */
+ case BPF_LD | BPF_IMM | BPF_DW:
+ {
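+ /* A 64-bit immediate spans two BPF instructions; the upper
+ * 32 bits live in the next insn's imm field. Returning 1
+ * tells the caller to skip that insn.
+ */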
+ struct bpf_insn insn1 = insn[1];
+ u64 imm64;
+
+ imm64 = (u64)insn1.imm << 32 | (u32)imm;
+ emit_imm(rd, imm64, ctx);
+ return 1;
+ }
+
+ /* LDX: dst = *(size *)(src + off) */
+ case BPF_LDX | BPF_MEM | BPF_B:
+ if (is_12b_int(off)) {
+ emit(rv_lbu(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ emit(rv_lbu(rd, 0, RV_REG_T1), ctx);
+ if (insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ case BPF_LDX | BPF_MEM | BPF_H:
+ if (is_12b_int(off)) {
+ emit(rv_lhu(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ emit(rv_lhu(rd, 0, RV_REG_T1), ctx);
+ if (insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ case BPF_LDX | BPF_MEM | BPF_W:
+ if (is_12b_int(off)) {
+ emit(rv_lwu(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ emit(rv_lwu(rd, 0, RV_REG_T1), ctx);
+ if (insn_is_zext(&insn[1]))
+ return 1;
+ break;
+ case BPF_LDX | BPF_MEM | BPF_DW:
+ if (is_12b_int(off)) {
+ emit_ld(rd, off, rs, ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rs, ctx);
+ emit_ld(rd, 0, RV_REG_T1, ctx);
+ break;
+
+ /* speculation barrier */
+ case BPF_ST | BPF_NOSPEC:
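+ /* Nothing is emitted here: this JIT treats BPF_NOSPEC as a
+ * no-op on RISC-V.
+ */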
+ break;
+
+ /* ST: *(size *)(dst + off) = imm */
+ case BPF_ST | BPF_MEM | BPF_B:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit(rv_sb(rd, off, RV_REG_T1), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
+ break;
+
+ case BPF_ST | BPF_MEM | BPF_H:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit(rv_sh(rd, off, RV_REG_T1), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
+ break;
+ case BPF_ST | BPF_MEM | BPF_W:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit_sw(rd, off, RV_REG_T1, ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
+ break;
+ case BPF_ST | BPF_MEM | BPF_DW:
+ emit_imm(RV_REG_T1, imm, ctx);
+ if (is_12b_int(off)) {
+ emit_sd(rd, off, RV_REG_T1, ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
+ break;
+
+ /* STX: *(size *)(dst + off) = src */
+ case BPF_STX | BPF_MEM | BPF_B:
+ if (is_12b_int(off)) {
+ emit(rv_sb(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+ break;
+ case BPF_STX | BPF_MEM | BPF_H:
+ if (is_12b_int(off)) {
+ emit(rv_sh(rd, off, rs), ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+ break;
+ case BPF_STX | BPF_MEM | BPF_W:
+ if (is_12b_int(off)) {
+ emit_sw(rd, off, rs, ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit_sw(RV_REG_T1, 0, rs, ctx);
+ break;
+ case BPF_STX | BPF_MEM | BPF_DW:
+ if (is_12b_int(off)) {
+ emit_sd(rd, off, rs, ctx);
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ emit_sd(RV_REG_T1, 0, rs, ctx);
+ break;
+ /* STX XADD: lock *(u32 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_W:
+ /* STX XADD: lock *(u64 *)(dst + off) += src */
+ case BPF_STX | BPF_XADD | BPF_DW:
+ if (off) {
+ if (is_12b_int(off)) {
+ emit_addi(RV_REG_T1, rd, off, ctx);
+ } else {
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ }
+
+ rd = RV_REG_T1;
+ }
+
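+ /* amoadd with destination zero discards the old memory
+ * value; the aq/rl bits are both clear, so this is a
+ * relaxed atomic add.
+ */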
+ emit(BPF_SIZE(code) == BPF_W ?
+ rv_amoadd_w(RV_REG_ZERO, rs, rd, 0, 0) :
+ rv_amoadd_d(RV_REG_ZERO, rs, rd, 0, 0), ctx);
+ break;
+ default:
+ pr_err("bpf-jit: unknown opcode %02x\n", code);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+void bpf_jit_build_prologue(struct rv_jit_context *ctx)
+{
+ int stack_adjust = 0, store_offset, bpf_stack_adjust;
+
+ bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
+ if (bpf_stack_adjust)
+ mark_fp(ctx);
+
+ if (seen_reg(RV_REG_RA, ctx))
+ stack_adjust += 8;
+ stack_adjust += 8; /* RV_REG_FP */
+ if (seen_reg(RV_REG_S1, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S2, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S3, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S4, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S5, ctx))
+ stack_adjust += 8;
+ if (seen_reg(RV_REG_S6, ctx))
+ stack_adjust += 8;
+
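+ /* The RISC-V psABI requires the stack pointer to stay
+ * 16-byte aligned.
+ */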
+ stack_adjust = round_up(stack_adjust, 16);
+ stack_adjust += bpf_stack_adjust;
+
+ store_offset = stack_adjust - 8;
+
+ /* The first instruction always sets the tail-call-counter
+ * (TCC) register; tail calls skip over it. Force a 4-byte
+ * (non-compressed) encoding so the skip offset is constant.
+ */
+ emit(rv_addi(RV_REG_TCC, RV_REG_ZERO, MAX_TAIL_CALL_CNT), ctx);
+
+ emit_addi(RV_REG_SP, RV_REG_SP, -stack_adjust, ctx);
+
+ if (seen_reg(RV_REG_RA, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_RA, ctx);
+ store_offset -= 8;
+ }
+ emit_sd(RV_REG_SP, store_offset, RV_REG_FP, ctx);
+ store_offset -= 8;
+ if (seen_reg(RV_REG_S1, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S1, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S2, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S2, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S3, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S3, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S4, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S4, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S5, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S5, ctx);
+ store_offset -= 8;
+ }
+ if (seen_reg(RV_REG_S6, ctx)) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
+ store_offset -= 8;
+ }
+
+ emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
+
+ if (bpf_stack_adjust)
+ emit_addi(RV_REG_S5, RV_REG_SP, bpf_stack_adjust, ctx);
+
+ /* The program contains calls and tail calls, so RV_REG_TCC
+ * needs to be saved across calls.
+ */
+ if (seen_tail_call(ctx) && seen_call(ctx))
+ emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
+
+ ctx->stack_size = stack_adjust;
+}
+
+void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
+{
+ __build_epilogue(false, ctx);
+}
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
new file mode 100644
index 000000000..ef17bc805
--- /dev/null
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -0,0 +1,194 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Common functionality for RV32 and RV64 BPF JIT compilers
+ *
+ * Copyright (c) 2019 Björn Töpel <bjorn.topel@gmail.com>
+ *
+ */
+
+#include <linux/bpf.h>
+#include <linux/filter.h>
+#include "bpf_jit.h"
+
+/* Number of iterations to try until offsets converge. */
+#define NR_JIT_ITERATIONS 16
+
+static int build_body(struct rv_jit_context *ctx, bool extra_pass, int *offset)
+{
+ const struct bpf_prog *prog = ctx->prog;
+ int i;
+
+ for (i = 0; i < prog->len; i++) {
+ const struct bpf_insn *insn = &prog->insnsi[i];
+ int ret;
+
+ ret = bpf_jit_emit_insn(insn, ctx, extra_pass);
+ /* BPF_LD | BPF_IMM | BPF_DW: skip the next instruction. */
+ if (ret > 0)
+ i++;
+ if (offset)
+ offset[i] = ctx->ninsns;
+ if (ret < 0)
+ return ret;
+ }
+ return 0;
+}
+
+bool bpf_jit_needs_zext(void)
+{
+ return true;
+}
+
+struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
+{
+ bool tmp_blinded = false, extra_pass = false;
+ struct bpf_prog *tmp, *orig_prog = prog;
+ int pass = 0, prev_ninsns = 0, i;
+ struct rv_jit_data *jit_data;
+ struct rv_jit_context *ctx;
+ unsigned int image_size = 0;
+
+ if (!prog->jit_requested)
+ return orig_prog;
+
+ tmp = bpf_jit_blind_constants(prog);
+ if (IS_ERR(tmp))
+ return orig_prog;
+ if (tmp != prog) {
+ tmp_blinded = true;
+ prog = tmp;
+ }
+
+ jit_data = prog->aux->jit_data;
+ if (!jit_data) {
+ jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
+ if (!jit_data) {
+ prog = orig_prog;
+ goto out;
+ }
+ prog->aux->jit_data = jit_data;
+ }
+
+ ctx = &jit_data->ctx;
+
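+ /* If offsets were already computed, this is an extra pass
+ * (e.g. with call addresses now resolved): reuse the
+ * existing image and offsets and only re-emit the code.
+ */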
+ if (ctx->offset) {
+ extra_pass = true;
+ image_size = sizeof(*ctx->insns) * ctx->ninsns;
+ goto skip_init_ctx;
+ }
+
+ ctx->prog = prog;
+ ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
+ if (!ctx->offset) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ if (build_body(ctx, extra_pass, NULL)) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
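+ /* Seed each offset with a pessimistic per-insn estimate so
+ * that successive passes can only shrink the image until
+ * the offsets converge.
+ */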
+ for (i = 0; i < prog->len; i++) {
+ prev_ninsns += 32;
+ ctx->offset[i] = prev_ninsns;
+ }
+
+ for (i = 0; i < NR_JIT_ITERATIONS; i++) {
+ pass++;
+ ctx->ninsns = 0;
+
+ bpf_jit_build_prologue(ctx);
+ ctx->prologue_len = ctx->ninsns;
+
+ if (build_body(ctx, extra_pass, ctx->offset)) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ ctx->epilogue_offset = ctx->ninsns;
+ bpf_jit_build_epilogue(ctx);
+
+ if (ctx->ninsns == prev_ninsns) {
+ if (jit_data->header)
+ break;
+
+ image_size = sizeof(*ctx->insns) * ctx->ninsns;
+ jit_data->header =
+ bpf_jit_binary_alloc(image_size,
+ &jit_data->image,
+ sizeof(u32),
+ bpf_fill_ill_insns);
+ if (!jit_data->header) {
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+ ctx->insns = (u16 *)jit_data->image;
+ /*
+ * Now that the image is allocated, it can potentially
+ * shrink further (auipc/jalr -> jal).
+ */
+ }
+ prev_ninsns = ctx->ninsns;
+ }
+
+ if (i == NR_JIT_ITERATIONS) {
+ pr_err("bpf-jit: image did not converge in <%d passes!\n", i);
+ if (jit_data->header)
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+
+skip_init_ctx:
+ pass++;
+ ctx->ninsns = 0;
+
+ bpf_jit_build_prologue(ctx);
+ if (build_body(ctx, extra_pass, NULL)) {
+ bpf_jit_binary_free(jit_data->header);
+ prog = orig_prog;
+ goto out_offset;
+ }
+ bpf_jit_build_epilogue(ctx);
+
+ if (bpf_jit_enable > 1)
+ bpf_jit_dump(prog->len, image_size, pass, ctx->insns);
+
+ prog->bpf_func = (void *)ctx->insns;
+ prog->jited = 1;
+ prog->jited_len = image_size;
+
+ bpf_flush_icache(jit_data->header, ctx->insns + ctx->ninsns);
+
+ if (!prog->is_func || extra_pass) {
+ bpf_jit_binary_lock_ro(jit_data->header);
+ for (i = 0; i < prog->len; i++)
+ ctx->offset[i] = ninsns_rvoff(ctx->offset[i]);
+ bpf_prog_fill_jited_linfo(prog, ctx->offset);
+out_offset:
+ kfree(ctx->offset);
+ kfree(jit_data);
+ prog->aux->jit_data = NULL;
+ }
+out:
+
+ if (tmp_blinded)
+ bpf_jit_prog_release_other(prog, prog == orig_prog ?
+ tmp : orig_prog);
+ return prog;
+}
+
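+ /* Executable JIT memory comes from the dedicated BPF JIT
+ * region of the kernel address space.
+ */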
+void *bpf_jit_alloc_exec(unsigned long size)
+{
+ return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
+ BPF_JIT_REGION_END, GFP_KERNEL,
+ PAGE_KERNEL, 0, NUMA_NO_NODE,
+ __builtin_return_address(0));
+}
+
+void bpf_jit_free_exec(void *addr)
+{
+ return vfree(addr);
+}