author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-04-11 08:27:49 +0000
commit    ace9429bb58fd418f0c81d4c2835699bddf6bde6 (patch)
tree      b2d64bc10158fdd5497876388cd68142ca374ed3 /arch/riscv/include/asm
parent    Initial commit. (diff)
Adding upstream version 6.6.15.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/include/asm')
-rw-r--r--  arch/riscv/include/asm/Kbuild  11
-rw-r--r--  arch/riscv/include/asm/acenv.h  11
-rw-r--r--  arch/riscv/include/asm/acpi.h  84
-rw-r--r--  arch/riscv/include/asm/alternative-macros.h  166
-rw-r--r--  arch/riscv/include/asm/alternative.h  70
-rw-r--r--  arch/riscv/include/asm/asm-extable.h  71
-rw-r--r--  arch/riscv/include/asm/asm-offsets.h  1
-rw-r--r--  arch/riscv/include/asm/asm-prototypes.h  32
-rw-r--r--  arch/riscv/include/asm/asm.h  169
-rw-r--r--  arch/riscv/include/asm/assembler.h  82
-rw-r--r--  arch/riscv/include/asm/atomic.h  366
-rw-r--r--  arch/riscv/include/asm/barrier.h  78
-rw-r--r--  arch/riscv/include/asm/bitops.h  204
-rw-r--r--  arch/riscv/include/asm/bug.h  92
-rw-r--r--  arch/riscv/include/asm/cache.h  40
-rw-r--r--  arch/riscv/include/asm/cacheflush.h  75
-rw-r--r--  arch/riscv/include/asm/cacheinfo.h  20
-rw-r--r--  arch/riscv/include/asm/cfi.h  22
-rw-r--r--  arch/riscv/include/asm/clint.h  26
-rw-r--r--  arch/riscv/include/asm/clocksource.h  7
-rw-r--r--  arch/riscv/include/asm/cmpxchg.h  363
-rw-r--r--  arch/riscv/include/asm/compat.h  129
-rw-r--r--  arch/riscv/include/asm/cpu.h  8
-rw-r--r--  arch/riscv/include/asm/cpu_ops.h  45
-rw-r--r--  arch/riscv/include/asm/cpu_ops_sbi.h  27
-rw-r--r--  arch/riscv/include/asm/cpufeature.h  35
-rw-r--r--  arch/riscv/include/asm/cpuidle.h  24
-rw-r--r--  arch/riscv/include/asm/csr.h  518
-rw-r--r--  arch/riscv/include/asm/current.h  40
-rw-r--r--  arch/riscv/include/asm/delay.h  20
-rw-r--r--  arch/riscv/include/asm/dma-noncoherent.h  28
-rw-r--r--  arch/riscv/include/asm/efi.h  50
-rw-r--r--  arch/riscv/include/asm/elf.h  163
-rw-r--r--  arch/riscv/include/asm/entry-common.h  11
-rw-r--r--  arch/riscv/include/asm/errata_list.h  164
-rw-r--r--  arch/riscv/include/asm/extable.h  52
-rw-r--r--  arch/riscv/include/asm/fence.h  12
-rw-r--r--  arch/riscv/include/asm/fixmap.h  67
-rw-r--r--  arch/riscv/include/asm/ftrace.h  156
-rw-r--r--  arch/riscv/include/asm/futex.h  104
-rw-r--r--  arch/riscv/include/asm/gdb_xml.h  116
-rw-r--r--  arch/riscv/include/asm/gpr-num.h  85
-rw-r--r--  arch/riscv/include/asm/hugetlb.h  51
-rw-r--r--  arch/riscv/include/asm/hwcap.h  142
-rw-r--r--  arch/riscv/include/asm/hwprobe.h  18
-rw-r--r--  arch/riscv/include/asm/image.h  65
-rw-r--r--  arch/riscv/include/asm/insn-def.h  199
-rw-r--r--  arch/riscv/include/asm/insn.h  431
-rw-r--r--  arch/riscv/include/asm/io.h  143
-rw-r--r--  arch/riscv/include/asm/irq.h  19
-rw-r--r--  arch/riscv/include/asm/irq_stack.h  30
-rw-r--r--  arch/riscv/include/asm/irq_work.h  10
-rw-r--r--  arch/riscv/include/asm/irqflags.h  55
-rw-r--r--  arch/riscv/include/asm/jump_label.h  62
-rw-r--r--  arch/riscv/include/asm/kasan.h  45
-rw-r--r--  arch/riscv/include/asm/kdebug.h  12
-rw-r--r--  arch/riscv/include/asm/kexec.h  72
-rw-r--r--  arch/riscv/include/asm/kfence.h  30
-rw-r--r--  arch/riscv/include/asm/kgdb.h  113
-rw-r--r--  arch/riscv/include/asm/kprobes.h  54
-rw-r--r--  arch/riscv/include/asm/kvm_aia.h  174
-rw-r--r--  arch/riscv/include/asm/kvm_aia_aplic.h  58
-rw-r--r--  arch/riscv/include/asm/kvm_aia_imsic.h  38
-rw-r--r--  arch/riscv/include/asm/kvm_host.h  357
-rw-r--r--  arch/riscv/include/asm/kvm_types.h  7
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_fp.h  59
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_insn.h  48
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_pmu.h  107
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_sbi.h  78
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_timer.h  52
-rw-r--r--  arch/riscv/include/asm/kvm_vcpu_vector.h  80
-rw-r--r--  arch/riscv/include/asm/linkage.h  12
-rw-r--r--  arch/riscv/include/asm/mmio.h  151
-rw-r--r--  arch/riscv/include/asm/mmiowb.h  15
-rw-r--r--  arch/riscv/include/asm/mmu.h  33
-rw-r--r--  arch/riscv/include/asm/mmu_context.h  40
-rw-r--r--  arch/riscv/include/asm/mmzone.h  13
-rw-r--r--  arch/riscv/include/asm/module.h  130
-rw-r--r--  arch/riscv/include/asm/module.lds.h  9
-rw-r--r--  arch/riscv/include/asm/numa.h  8
-rw-r--r--  arch/riscv/include/asm/page.h  203
-rw-r--r--  arch/riscv/include/asm/patch.h  15
-rw-r--r--  arch/riscv/include/asm/pci.h  33
-rw-r--r--  arch/riscv/include/asm/perf_event.h  20
-rw-r--r--  arch/riscv/include/asm/pgalloc.h  163
-rw-r--r--  arch/riscv/include/asm/pgtable-32.h  36
-rw-r--r--  arch/riscv/include/asm/pgtable-64.h  411
-rw-r--r--  arch/riscv/include/asm/pgtable-bits.h  41
-rw-r--r--  arch/riscv/include/asm/pgtable.h  931
-rw-r--r--  arch/riscv/include/asm/probes.h  24
-rw-r--r--  arch/riscv/include/asm/processor.h  139
-rw-r--r--  arch/riscv/include/asm/ptdump.h  22
-rw-r--r--  arch/riscv/include/asm/ptrace.h  183
-rw-r--r--  arch/riscv/include/asm/sbi.h  343
-rw-r--r--  arch/riscv/include/asm/seccomp.h  20
-rw-r--r--  arch/riscv/include/asm/sections.h  34
-rw-r--r--  arch/riscv/include/asm/semihost.h  26
-rw-r--r--  arch/riscv/include/asm/set_memory.h  62
-rw-r--r--  arch/riscv/include/asm/signal.h  12
-rw-r--r--  arch/riscv/include/asm/signal32.h  18
-rw-r--r--  arch/riscv/include/asm/smp.h  128
-rw-r--r--  arch/riscv/include/asm/soc.h  24
-rw-r--r--  arch/riscv/include/asm/sparsemem.h  15
-rw-r--r--  arch/riscv/include/asm/stackprotector.h  22
-rw-r--r--  arch/riscv/include/asm/stacktrace.h  24
-rw-r--r--  arch/riscv/include/asm/string.h  42
-rw-r--r--  arch/riscv/include/asm/suspend.h  58
-rw-r--r--  arch/riscv/include/asm/switch_to.h  87
-rw-r--r--  arch/riscv/include/asm/syscall.h  102
-rw-r--r--  arch/riscv/include/asm/syscall_wrapper.h  82
-rw-r--r--  arch/riscv/include/asm/thread_info.h  104
-rw-r--r--  arch/riscv/include/asm/timex.h  91
-rw-r--r--  arch/riscv/include/asm/tlb.h  21
-rw-r--r--  arch/riscv/include/asm/tlbflush.h  64
-rw-r--r--  arch/riscv/include/asm/topology.h  21
-rw-r--r--  arch/riscv/include/asm/uaccess.h  341
-rw-r--r--  arch/riscv/include/asm/unistd.h  26
-rw-r--r--  arch/riscv/include/asm/uprobes.h  51
-rw-r--r--  arch/riscv/include/asm/vdso.h  41
-rw-r--r--  arch/riscv/include/asm/vdso/clocksource.h  8
-rw-r--r--  arch/riscv/include/asm/vdso/data.h  17
-rw-r--r--  arch/riscv/include/asm/vdso/gettimeofday.h  96
-rw-r--r--  arch/riscv/include/asm/vdso/processor.h  32
-rw-r--r--  arch/riscv/include/asm/vdso/vsyscall.h  27
-rw-r--r--  arch/riscv/include/asm/vector.h  219
-rw-r--r--  arch/riscv/include/asm/vendorid_list.h  12
-rw-r--r--  arch/riscv/include/asm/vermagic.h  9
-rw-r--r--  arch/riscv/include/asm/vmalloc.h  83
-rw-r--r--  arch/riscv/include/asm/word-at-a-time.h  48
-rw-r--r--  arch/riscv/include/asm/xip_fixup.h  31
130 files changed, 11526 insertions, 0 deletions
diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
new file mode 100644
index 0000000000..504f8b7e72
--- /dev/null
+++ b/arch/riscv/include/asm/Kbuild
@@ -0,0 +1,11 @@
+# SPDX-License-Identifier: GPL-2.0
+generic-y += early_ioremap.h
+generic-y += flat.h
+generic-y += kvm_para.h
+generic-y += parport.h
+generic-y += spinlock.h
+generic-y += spinlock_types.h
+generic-y += qrwlock.h
+generic-y += qrwlock_types.h
+generic-y += user.h
+generic-y += vmlinux.lds.h
diff --git a/arch/riscv/include/asm/acenv.h b/arch/riscv/include/asm/acenv.h
new file mode 100644
index 0000000000..43ae2e32c7
--- /dev/null
+++ b/arch/riscv/include/asm/acenv.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * RISC-V specific ACPICA environments and implementation
+ */
+
+#ifndef _ASM_ACENV_H
+#define _ASM_ACENV_H
+
+/* This header is required unconditionally by the ACPI core */
+
+#endif /* _ASM_ACENV_H */
diff --git a/arch/riscv/include/asm/acpi.h b/arch/riscv/include/asm/acpi.h
new file mode 100644
index 0000000000..d5604d2073
--- /dev/null
+++ b/arch/riscv/include/asm/acpi.h
@@ -0,0 +1,84 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013-2014, Linaro Ltd.
+ * Author: Al Stone <al.stone@linaro.org>
+ * Author: Graeme Gregory <graeme.gregory@linaro.org>
+ * Author: Hanjun Guo <hanjun.guo@linaro.org>
+ *
+ * Copyright (C) 2021-2023, Ventana Micro Systems Inc.
+ * Author: Sunil V L <sunilvl@ventanamicro.com>
+ */
+
+#ifndef _ASM_ACPI_H
+#define _ASM_ACPI_H
+
+/* Basic configuration for ACPI */
+#ifdef CONFIG_ACPI
+
+typedef u64 phys_cpuid_t;
+#define PHYS_CPUID_INVALID INVALID_HARTID
+
+/* ACPI table mapping after acpi_permanent_mmap is set */
+void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size);
+#define acpi_os_ioremap acpi_os_ioremap
+
+#define acpi_strict 1 /* No out-of-spec workarounds on RISC-V */
+extern int acpi_disabled;
+extern int acpi_noirq;
+extern int acpi_pci_disabled;
+
+static inline void disable_acpi(void)
+{
+ acpi_disabled = 1;
+ acpi_pci_disabled = 1;
+ acpi_noirq = 1;
+}
+
+static inline void enable_acpi(void)
+{
+ acpi_disabled = 0;
+ acpi_pci_disabled = 0;
+ acpi_noirq = 0;
+}
+
+/*
+ * The ACPI core's processor driver needs this macro to find out
+ * whether a given CPU has already been mapped (i.e. from its CPU
+ * hardware ID to its logical CPU ID).
+ */
+#define cpu_physical_id(cpu) cpuid_to_hartid_map(cpu)
+
+/*
+ * Since the MADT must provide at least one RINTC structure, the
+ * CPU will always be available in the MADT on RISC-V.
+ */
+static inline bool acpi_has_cpu_in_madt(void)
+{
+ return true;
+}
+
+static inline void arch_fix_phys_package_id(int num, u32 slot) { }
+
+void acpi_init_rintc_map(void);
+struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu);
+u32 get_acpi_id_for_cpu(int cpu);
+int acpi_get_riscv_isa(struct acpi_table_header *table,
+ unsigned int cpu, const char **isa);
+
+static inline int acpi_numa_get_nid(unsigned int cpu) { return NUMA_NO_NODE; }
+#else
+static inline void acpi_init_rintc_map(void) { }
+static inline struct acpi_madt_rintc *acpi_cpu_get_madt_rintc(int cpu)
+{
+ return NULL;
+}
+
+static inline int acpi_get_riscv_isa(struct acpi_table_header *table,
+ unsigned int cpu, const char **isa)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_ACPI */
+
+#endif /*_ASM_ACPI_H*/
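The disable_acpi()/enable_acpi() helpers above simply toggle the three global flags together. A minimal sketch of a boot-parameter hook that could drive them (hypothetical parser for illustration only; the kernel's real "acpi=" handling lives in the arch setup code):

	static int __init demo_parse_acpi(char *arg)
	{
		if (!arg)
			return -EINVAL;

		if (!strcmp(arg, "off"))
			disable_acpi();	/* sets acpi_disabled, acpi_pci_disabled, acpi_noirq */
		else if (!strcmp(arg, "on"))
			enable_acpi();

		return 0;
	}
	early_param("acpi", demo_parse_acpi);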
diff --git a/arch/riscv/include/asm/alternative-macros.h b/arch/riscv/include/asm/alternative-macros.h
new file mode 100644
index 0000000000..721ec275ce
--- /dev/null
+++ b/arch/riscv/include/asm/alternative-macros.h
@@ -0,0 +1,166 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_ALTERNATIVE_MACROS_H
+#define __ASM_ALTERNATIVE_MACROS_H
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+
+#ifdef __ASSEMBLY__
+
+.macro ALT_ENTRY oldptr newptr vendor_id patch_id new_len
+ .4byte \oldptr - .
+ .4byte \newptr - .
+ .2byte \vendor_id
+ .2byte \new_len
+ .4byte \patch_id
+.endm
+
+.macro ALT_NEW_CONTENT vendor_id, patch_id, enable = 1, new_c
+ .if \enable
+ .pushsection .alternative, "a"
+ ALT_ENTRY 886b, 888f, \vendor_id, \patch_id, 889f - 888f
+ .popsection
+ .subsection 1
+888 :
+ .option push
+ .option norvc
+ .option norelax
+ \new_c
+ .option pop
+889 :
+ .org . - (889b - 888b) + (887b - 886b)
+ .org . - (887b - 886b) + (889b - 888b)
+ .previous
+ .endif
+.endm
+
+.macro ALTERNATIVE_CFG old_c, new_c, vendor_id, patch_id, enable
+886 :
+ .option push
+ .option norvc
+ .option norelax
+ \old_c
+ .option pop
+887 :
+ ALT_NEW_CONTENT \vendor_id, \patch_id, \enable, "\new_c"
+.endm
+
+.macro ALTERNATIVE_CFG_2 old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \
+ new_c_2, vendor_id_2, patch_id_2, enable_2
+ ALTERNATIVE_CFG "\old_c", "\new_c_1", \vendor_id_1, \patch_id_1, \enable_1
+ ALT_NEW_CONTENT \vendor_id_2, \patch_id_2, \enable_2, "\new_c_2"
+.endm
+
+#define __ALTERNATIVE_CFG(...) ALTERNATIVE_CFG __VA_ARGS__
+#define __ALTERNATIVE_CFG_2(...) ALTERNATIVE_CFG_2 __VA_ARGS__
+
+#else /* !__ASSEMBLY__ */
+
+#include <asm/asm.h>
+#include <linux/stringify.h>
+
+#define ALT_ENTRY(oldptr, newptr, vendor_id, patch_id, newlen) \
+ ".4byte ((" oldptr ") - .) \n" \
+ ".4byte ((" newptr ") - .) \n" \
+ ".2byte " vendor_id "\n" \
+ ".2byte " newlen "\n" \
+ ".4byte " patch_id "\n"
+
+#define ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c) \
+ ".if " __stringify(enable) " == 1\n" \
+ ".pushsection .alternative, \"a\"\n" \
+ ALT_ENTRY("886b", "888f", __stringify(vendor_id), __stringify(patch_id), "889f - 888f") \
+ ".popsection\n" \
+ ".subsection 1\n" \
+ "888 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
+ new_c "\n" \
+ ".option pop\n" \
+ "889 :\n" \
+ ".org . - (887b - 886b) + (889b - 888b)\n" \
+ ".org . - (889b - 888b) + (887b - 886b)\n" \
+ ".previous\n" \
+ ".endif\n"
+
+#define __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, enable) \
+ "886 :\n" \
+ ".option push\n" \
+ ".option norvc\n" \
+ ".option norelax\n" \
+ old_c "\n" \
+ ".option pop\n" \
+ "887 :\n" \
+ ALT_NEW_CONTENT(vendor_id, patch_id, enable, new_c)
+
+#define __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1, \
+ new_c_2, vendor_id_2, patch_id_2, enable_2) \
+ __ALTERNATIVE_CFG(old_c, new_c_1, vendor_id_1, patch_id_1, enable_1) \
+ ALT_NEW_CONTENT(vendor_id_2, patch_id_2, enable_2, new_c_2)
+
+#endif /* __ASSEMBLY__ */
+
+#define _ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, CONFIG_k) \
+ __ALTERNATIVE_CFG(old_c, new_c, vendor_id, patch_id, IS_ENABLED(CONFIG_k))
+
+#define _ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_c_2, vendor_id_2, patch_id_2, CONFIG_k_2) \
+ __ALTERNATIVE_CFG_2(old_c, new_c_1, vendor_id_1, patch_id_1, IS_ENABLED(CONFIG_k_1), \
+ new_c_2, vendor_id_2, patch_id_2, IS_ENABLED(CONFIG_k_2))
+
+#else /* CONFIG_RISCV_ALTERNATIVE */
+#ifdef __ASSEMBLY__
+
+.macro ALTERNATIVE_CFG old_c
+ \old_c
+.endm
+
+#define _ALTERNATIVE_CFG(old_c, ...) \
+ ALTERNATIVE_CFG old_c
+
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ ALTERNATIVE_CFG old_c
+
+#else /* !__ASSEMBLY__ */
+
+#define __ALTERNATIVE_CFG(old_c) \
+ old_c "\n"
+
+#define _ALTERNATIVE_CFG(old_c, ...) \
+ __ALTERNATIVE_CFG(old_c)
+
+#define _ALTERNATIVE_CFG_2(old_c, ...) \
+ __ALTERNATIVE_CFG(old_c)
+
+#endif /* __ASSEMBLY__ */
+#endif /* CONFIG_RISCV_ALTERNATIVE */
+
+/*
+ * Usage:
+ * ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k)
+ * in the assembly code. Otherwise,
+ * asm(ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k));
+ *
+ * old_content: The old content, which may be replaced with the new content.
+ * new_content: The new content.
+ * vendor_id: The CPU vendor ID.
+ * patch_id: The patch ID (erratum ID or cpufeature ID).
+ * CONFIG_k: The Kconfig option guarding this patch ID. When the option is
+ * disabled, the old content is always executed.
+ */
+#define ALTERNATIVE(old_content, new_content, vendor_id, patch_id, CONFIG_k) \
+ _ALTERNATIVE_CFG(old_content, new_content, vendor_id, patch_id, CONFIG_k)
+
+/*
+ * ALTERNATIVE_2() covers the case where one vendor wants to replace an
+ * old_content, but another vendor has already used ALTERNATIVE() to
+ * patch its customized content at the same location. Converting the
+ * site from ALTERNATIVE() to ALTERNATIVE_2() appends the second
+ * vendor's customized content.
+ */
+#define ALTERNATIVE_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2) \
+ _ALTERNATIVE_CFG_2(old_content, new_content_1, vendor_id_1, patch_id_1, CONFIG_k_1, \
+ new_content_2, vendor_id_2, patch_id_2, CONFIG_k_2)
+
+#endif
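As a usage sketch (placeholder vendor/patch IDs, not a real erratum): in C code, ALTERNATIVE() expands to inline asm whose patch site is recorded in the .alternative section. Note that the paired .org directives above require the new content to assemble to exactly the same size as the old; with norvc in force, both the fence and the nop below are 4 bytes.

	#define DEMO_VENDOR_ID	0x123	/* placeholder, not a real mvendorid */
	#define DEMO_PATCH_ID	1	/* placeholder erratum/cpufeature ID */

	static inline void demo_fence(void)
	{
		asm volatile(ALTERNATIVE("fence rw, rw",	/* old content */
					 "nop",			/* patched in when enabled */
					 DEMO_VENDOR_ID, DEMO_PATCH_ID,
					 CONFIG_RISCV_ALTERNATIVE)
			     : : : "memory");
	}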
diff --git a/arch/riscv/include/asm/alternative.h b/arch/riscv/include/asm/alternative.h
new file mode 100644
index 0000000000..3c2b59b250
--- /dev/null
+++ b/arch/riscv/include/asm/alternative.h
@@ -0,0 +1,70 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+
+#ifndef __ASM_ALTERNATIVE_H
+#define __ASM_ALTERNATIVE_H
+
+#include <asm/alternative-macros.h>
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_ALTERNATIVE
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <asm/hwcap.h>
+
+#define PATCH_ID_CPUFEATURE_ID(p) lower_16_bits(p)
+#define PATCH_ID_CPUFEATURE_VALUE(p) upper_16_bits(p)
+
+#define RISCV_ALTERNATIVES_BOOT 0 /* alternatives applied during regular boot */
+#define RISCV_ALTERNATIVES_MODULE 1 /* alternatives applied during module-init */
+#define RISCV_ALTERNATIVES_EARLY_BOOT 2 /* alternatives applied before mmu start */
+
+/* add the relative offset to the address of the offset to get the absolute address */
+#define __ALT_PTR(a, f) ((void *)&(a)->f + (a)->f)
+#define ALT_OLD_PTR(a) __ALT_PTR(a, old_offset)
+#define ALT_ALT_PTR(a) __ALT_PTR(a, alt_offset)
+
+void __init apply_boot_alternatives(void);
+void __init apply_early_boot_alternatives(void);
+void apply_module_alternatives(void *start, size_t length);
+
+void riscv_alternative_fix_offsets(void *alt_ptr, unsigned int len,
+ int patch_offset);
+
+struct alt_entry {
+ s32 old_offset; /* offset relative to original instruction or data */
+ s32 alt_offset; /* offset relative to replacement instruction or data */
+ u16 vendor_id; /* CPU vendor ID */
+ u16 alt_len; /* The replacement size */
+ u32 patch_id; /* The patch ID (erratum ID or cpufeature ID) */
+};
+
+void andes_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+void sifive_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+void thead_errata_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned long archid, unsigned long impid,
+ unsigned int stage);
+
+void riscv_cpufeature_patch_func(struct alt_entry *begin, struct alt_entry *end,
+ unsigned int stage);
+
+#else /* CONFIG_RISCV_ALTERNATIVE */
+
+static inline void apply_boot_alternatives(void) { }
+static inline void apply_early_boot_alternatives(void) { }
+static inline void apply_module_alternatives(void *start, size_t length) { }
+
+#endif /* CONFIG_RISCV_ALTERNATIVE */
+
+#endif
+#endif
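The two PATCH_ID_CPUFEATURE_* accessors above fix the packing convention for cpufeature patch IDs; a hypothetical composer (not part of this header) is just their inverse:

	/* Pack a cpufeature ID (low 16 bits) and its expected value (high 16 bits). */
	#define DEMO_PATCH_ID_COMPOSE(cpufeature_id, value) \
		((((u32)(value)) << 16) | ((u32)(cpufeature_id) & 0xffff))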
diff --git a/arch/riscv/include/asm/asm-extable.h b/arch/riscv/include/asm/asm-extable.h
new file mode 100644
index 0000000000..00a96e7a96
--- /dev/null
+++ b/arch/riscv/include/asm/asm-extable.h
@@ -0,0 +1,71 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_ASM_EXTABLE_H
+#define __ASM_ASM_EXTABLE_H
+
+#define EX_TYPE_NONE 0
+#define EX_TYPE_FIXUP 1
+#define EX_TYPE_BPF 2
+#define EX_TYPE_UACCESS_ERR_ZERO 3
+
+#ifdef CONFIG_MMU
+
+#ifdef __ASSEMBLY__
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ .pushsection __ex_table, "a"; \
+ .balign 4; \
+ .long ((insn) - .); \
+ .long ((fixup) - .); \
+ .short (type); \
+ .short (data); \
+ .popsection;
+
+ .macro _asm_extable, insn, fixup
+ __ASM_EXTABLE_RAW(\insn, \fixup, EX_TYPE_FIXUP, 0)
+ .endm
+
+#else /* __ASSEMBLY__ */
+
+#include <linux/bits.h>
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define __ASM_EXTABLE_RAW(insn, fixup, type, data) \
+ ".pushsection __ex_table, \"a\"\n" \
+ ".balign 4\n" \
+ ".long ((" insn ") - .)\n" \
+ ".long ((" fixup ") - .)\n" \
+ ".short (" type ")\n" \
+ ".short (" data ")\n" \
+ ".popsection\n"
+
+#define _ASM_EXTABLE(insn, fixup) \
+ __ASM_EXTABLE_RAW(#insn, #fixup, __stringify(EX_TYPE_FIXUP), "0")
+
+#define EX_DATA_REG_ERR_SHIFT 0
+#define EX_DATA_REG_ERR GENMASK(4, 0)
+#define EX_DATA_REG_ZERO_SHIFT 5
+#define EX_DATA_REG_ZERO GENMASK(9, 5)
+
+#define EX_DATA_REG(reg, gpr) \
+ "((.L__gpr_num_" #gpr ") << " __stringify(EX_DATA_REG_##reg##_SHIFT) ")"
+
+#define _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero) \
+ __DEFINE_ASM_GPR_NUMS \
+ __ASM_EXTABLE_RAW(#insn, #fixup, \
+ __stringify(EX_TYPE_UACCESS_ERR_ZERO), \
+ "(" \
+ EX_DATA_REG(ERR, err) " | " \
+ EX_DATA_REG(ZERO, zero) \
+ ")")
+
+#define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(insn, fixup, err, zero)
+
+#endif /* __ASSEMBLY__ */
+
+#else /* CONFIG_MMU */
+ #define _ASM_EXTABLE_UACCESS_ERR(insn, fixup, err)
+#endif /* CONFIG_MMU */
+
+#endif /* __ASM_ASM_EXTABLE_H */
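A simplified consumer sketch, modeled on the uaccess pattern (macro name illustrative): a faulting load at label 1 resumes at label 2, and the fixup handler uses the ERR/ZERO register fields encoded above to write the error code and zero the destination.

	#define demo_get_user(err, x, ptr)				\
		__asm__ __volatile__ (					\
			"1:	lw %1, %2\n"				\
			"2:\n"						\
			_ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1)	\
			: "+r" (err), "=&r" (x)				\
			: "m" (*(ptr)))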
diff --git a/arch/riscv/include/asm/asm-offsets.h b/arch/riscv/include/asm/asm-offsets.h
new file mode 100644
index 0000000000..d370ee36a1
--- /dev/null
+++ b/arch/riscv/include/asm/asm-offsets.h
@@ -0,0 +1 @@
+#include <generated/asm-offsets.h>
diff --git a/arch/riscv/include/asm/asm-prototypes.h b/arch/riscv/include/asm/asm-prototypes.h
new file mode 100644
index 0000000000..36b955c762
--- /dev/null
+++ b/arch/riscv/include/asm/asm-prototypes.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_PROTOTYPES_H
+#define _ASM_RISCV_PROTOTYPES_H
+
+#include <linux/ftrace.h>
+#include <asm-generic/asm-prototypes.h>
+
+long long __lshrti3(long long a, int b);
+long long __ashrti3(long long a, int b);
+long long __ashlti3(long long a, int b);
+
+
+#define DECLARE_DO_ERROR_INFO(name) asmlinkage void name(struct pt_regs *regs)
+
+DECLARE_DO_ERROR_INFO(do_trap_unknown);
+DECLARE_DO_ERROR_INFO(do_trap_insn_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_insn_fault);
+DECLARE_DO_ERROR_INFO(do_trap_insn_illegal);
+DECLARE_DO_ERROR_INFO(do_trap_load_fault);
+DECLARE_DO_ERROR_INFO(do_trap_load_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_misaligned);
+DECLARE_DO_ERROR_INFO(do_trap_store_fault);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_u);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_s);
+DECLARE_DO_ERROR_INFO(do_trap_ecall_m);
+DECLARE_DO_ERROR_INFO(do_trap_break);
+
+asmlinkage void handle_bad_stack(struct pt_regs *regs);
+asmlinkage void do_page_fault(struct pt_regs *regs);
+asmlinkage void do_irq(struct pt_regs *regs);
+
+#endif /* _ASM_RISCV_PROTOTYPES_H */
diff --git a/arch/riscv/include/asm/asm.h b/arch/riscv/include/asm/asm.h
new file mode 100644
index 0000000000..bfb4c26f11
--- /dev/null
+++ b/arch/riscv/include/asm/asm.h
@@ -0,0 +1,169 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ASM_H
+#define _ASM_RISCV_ASM_H
+
+#ifdef __ASSEMBLY__
+#define __ASM_STR(x) x
+#else
+#define __ASM_STR(x) #x
+#endif
+
+#if __riscv_xlen == 64
+#define __REG_SEL(a, b) __ASM_STR(a)
+#elif __riscv_xlen == 32
+#define __REG_SEL(a, b) __ASM_STR(b)
+#else
+#error "Unexpected __riscv_xlen"
+#endif
+
+#define REG_L __REG_SEL(ld, lw)
+#define REG_S __REG_SEL(sd, sw)
+#define REG_SC __REG_SEL(sc.d, sc.w)
+#define REG_AMOSWAP_AQ __REG_SEL(amoswap.d.aq, amoswap.w.aq)
+#define REG_ASM __REG_SEL(.dword, .word)
+#define SZREG __REG_SEL(8, 4)
+#define LGREG __REG_SEL(3, 2)
+
+#if __SIZEOF_POINTER__ == 8
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .dword
+#define RISCV_SZPTR 8
+#define RISCV_LGPTR 3
+#else
+#define RISCV_PTR ".dword"
+#define RISCV_SZPTR "8"
+#define RISCV_LGPTR "3"
+#endif
+#elif __SIZEOF_POINTER__ == 4
+#ifdef __ASSEMBLY__
+#define RISCV_PTR .word
+#define RISCV_SZPTR 4
+#define RISCV_LGPTR 2
+#else
+#define RISCV_PTR ".word"
+#define RISCV_SZPTR "4"
+#define RISCV_LGPTR "2"
+#endif
+#else
+#error "Unexpected __SIZEOF_POINTER__"
+#endif
+
+#if (__SIZEOF_INT__ == 4)
+#define RISCV_INT __ASM_STR(.word)
+#define RISCV_SZINT __ASM_STR(4)
+#define RISCV_LGINT __ASM_STR(2)
+#else
+#error "Unexpected __SIZEOF_INT__"
+#endif
+
+#if (__SIZEOF_SHORT__ == 2)
+#define RISCV_SHORT __ASM_STR(.half)
+#define RISCV_SZSHORT __ASM_STR(2)
+#define RISCV_LGSHORT __ASM_STR(1)
+#else
+#error "Unexpected __SIZEOF_SHORT__"
+#endif
+
+#ifdef __ASSEMBLY__
+#include <asm/asm-offsets.h>
+
+/* Common assembly source macros */
+
+/*
+ * NOP sequence
+ */
+.macro nops, num
+ .rept \num
+ nop
+ .endr
+.endm
+
+#ifdef CONFIG_SMP
+#ifdef CONFIG_32BIT
+#define PER_CPU_OFFSET_SHIFT 2
+#else
+#define PER_CPU_OFFSET_SHIFT 3
+#endif
+
+.macro asm_per_cpu dst sym tmp
+ REG_L \tmp, TASK_TI_CPU_NUM(tp)
+ slli \tmp, \tmp, PER_CPU_OFFSET_SHIFT
+ la \dst, __per_cpu_offset
+ add \dst, \dst, \tmp
+ REG_L \tmp, 0(\dst)
+ la \dst, \sym
+ add \dst, \dst, \tmp
+.endm
+#else /* CONFIG_SMP */
+.macro asm_per_cpu dst sym tmp
+ la \dst, \sym
+.endm
+#endif /* CONFIG_SMP */
+
+ /* save all GPs except x1 ~ x5 */
+ .macro save_from_x6_to_x31
+ REG_S x6, PT_T1(sp)
+ REG_S x7, PT_T2(sp)
+ REG_S x8, PT_S0(sp)
+ REG_S x9, PT_S1(sp)
+ REG_S x10, PT_A0(sp)
+ REG_S x11, PT_A1(sp)
+ REG_S x12, PT_A2(sp)
+ REG_S x13, PT_A3(sp)
+ REG_S x14, PT_A4(sp)
+ REG_S x15, PT_A5(sp)
+ REG_S x16, PT_A6(sp)
+ REG_S x17, PT_A7(sp)
+ REG_S x18, PT_S2(sp)
+ REG_S x19, PT_S3(sp)
+ REG_S x20, PT_S4(sp)
+ REG_S x21, PT_S5(sp)
+ REG_S x22, PT_S6(sp)
+ REG_S x23, PT_S7(sp)
+ REG_S x24, PT_S8(sp)
+ REG_S x25, PT_S9(sp)
+ REG_S x26, PT_S10(sp)
+ REG_S x27, PT_S11(sp)
+ REG_S x28, PT_T3(sp)
+ REG_S x29, PT_T4(sp)
+ REG_S x30, PT_T5(sp)
+ REG_S x31, PT_T6(sp)
+ .endm
+
+ /* restore all GPs except x1 ~ x5 */
+ .macro restore_from_x6_to_x31
+ REG_L x6, PT_T1(sp)
+ REG_L x7, PT_T2(sp)
+ REG_L x8, PT_S0(sp)
+ REG_L x9, PT_S1(sp)
+ REG_L x10, PT_A0(sp)
+ REG_L x11, PT_A1(sp)
+ REG_L x12, PT_A2(sp)
+ REG_L x13, PT_A3(sp)
+ REG_L x14, PT_A4(sp)
+ REG_L x15, PT_A5(sp)
+ REG_L x16, PT_A6(sp)
+ REG_L x17, PT_A7(sp)
+ REG_L x18, PT_S2(sp)
+ REG_L x19, PT_S3(sp)
+ REG_L x20, PT_S4(sp)
+ REG_L x21, PT_S5(sp)
+ REG_L x22, PT_S6(sp)
+ REG_L x23, PT_S7(sp)
+ REG_L x24, PT_S8(sp)
+ REG_L x25, PT_S9(sp)
+ REG_L x26, PT_S10(sp)
+ REG_L x27, PT_S11(sp)
+ REG_L x28, PT_T3(sp)
+ REG_L x29, PT_T4(sp)
+ REG_L x30, PT_T5(sp)
+ REG_L x31, PT_T6(sp)
+ .endm
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_ASM_H */
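A small sketch of why the REG_* selectors exist: in C context REG_L expands to the string "ld" on rv64 and "lw" on rv32, so the same inline asm works for both register widths without #ifdefs (function name illustrative).

	static inline unsigned long demo_load_native(const unsigned long *p)
	{
		unsigned long v;

		asm volatile(REG_L " %0, 0(%1)" : "=r" (v) : "r" (p));
		return v;
	}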
diff --git a/arch/riscv/include/asm/assembler.h b/arch/riscv/include/asm/assembler.h
new file mode 100644
index 0000000000..44b1457d3e
--- /dev/null
+++ b/arch/riscv/include/asm/assembler.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 StarFive Technology Co., Ltd.
+ *
+ * Author: Jee Heng Sia <jeeheng.sia@starfivetech.com>
+ */
+
+#ifndef __ASSEMBLY__
+#error "Only include this from assembly code"
+#endif
+
+#ifndef __ASM_ASSEMBLER_H
+#define __ASM_ASSEMBLER_H
+
+#include <asm/asm.h>
+#include <asm/asm-offsets.h>
+#include <asm/csr.h>
+
+/*
+ * suspend_restore_csrs - restore CSRs
+ */
+ .macro suspend_restore_csrs
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_EPC)(a0)
+ csrw CSR_EPC, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_STATUS)(a0)
+ csrw CSR_STATUS, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_BADADDR)(a0)
+ csrw CSR_TVAL, t0
+ REG_L t0, (SUSPEND_CONTEXT_REGS + PT_CAUSE)(a0)
+ csrw CSR_CAUSE, t0
+ .endm
+
+/*
+ * suspend_restore_regs - Restore registers (except A0 and T0-T6)
+ */
+ .macro suspend_restore_regs
+ REG_L ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
+ REG_L sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
+ REG_L gp, (SUSPEND_CONTEXT_REGS + PT_GP)(a0)
+ REG_L tp, (SUSPEND_CONTEXT_REGS + PT_TP)(a0)
+ REG_L s0, (SUSPEND_CONTEXT_REGS + PT_S0)(a0)
+ REG_L s1, (SUSPEND_CONTEXT_REGS + PT_S1)(a0)
+ REG_L a1, (SUSPEND_CONTEXT_REGS + PT_A1)(a0)
+ REG_L a2, (SUSPEND_CONTEXT_REGS + PT_A2)(a0)
+ REG_L a3, (SUSPEND_CONTEXT_REGS + PT_A3)(a0)
+ REG_L a4, (SUSPEND_CONTEXT_REGS + PT_A4)(a0)
+ REG_L a5, (SUSPEND_CONTEXT_REGS + PT_A5)(a0)
+ REG_L a6, (SUSPEND_CONTEXT_REGS + PT_A6)(a0)
+ REG_L a7, (SUSPEND_CONTEXT_REGS + PT_A7)(a0)
+ REG_L s2, (SUSPEND_CONTEXT_REGS + PT_S2)(a0)
+ REG_L s3, (SUSPEND_CONTEXT_REGS + PT_S3)(a0)
+ REG_L s4, (SUSPEND_CONTEXT_REGS + PT_S4)(a0)
+ REG_L s5, (SUSPEND_CONTEXT_REGS + PT_S5)(a0)
+ REG_L s6, (SUSPEND_CONTEXT_REGS + PT_S6)(a0)
+ REG_L s7, (SUSPEND_CONTEXT_REGS + PT_S7)(a0)
+ REG_L s8, (SUSPEND_CONTEXT_REGS + PT_S8)(a0)
+ REG_L s9, (SUSPEND_CONTEXT_REGS + PT_S9)(a0)
+ REG_L s10, (SUSPEND_CONTEXT_REGS + PT_S10)(a0)
+ REG_L s11, (SUSPEND_CONTEXT_REGS + PT_S11)(a0)
+ .endm
+
+/*
+ * copy_page - copy 1 page (4KB) of data from source to destination
+ * @a0 - destination
+ * @a1 - source
+ */
+ .macro copy_page a0, a1
+ lui a2, 0x1
+ add a2, a2, a0
+1 :
+ REG_L t0, 0(a1)
+ REG_L t1, SZREG(a1)
+
+ REG_S t0, 0(a0)
+ REG_S t1, SZREG(a0)
+
+ addi a0, a0, 2 * SZREG
+ addi a1, a1, 2 * SZREG
+ bne a2, a0, 1b
+ .endm
+
+#endif /* __ASM_ASSEMBLER_H */
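For reference, a C-level sketch of what copy_page does: the lui a2, 0x1 computes the destination end (dst + 4096), and each loop iteration moves 2 * SZREG bytes (assumes 4 KiB pages, as the macro's comment states).

	static inline void demo_copy_page(unsigned long *dst, const unsigned long *src)
	{
		unsigned long *end = dst + 4096 / sizeof(unsigned long);

		do {
			dst[0] = src[0];	/* the REG_L t0 / REG_S t0 pair */
			dst[1] = src[1];	/* the REG_L t1 / REG_S t1 pair */
			dst += 2;
			src += 2;
		} while (dst != end);
	}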
diff --git a/arch/riscv/include/asm/atomic.h b/arch/riscv/include/asm/atomic.h
new file mode 100644
index 0000000000..f5dfef6c21
--- /dev/null
+++ b/arch/riscv/include/asm/atomic.h
@@ -0,0 +1,366 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_ATOMIC_H
+#define _ASM_RISCV_ATOMIC_H
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+# include <asm-generic/atomic64.h>
+#else
+# if (__riscv_xlen < 64)
+# error "64-bit atomics require XLEN to be at least 64"
+# endif
+#endif
+
+#include <asm/cmpxchg.h>
+#include <asm/barrier.h>
+
+#define __atomic_acquire_fence() \
+ __asm__ __volatile__(RISCV_ACQUIRE_BARRIER "" ::: "memory")
+
+#define __atomic_release_fence() \
+ __asm__ __volatile__(RISCV_RELEASE_BARRIER "" ::: "memory");
+
+static __always_inline int arch_atomic_read(const atomic_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void arch_atomic_set(atomic_t *v, int i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC64_INIT(i) { (i) }
+static __always_inline s64 arch_atomic64_read(const atomic64_t *v)
+{
+ return READ_ONCE(v->counter);
+}
+static __always_inline void arch_atomic64_set(atomic64_t *v, s64 i)
+{
+ WRITE_ONCE(v->counter, i);
+}
+#endif
+
+/*
+ * First, the atomic ops that have no ordering constraints and therefore don't
+ * have the AQ or RL bits set. These don't return anything, so there's only
+ * one version to worry about.
+ */
+#define ATOMIC_OP(op, asm_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+void arch_atomic##prefix##_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " zero, %1, %0" \
+ : "+A" (v->counter) \
+ : "r" (I) \
+ : "memory"); \
+} \
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_OP (op, asm_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_OP (op, asm_op, I, w, int, ) \
+ ATOMIC_OP (op, asm_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(add, add, i)
+ATOMIC_OPS(sub, add, -i)
+ATOMIC_OPS(and, and, i)
+ATOMIC_OPS( or, or, i)
+ATOMIC_OPS(xor, xor, i)
+
+#undef ATOMIC_OP
+#undef ATOMIC_OPS
+
+/*
+ * Atomic ops that have ordered, relaxed, acquire, and release variants.
+ * These come in two flavors: the arithmetic ops have both fetch and return
+ * versions, while the logical ops only have fetch versions.
+ */
+#define ATOMIC_FETCH_OP(op, asm_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+c_type arch_atomic##prefix##_fetch_##op##_relaxed(c_type i, \
+ atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type " %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+} \
+static __always_inline \
+c_type arch_atomic##prefix##_fetch_##op(c_type i, atomic##prefix##_t *v) \
+{ \
+ register c_type ret; \
+ __asm__ __volatile__ ( \
+ " amo" #asm_op "." #asm_type ".aqrl %1, %2, %0" \
+ : "+A" (v->counter), "=r" (ret) \
+ : "r" (I) \
+ : "memory"); \
+ return ret; \
+}
+
+#define ATOMIC_OP_RETURN(op, asm_op, c_op, I, asm_type, c_type, prefix) \
+static __always_inline \
+c_type arch_atomic##prefix##_##op##_return_relaxed(c_type i, \
+ atomic##prefix##_t *v) \
+{ \
+ return arch_atomic##prefix##_fetch_##op##_relaxed(i, v) c_op I; \
+} \
+static __always_inline \
+c_type arch_atomic##prefix##_##op##_return(c_type i, atomic##prefix##_t *v) \
+{ \
+ return arch_atomic##prefix##_fetch_##op(i, v) c_op I; \
+}
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, c_op, I) \
+ ATOMIC_FETCH_OP( op, asm_op, I, w, int, ) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, w, int, ) \
+ ATOMIC_FETCH_OP( op, asm_op, I, d, s64, 64) \
+ ATOMIC_OP_RETURN(op, asm_op, c_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(add, add, +, i)
+ATOMIC_OPS(sub, add, +, -i)
+
+#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
+#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
+#define arch_atomic_add_return arch_atomic_add_return
+#define arch_atomic_sub_return arch_atomic_sub_return
+
+#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
+#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed
+#define arch_atomic_fetch_add arch_atomic_fetch_add
+#define arch_atomic_fetch_sub arch_atomic_fetch_sub
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
+#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
+#define arch_atomic64_add_return arch_atomic64_add_return
+#define arch_atomic64_sub_return arch_atomic64_sub_return
+
+#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
+#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed
+#define arch_atomic64_fetch_add arch_atomic64_fetch_add
+#define arch_atomic64_fetch_sub arch_atomic64_fetch_sub
+#endif
+
+#undef ATOMIC_OPS
+
+#ifdef CONFIG_GENERIC_ATOMIC64
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, )
+#else
+#define ATOMIC_OPS(op, asm_op, I) \
+ ATOMIC_FETCH_OP(op, asm_op, I, w, int, ) \
+ ATOMIC_FETCH_OP(op, asm_op, I, d, s64, 64)
+#endif
+
+ATOMIC_OPS(and, and, i)
+ATOMIC_OPS( or, or, i)
+ATOMIC_OPS(xor, xor, i)
+
+#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
+#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
+#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed
+#define arch_atomic_fetch_and arch_atomic_fetch_and
+#define arch_atomic_fetch_or arch_atomic_fetch_or
+#define arch_atomic_fetch_xor arch_atomic_fetch_xor
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
+#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
+#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed
+#define arch_atomic64_fetch_and arch_atomic64_fetch_and
+#define arch_atomic64_fetch_or arch_atomic64_fetch_or
+#define arch_atomic64_fetch_xor arch_atomic64_fetch_xor
+#endif
+
+#undef ATOMIC_OPS
+
+#undef ATOMIC_FETCH_OP
+#undef ATOMIC_OP_RETURN
+
+/* This is required to provide a full barrier on success. */
+static __always_inline int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add %[rc], %[p], %[a]\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " beq %[p], %[u], 1f\n"
+ " add %[rc], %[p], %[a]\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ : [a]"r" (a), [u]"r" (u)
+ : "memory");
+ return prev;
+}
+#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless
+#endif
+
+static __always_inline bool arch_atomic_inc_unless_negative(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " bltz %[p], 1f\n"
+ " addi %[rc], %[p], 1\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev < 0);
+}
+
+#define arch_atomic_inc_unless_negative arch_atomic_inc_unless_negative
+
+static __always_inline bool arch_atomic_dec_unless_positive(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " bgtz %[p], 1f\n"
+ " addi %[rc], %[p], -1\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev > 0);
+}
+
+#define arch_atomic_dec_unless_positive arch_atomic_dec_unless_positive
+
+static __always_inline int arch_atomic_dec_if_positive(atomic_t *v)
+{
+ int prev, rc;
+
+ __asm__ __volatile__ (
+ "0: lr.w %[p], %[c]\n"
+ " addi %[rc], %[p], -1\n"
+ " bltz %[rc], 1f\n"
+ " sc.w.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return prev - 1;
+}
+
+#define arch_atomic_dec_if_positive arch_atomic_dec_if_positive
+
+#ifndef CONFIG_GENERIC_ATOMIC64
+static __always_inline bool arch_atomic64_inc_unless_negative(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " bltz %[p], 1f\n"
+ " addi %[rc], %[p], 1\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev < 0);
+}
+
+#define arch_atomic64_inc_unless_negative arch_atomic64_inc_unless_negative
+
+static __always_inline bool arch_atomic64_dec_unless_positive(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " bgtz %[p], 1f\n"
+ " addi %[rc], %[p], -1\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return !(prev > 0);
+}
+
+#define arch_atomic64_dec_unless_positive arch_atomic64_dec_unless_positive
+
+static __always_inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
+{
+ s64 prev;
+ long rc;
+
+ __asm__ __volatile__ (
+ "0: lr.d %[p], %[c]\n"
+ " addi %[rc], %[p], -1\n"
+ " bltz %[rc], 1f\n"
+ " sc.d.rl %[rc], %[rc], %[c]\n"
+ " bnez %[rc], 0b\n"
+ " fence rw, rw\n"
+ "1:\n"
+ : [p]"=&r" (prev), [rc]"=&r" (rc), [c]"+A" (v->counter)
+ :
+ : "memory");
+ return prev - 1;
+}
+
+#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive
+#endif
+
+#endif /* _ASM_RISCV_ATOMIC_H */
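A usage sketch for the conditional ops (reached through the generic atomic_* wrappers; demo_put_ref() is illustrative, not a kernel API): arch_atomic_dec_if_positive() returns the decremented value, or -1 without storing if the counter was already zero, which is exactly the shape refcount-style code wants.

	#include <linux/atomic.h>

	static bool demo_put_ref(atomic_t *refs)
	{
		/* true when we dropped the last reference (1 -> 0) */
		return atomic_dec_if_positive(refs) == 0;
	}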
diff --git a/arch/riscv/include/asm/barrier.h b/arch/riscv/include/asm/barrier.h
new file mode 100644
index 0000000000..1107525942
--- /dev/null
+++ b/arch/riscv/include/asm/barrier.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Based on arch/arm/include/asm/barrier.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2013 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_BARRIER_H
+#define _ASM_RISCV_BARRIER_H
+
+#ifndef __ASSEMBLY__
+
+#define nop() __asm__ __volatile__ ("nop")
+#define __nops(n) ".rept " #n "\nnop\n.endr\n"
+#define nops(n) __asm__ __volatile__ (__nops(n))
+
+#define RISCV_FENCE(p, s) \
+ __asm__ __volatile__ ("fence " #p "," #s : : : "memory")
+
+/* These barriers need to enforce ordering on both devices and memory. */
+#define mb() RISCV_FENCE(iorw,iorw)
+#define rmb() RISCV_FENCE(ir,ir)
+#define wmb() RISCV_FENCE(ow,ow)
+
+/* These barriers do not need to enforce ordering on devices, just memory. */
+#define __smp_mb() RISCV_FENCE(rw,rw)
+#define __smp_rmb() RISCV_FENCE(r,r)
+#define __smp_wmb() RISCV_FENCE(w,w)
+
+#define __smp_store_release(p, v) \
+do { \
+ compiletime_assert_atomic_type(*p); \
+ RISCV_FENCE(rw,w); \
+ WRITE_ONCE(*p, v); \
+} while (0)
+
+#define __smp_load_acquire(p) \
+({ \
+ typeof(*p) ___p1 = READ_ONCE(*p); \
+ compiletime_assert_atomic_type(*p); \
+ RISCV_FENCE(r,rw); \
+ ___p1; \
+})
+
+/*
+ * This is a very specific barrier: it's currently only used in two places in
+ * the kernel, both in the scheduler. See include/linux/spinlock.h for the two
+ * orderings it guarantees, but the "critical section is RCsc" guarantee
+ * mandates a barrier on RISC-V. The sequence looks like:
+ *
+ * lr.aq lock
+ * sc lock <= LOCKED
+ * smp_mb__after_spinlock()
+ * // critical section
+ * lr lock
+ * sc.rl lock <= UNLOCKED
+ *
+ * The AQ/RL pair provides an RCpc critical section, but there's not really any
+ * way we can take advantage of that here because the ordering is only enforced
+ * on that one lock. Thus, we're just doing a full fence.
+ *
+ * Since we allow writeX to be called from preemptive regions we need at least
+ * an "o" in the predecessor set to ensure device writes are visible before the
+ * task is marked as available for scheduling on a new hart. While I don't see
+ * any concrete reason we need a full IO fence, it seems safer to just upgrade
+ * this in order to avoid any IO crossing a scheduling boundary. In both
+ * instances the scheduler pairs this with an mb(), so nothing is necessary on
+ * the new hart.
+ */
+#define smp_mb__after_spinlock() RISCV_FENCE(iorw,iorw)
+
+#include <asm-generic/barrier.h>
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_BARRIER_H */
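A message-passing sketch using the pair defined above: the release (fence rw,w) orders the payload store before the flag store, and the acquire (fence r,rw) orders the flag load before the payload load, so the consumer is guaranteed to observe the payload.

	static int demo_payload;
	static int demo_ready;

	static void demo_producer(int v)
	{
		demo_payload = v;			/* plain store */
		smp_store_release(&demo_ready, 1);	/* fence rw,w; then store */
	}

	static int demo_consumer(void)
	{
		while (!smp_load_acquire(&demo_ready))	/* load; then fence r,rw */
			cpu_relax();
		return demo_payload;
	}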
diff --git a/arch/riscv/include/asm/bitops.h b/arch/riscv/include/asm/bitops.h
new file mode 100644
index 0000000000..3540b69094
--- /dev/null
+++ b/arch/riscv/include/asm/bitops.h
@@ -0,0 +1,204 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_BITOPS_H
+#define _ASM_RISCV_BITOPS_H
+
+#ifndef _LINUX_BITOPS_H
+#error "Only <linux/bitops.h> can be included directly"
+#endif /* _LINUX_BITOPS_H */
+
+#include <linux/compiler.h>
+#include <linux/irqflags.h>
+#include <asm/barrier.h>
+#include <asm/bitsperlong.h>
+
+#include <asm-generic/bitops/__ffs.h>
+#include <asm-generic/bitops/ffz.h>
+#include <asm-generic/bitops/fls.h>
+#include <asm-generic/bitops/__fls.h>
+#include <asm-generic/bitops/fls64.h>
+#include <asm-generic/bitops/sched.h>
+#include <asm-generic/bitops/ffs.h>
+
+#include <asm-generic/bitops/hweight.h>
+
+#if (BITS_PER_LONG == 64)
+#define __AMO(op) "amo" #op ".d"
+#elif (BITS_PER_LONG == 32)
+#define __AMO(op) "amo" #op ".w"
+#else
+#error "Unexpected BITS_PER_LONG"
+#endif
+
+#define __test_and_op_bit_ord(op, mod, nr, addr, ord) \
+({ \
+ unsigned long __res, __mask; \
+ __mask = BIT_MASK(nr); \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " %0, %2, %1" \
+ : "=r" (__res), "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(__mask)) \
+ : "memory"); \
+ ((__res & __mask) != 0); \
+})
+
+#define __op_bit_ord(op, mod, nr, addr, ord) \
+ __asm__ __volatile__ ( \
+ __AMO(op) #ord " zero, %1, %0" \
+ : "+A" (addr[BIT_WORD(nr)]) \
+ : "r" (mod(BIT_MASK(nr))) \
+ : "memory");
+
+#define __test_and_op_bit(op, mod, nr, addr) \
+ __test_and_op_bit_ord(op, mod, nr, addr, .aqrl)
+#define __op_bit(op, mod, nr, addr) \
+ __op_bit_ord(op, mod, nr, addr, )
+
+/* Bitmask modifiers */
+#define __NOP(x) (x)
+#define __NOT(x) (~(x))
+
+/**
+ * test_and_set_bit - Set a bit and return its old value
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int test_and_set_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * test_and_clear_bit - Clear a bit and return its old value
+ * @nr: Bit to clear
+ * @addr: Address to count from
+ *
+ * This operation may be reordered on architectures other than x86.
+ */
+static inline int test_and_clear_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * test_and_change_bit - Change a bit and return its old value
+ * @nr: Bit to change
+ * @addr: Address to count from
+ *
+ * This operation is atomic and cannot be reordered.
+ * It also implies a memory barrier.
+ */
+static inline int test_and_change_bit(int nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * set_bit - Atomically set a bit in memory
+ * @nr: the bit to set
+ * @addr: the address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ *
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void set_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(or, __NOP, nr, addr);
+}
+
+/**
+ * clear_bit - Clears a bit in memory
+ * @nr: Bit to clear
+ * @addr: Address to start counting from
+ *
+ * Note: there are no guarantees that this function will not be reordered
+ * on non-x86 architectures, so if you are writing portable code,
+ * make sure not to rely on its reordering guarantees.
+ */
+static inline void clear_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(and, __NOT, nr, addr);
+}
+
+/**
+ * change_bit - Toggle a bit in memory
+ * @nr: Bit to change
+ * @addr: Address to start counting from
+ *
+ * change_bit() may be reordered on architectures other than x86.
+ * Note that @nr may be almost arbitrarily large; this function is not
+ * restricted to acting on a single-word quantity.
+ */
+static inline void change_bit(int nr, volatile unsigned long *addr)
+{
+ __op_bit(xor, __NOP, nr, addr);
+}
+
+/**
+ * test_and_set_bit_lock - Set a bit and return its old value, for lock
+ * @nr: Bit to set
+ * @addr: Address to count from
+ *
+ * This operation is atomic and provides acquire barrier semantics.
+ * It can be used to implement bit locks.
+ */
+static inline int test_and_set_bit_lock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ return __test_and_op_bit_ord(or, __NOP, nr, addr, .aq);
+}
+
+/**
+ * clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is atomic and provides release barrier semantics.
+ */
+static inline void clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ __op_bit_ord(and, __NOT, nr, addr, .rl);
+}
+
+/**
+ * __clear_bit_unlock - Clear a bit in memory, for unlock
+ * @nr: the bit to clear
+ * @addr: the address to start counting from
+ *
+ * This operation is like clear_bit_unlock(), but it is not atomic.
+ * It does provide release barrier semantics, so it can be used to unlock
+ * a bit lock; however, it should only be used when no other CPU can modify
+ * any bits in the memory until the lock is released (for example, when
+ * the bit lock itself protects access to the other bits in the word).
+ *
+ * On RISC-V systems there seems to be no benefit to taking advantage of the
+ * non-atomic property here: it's a lot more instructions and we still have to
+ * provide release semantics anyway.
+ */
+static inline void __clear_bit_unlock(
+ unsigned long nr, volatile unsigned long *addr)
+{
+ clear_bit_unlock(nr, addr);
+}
+
+#undef __test_and_op_bit
+#undef __op_bit
+#undef __NOP
+#undef __NOT
+#undef __AMO
+
+#include <asm-generic/bitops/non-atomic.h>
+#include <asm-generic/bitops/le.h>
+#include <asm-generic/bitops/ext2-atomic.h>
+
+#endif /* _ASM_RISCV_BITOPS_H */
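A sketch of the bit-lock use case the .aq/.rl variants exist for (essentially what <linux/bit_spinlock.h> builds generically):

	static inline void demo_bit_lock(unsigned long *word)
	{
		while (test_and_set_bit_lock(0, word))	/* AMO with .aq ordering */
			cpu_relax();
	}

	static inline void demo_bit_unlock(unsigned long *word)
	{
		clear_bit_unlock(0, word);		/* AMO with .rl ordering */
	}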
diff --git a/arch/riscv/include/asm/bug.h b/arch/riscv/include/asm/bug.h
new file mode 100644
index 0000000000..1aaea81fb1
--- /dev/null
+++ b/arch/riscv/include/asm/bug.h
@@ -0,0 +1,92 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_BUG_H
+#define _ASM_RISCV_BUG_H
+
+#include <linux/compiler.h>
+#include <linux/const.h>
+#include <linux/types.h>
+
+#include <asm/asm.h>
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_32 _UL(0x3)
+#define __COMPRESSED_INSN_MASK _UL(0xffff)
+
+#define __BUG_INSN_32 _UL(0x00100073) /* ebreak */
+#define __BUG_INSN_16 _UL(0x9002) /* c.ebreak */
+
+#define GET_INSN_LENGTH(insn) \
+({ \
+ unsigned long __len; \
+ __len = ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) ? \
+ 4UL : 2UL; \
+ __len; \
+})
+
+typedef u32 bug_insn_t;
+
+#ifdef CONFIG_GENERIC_BUG_RELATIVE_POINTERS
+#define __BUG_ENTRY_ADDR RISCV_INT " 1b - ."
+#define __BUG_ENTRY_FILE RISCV_INT " %0 - ."
+#else
+#define __BUG_ENTRY_ADDR RISCV_PTR " 1b"
+#define __BUG_ENTRY_FILE RISCV_PTR " %0"
+#endif
+
+#ifdef CONFIG_DEBUG_BUGVERBOSE
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ __BUG_ENTRY_FILE "\n\t" \
+ RISCV_SHORT " %1\n\t" \
+ RISCV_SHORT " %2"
+#else
+#define __BUG_ENTRY \
+ __BUG_ENTRY_ADDR "\n\t" \
+ RISCV_SHORT " %2"
+#endif
+
+#ifdef CONFIG_GENERIC_BUG
+#define __BUG_FLAGS(flags) \
+do { \
+ __asm__ __volatile__ ( \
+ "1:\n\t" \
+ "ebreak\n" \
+ ".pushsection __bug_table,\"aw\"\n\t" \
+ "2:\n\t" \
+ __BUG_ENTRY "\n\t" \
+ ".org 2b + %3\n\t" \
+ ".popsection" \
+ : \
+ : "i" (__FILE__), "i" (__LINE__), \
+ "i" (flags), \
+ "i" (sizeof(struct bug_entry))); \
+} while (0)
+#else /* CONFIG_GENERIC_BUG */
+#define __BUG_FLAGS(flags) do { \
+ __asm__ __volatile__ ("ebreak\n"); \
+} while (0)
+#endif /* CONFIG_GENERIC_BUG */
+
+#define BUG() do { \
+ __BUG_FLAGS(0); \
+ unreachable(); \
+} while (0)
+
+#define __WARN_FLAGS(flags) __BUG_FLAGS(BUGFLAG_WARNING|(flags))
+
+#define HAVE_ARCH_BUG
+
+#include <asm-generic/bug.h>
+
+struct pt_regs;
+struct task_struct;
+
+void __show_regs(struct pt_regs *regs);
+void die(struct pt_regs *regs, const char *str);
+void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr);
+
+#endif /* _ASM_RISCV_BUG_H */
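A sketch of how GET_INSN_LENGTH() is typically consumed (function name illustrative): a trap handler that has fetched the instruction at regs->epc advances past it by 2 bytes for a compressed c.ebreak or 4 bytes for a full ebreak.

	static void demo_skip_trapped_insn(struct pt_regs *regs, bug_insn_t insn)
	{
		regs->epc += GET_INSN_LENGTH(insn);
	}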
diff --git a/arch/riscv/include/asm/cache.h b/arch/riscv/include/asm/cache.h
new file mode 100644
index 0000000000..2174fe7bac
--- /dev/null
+++ b/arch/riscv/include/asm/cache.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2017 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CACHE_H
+#define _ASM_RISCV_CACHE_H
+
+#define L1_CACHE_SHIFT 6
+
+#define L1_CACHE_BYTES (1 << L1_CACHE_SHIFT)
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+#define ARCH_DMA_MINALIGN L1_CACHE_BYTES
+#define ARCH_KMALLOC_MINALIGN (8)
+#endif
+
+/*
+ * RISC-V requires the stack pointer to be 16-byte aligned, so ensure that
+ * the flat loader aligns it accordingly.
+ */
+#ifndef CONFIG_MMU
+#define ARCH_SLAB_MINALIGN 16
+#endif
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+extern int dma_cache_alignment;
+#define dma_get_cache_alignment dma_get_cache_alignment
+static inline int dma_get_cache_alignment(void)
+{
+ return dma_cache_alignment;
+}
+#endif
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CACHE_H */
diff --git a/arch/riscv/include/asm/cacheflush.h b/arch/riscv/include/asm/cacheflush.h
new file mode 100644
index 0000000000..3cb53c4df2
--- /dev/null
+++ b/arch/riscv/include/asm/cacheflush.h
@@ -0,0 +1,75 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CACHEFLUSH_H
+#define _ASM_RISCV_CACHEFLUSH_H
+
+#include <linux/mm.h>
+
+static inline void local_flush_icache_all(void)
+{
+ asm volatile ("fence.i" ::: "memory");
+}
+
+#define PG_dcache_clean PG_arch_1
+
+static inline void flush_dcache_folio(struct folio *folio)
+{
+ if (test_bit(PG_dcache_clean, &folio->flags))
+ clear_bit(PG_dcache_clean, &folio->flags);
+}
+#define flush_dcache_folio flush_dcache_folio
+#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
+
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
+
+/*
+ * RISC-V doesn't have an instruction to flush parts of the instruction cache,
+ * so instead we just flush the whole thing.
+ */
+#define flush_icache_range(start, end) flush_icache_all()
+#define flush_icache_user_page(vma, pg, addr, len) \
+ flush_icache_mm(vma->vm_mm, 0)
+
+#ifdef CONFIG_64BIT
+#define flush_cache_vmap(start, end) flush_tlb_kernel_range(start, end)
+#endif
+
+#ifndef CONFIG_SMP
+
+#define flush_icache_all() local_flush_icache_all()
+#define flush_icache_mm(mm, local) flush_icache_all()
+
+#else /* CONFIG_SMP */
+
+void flush_icache_all(void);
+void flush_icache_mm(struct mm_struct *mm, bool local);
+
+#endif /* CONFIG_SMP */
+
+extern unsigned int riscv_cbom_block_size;
+extern unsigned int riscv_cboz_block_size;
+void riscv_init_cbo_blocksizes(void);
+
+#ifdef CONFIG_RISCV_DMA_NONCOHERENT
+void riscv_noncoherent_supported(void);
+void __init riscv_set_dma_cache_alignment(void);
+#else
+static inline void riscv_noncoherent_supported(void) {}
+static inline void riscv_set_dma_cache_alignment(void) {}
+#endif
+
+/*
+ * Bits in sys_riscv_flush_icache()'s flags argument.
+ */
+#define SYS_RISCV_FLUSH_ICACHE_LOCAL 1UL
+#define SYS_RISCV_FLUSH_ICACHE_ALL (SYS_RISCV_FLUSH_ICACHE_LOCAL)
+
+#include <asm-generic/cacheflush.h>
+
+#endif /* _ASM_RISCV_CACHEFLUSH_H */
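Because flush_icache_range() flushes the whole I-cache (see the comment above), publishing freshly written instructions reduces to a copy plus one flush. Illustrative helper only; real code patching goes through the arch patching code and must also reach other harts via flush_icache_all()/flush_icache_mm():

	static void demo_publish_insns(void *dst, const void *src, size_t len)
	{
		memcpy(dst, src, len);
		flush_icache_range((unsigned long)dst, (unsigned long)dst + len);
	}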
diff --git a/arch/riscv/include/asm/cacheinfo.h b/arch/riscv/include/asm/cacheinfo.h
new file mode 100644
index 0000000000..d1a365215e
--- /dev/null
+++ b/arch/riscv/include/asm/cacheinfo.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_CACHEINFO_H
+#define _ASM_RISCV_CACHEINFO_H
+
+#include <linux/cacheinfo.h>
+
+struct riscv_cacheinfo_ops {
+ const struct attribute_group * (*get_priv_group)(struct cacheinfo
+ *this_leaf);
+};
+
+void riscv_set_cacheinfo_ops(struct riscv_cacheinfo_ops *ops);
+uintptr_t get_cache_size(u32 level, enum cache_type type);
+uintptr_t get_cache_geometry(u32 level, enum cache_type type);
+
+#endif /* _ASM_RISCV_CACHEINFO_H */
diff --git a/arch/riscv/include/asm/cfi.h b/arch/riscv/include/asm/cfi.h
new file mode 100644
index 0000000000..56bf9d69d5
--- /dev/null
+++ b/arch/riscv/include/asm/cfi.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_CFI_H
+#define _ASM_RISCV_CFI_H
+
+/*
+ * Clang Control Flow Integrity (CFI) support.
+ *
+ * Copyright (C) 2023 Google LLC
+ */
+
+#include <linux/cfi.h>
+
+#ifdef CONFIG_CFI_CLANG
+enum bug_trap_type handle_cfi_failure(struct pt_regs *regs);
+#else
+static inline enum bug_trap_type handle_cfi_failure(struct pt_regs *regs)
+{
+ return BUG_TRAP_TYPE_NONE;
+}
+#endif /* CONFIG_CFI_CLANG */
+
+#endif /* _ASM_RISCV_CFI_H */
diff --git a/arch/riscv/include/asm/clint.h b/arch/riscv/include/asm/clint.h
new file mode 100644
index 0000000000..0789fd37b4
--- /dev/null
+++ b/arch/riscv/include/asm/clint.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_CLINT_H
+#define _ASM_RISCV_CLINT_H
+
+#include <linux/types.h>
+#include <asm/mmio.h>
+
+#ifdef CONFIG_RISCV_M_MODE
+/*
+ * This lives in the CLINT driver, but is accessed directly by timex.h to avoid
+ * any overhead when accessing the MMIO timer.
+ *
+ * The ISA defines mtime as a 64-bit memory-mapped register that increments at
+ * a constant frequency, but it doesn't define some other constraints we depend
+ * on (most notably ordering constraints, but also some simpler stuff like the
+ * memory layout). Thus, this is called "clint_time_val" instead of something
+ * like "riscv_mtime", to signify that these non-ISA assumptions must hold.
+ */
+extern u64 __iomem *clint_time_val;
+#endif
+
+#endif
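A sketch of the intended consumer (M-mode timekeeping on rv64; hedged — the real reader lives in timex.h):

	static inline u64 demo_read_mtime(void)
	{
		/* Relies on the non-ISA ordering/layout assumptions noted above. */
		return readq_relaxed(clint_time_val);
	}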
diff --git a/arch/riscv/include/asm/clocksource.h b/arch/riscv/include/asm/clocksource.h
new file mode 100644
index 0000000000..482185566b
--- /dev/null
+++ b/arch/riscv/include/asm/clocksource.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_CLOCKSOURCE_H
+#define _ASM_CLOCKSOURCE_H
+
+#include <asm/vdso/clocksource.h>
+
+#endif
diff --git a/arch/riscv/include/asm/cmpxchg.h b/arch/riscv/include/asm/cmpxchg.h
new file mode 100644
index 0000000000..2f4726d3cf
--- /dev/null
+++ b/arch/riscv/include/asm/cmpxchg.h
@@ -0,0 +1,363 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CMPXCHG_H
+#define _ASM_RISCV_CMPXCHG_H
+
+#include <linux/bug.h>
+
+#include <asm/barrier.h>
+#include <asm/fence.h>
+
+#define __xchg_relaxed(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_relaxed(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_relaxed((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg_acquire(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w %0, %2, %1\n" \
+ RISCV_ACQUIRE_BARRIER \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d %0, %2, %1\n" \
+ RISCV_ACQUIRE_BARRIER \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_acquire(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_acquire((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __xchg_release(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ " amoswap.w %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ " amoswap.d %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg_release(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __xchg_release((ptr), \
+ _x_, sizeof(*(ptr))); \
+})
+
+#define __arch_xchg(ptr, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(new) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ " amoswap.w.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ " amoswap.d.aqrl %0, %2, %1\n" \
+ : "=r" (__ret), "+A" (*__ptr) \
+ : "r" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_xchg(ptr, x) \
+({ \
+ __typeof__(*(ptr)) _x_ = (x); \
+ (__typeof__(*(ptr))) __arch_xchg((ptr), _x_, sizeof(*(ptr))); \
+})
+
+#define xchg32(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 4); \
+ arch_xchg((ptr), (x)); \
+})
+
+#define xchg64(ptr, x) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_xchg((ptr), (x)); \
+})
+
+/*
+ * Atomic compare and exchange. Compare OLD with MEM, if identical,
+ * store NEW in MEM. Return the initial value in MEM. Success is
+ * indicated by comparing RETURN with OLD.
+ */
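+
+/*
+ * Illustrative (hypothetical) caller: a lock-free increment retries until
+ * the compare-and-exchange succeeds:
+ *
+ *	old = READ_ONCE(*p);
+ *	while ((tmp = arch_cmpxchg(p, old, old + 1)) != old)
+ *		old = tmp;
+ */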
+#define __cmpxchg_relaxed(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_relaxed(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_relaxed((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg_acquire(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ RISCV_ACQUIRE_BARRIER \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_acquire(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_acquire((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg_release(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ RISCV_RELEASE_BARRIER \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg_release(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg_release((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define __cmpxchg(ptr, old, new, size) \
+({ \
+ __typeof__(ptr) __ptr = (ptr); \
+ __typeof__(*(ptr)) __old = (old); \
+ __typeof__(*(ptr)) __new = (new); \
+ __typeof__(*(ptr)) __ret; \
+ register unsigned int __rc; \
+ switch (size) { \
+ case 4: \
+ __asm__ __volatile__ ( \
+ "0: lr.w %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.w.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" ((long)__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ case 8: \
+ __asm__ __volatile__ ( \
+ "0: lr.d %0, %2\n" \
+ " bne %0, %z3, 1f\n" \
+ " sc.d.rl %1, %z4, %2\n" \
+ " bnez %1, 0b\n" \
+ " fence rw, rw\n" \
+ "1:\n" \
+ : "=&r" (__ret), "=&r" (__rc), "+A" (*__ptr) \
+ : "rJ" (__old), "rJ" (__new) \
+ : "memory"); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+ __ret; \
+})
+
+#define arch_cmpxchg(ptr, o, n) \
+({ \
+ __typeof__(*(ptr)) _o_ = (o); \
+ __typeof__(*(ptr)) _n_ = (n); \
+ (__typeof__(*(ptr))) __cmpxchg((ptr), \
+ _o_, _n_, sizeof(*(ptr))); \
+})
+
+#define arch_cmpxchg_local(ptr, o, n) \
+ (__cmpxchg_relaxed((ptr), (o), (n), sizeof(*(ptr))))
+
+#define arch_cmpxchg64(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg((ptr), (o), (n)); \
+})
+
+#define arch_cmpxchg64_local(ptr, o, n) \
+({ \
+ BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
+ arch_cmpxchg_relaxed((ptr), (o), (n)); \
+})
+
+#endif /* _ASM_RISCV_CMPXCHG_H */
diff --git a/arch/riscv/include/asm/compat.h b/arch/riscv/include/asm/compat.h
new file mode 100644
index 0000000000..2ac955b511
--- /dev/null
+++ b/arch/riscv/include/asm/compat.h
@@ -0,0 +1,129 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_COMPAT_H
+#define __ASM_COMPAT_H
+
+#define COMPAT_UTS_MACHINE "riscv\0\0"
+
+/*
+ * Architecture specific compatibility types
+ */
+#include <linux/types.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm-generic/compat.h>
+
+static inline int is_compat_task(void)
+{
+ return test_thread_flag(TIF_32BIT);
+}
+
+struct compat_user_regs_struct {
+ compat_ulong_t pc;
+ compat_ulong_t ra;
+ compat_ulong_t sp;
+ compat_ulong_t gp;
+ compat_ulong_t tp;
+ compat_ulong_t t0;
+ compat_ulong_t t1;
+ compat_ulong_t t2;
+ compat_ulong_t s0;
+ compat_ulong_t s1;
+ compat_ulong_t a0;
+ compat_ulong_t a1;
+ compat_ulong_t a2;
+ compat_ulong_t a3;
+ compat_ulong_t a4;
+ compat_ulong_t a5;
+ compat_ulong_t a6;
+ compat_ulong_t a7;
+ compat_ulong_t s2;
+ compat_ulong_t s3;
+ compat_ulong_t s4;
+ compat_ulong_t s5;
+ compat_ulong_t s6;
+ compat_ulong_t s7;
+ compat_ulong_t s8;
+ compat_ulong_t s9;
+ compat_ulong_t s10;
+ compat_ulong_t s11;
+ compat_ulong_t t3;
+ compat_ulong_t t4;
+ compat_ulong_t t5;
+ compat_ulong_t t6;
+};
+
+static inline void regs_to_cregs(struct compat_user_regs_struct *cregs,
+ struct pt_regs *regs)
+{
+ cregs->pc = (compat_ulong_t) regs->epc;
+ cregs->ra = (compat_ulong_t) regs->ra;
+ cregs->sp = (compat_ulong_t) regs->sp;
+ cregs->gp = (compat_ulong_t) regs->gp;
+ cregs->tp = (compat_ulong_t) regs->tp;
+ cregs->t0 = (compat_ulong_t) regs->t0;
+ cregs->t1 = (compat_ulong_t) regs->t1;
+ cregs->t2 = (compat_ulong_t) regs->t2;
+ cregs->s0 = (compat_ulong_t) regs->s0;
+ cregs->s1 = (compat_ulong_t) regs->s1;
+ cregs->a0 = (compat_ulong_t) regs->a0;
+ cregs->a1 = (compat_ulong_t) regs->a1;
+ cregs->a2 = (compat_ulong_t) regs->a2;
+ cregs->a3 = (compat_ulong_t) regs->a3;
+ cregs->a4 = (compat_ulong_t) regs->a4;
+ cregs->a5 = (compat_ulong_t) regs->a5;
+ cregs->a6 = (compat_ulong_t) regs->a6;
+ cregs->a7 = (compat_ulong_t) regs->a7;
+ cregs->s2 = (compat_ulong_t) regs->s2;
+ cregs->s3 = (compat_ulong_t) regs->s3;
+ cregs->s4 = (compat_ulong_t) regs->s4;
+ cregs->s5 = (compat_ulong_t) regs->s5;
+ cregs->s6 = (compat_ulong_t) regs->s6;
+ cregs->s7 = (compat_ulong_t) regs->s7;
+ cregs->s8 = (compat_ulong_t) regs->s8;
+ cregs->s9 = (compat_ulong_t) regs->s9;
+ cregs->s10 = (compat_ulong_t) regs->s10;
+ cregs->s11 = (compat_ulong_t) regs->s11;
+ cregs->t3 = (compat_ulong_t) regs->t3;
+ cregs->t4 = (compat_ulong_t) regs->t4;
+ cregs->t5 = (compat_ulong_t) regs->t5;
+ cregs->t6 = (compat_ulong_t) regs->t6;
+};
+
+static inline void cregs_to_regs(struct compat_user_regs_struct *cregs,
+ struct pt_regs *regs)
+{
+ regs->epc = (unsigned long) cregs->pc;
+ regs->ra = (unsigned long) cregs->ra;
+ regs->sp = (unsigned long) cregs->sp;
+ regs->gp = (unsigned long) cregs->gp;
+ regs->tp = (unsigned long) cregs->tp;
+ regs->t0 = (unsigned long) cregs->t0;
+ regs->t1 = (unsigned long) cregs->t1;
+ regs->t2 = (unsigned long) cregs->t2;
+ regs->s0 = (unsigned long) cregs->s0;
+ regs->s1 = (unsigned long) cregs->s1;
+ regs->a0 = (unsigned long) cregs->a0;
+ regs->a1 = (unsigned long) cregs->a1;
+ regs->a2 = (unsigned long) cregs->a2;
+ regs->a3 = (unsigned long) cregs->a3;
+ regs->a4 = (unsigned long) cregs->a4;
+ regs->a5 = (unsigned long) cregs->a5;
+ regs->a6 = (unsigned long) cregs->a6;
+ regs->a7 = (unsigned long) cregs->a7;
+ regs->s2 = (unsigned long) cregs->s2;
+ regs->s3 = (unsigned long) cregs->s3;
+ regs->s4 = (unsigned long) cregs->s4;
+ regs->s5 = (unsigned long) cregs->s5;
+ regs->s6 = (unsigned long) cregs->s6;
+ regs->s7 = (unsigned long) cregs->s7;
+ regs->s8 = (unsigned long) cregs->s8;
+ regs->s9 = (unsigned long) cregs->s9;
+ regs->s10 = (unsigned long) cregs->s10;
+ regs->s11 = (unsigned long) cregs->s11;
+ regs->t3 = (unsigned long) cregs->t3;
+ regs->t4 = (unsigned long) cregs->t4;
+ regs->t5 = (unsigned long) cregs->t5;
+ regs->t6 = (unsigned long) cregs->t6;
+};
+
+#endif /* __ASM_COMPAT_H */
diff --git a/arch/riscv/include/asm/cpu.h b/arch/riscv/include/asm/cpu.h
new file mode 100644
index 0000000000..28d45a6678
--- /dev/null
+++ b/arch/riscv/include/asm/cpu.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_CPU_H
+#define _ASM_CPU_H
+
+/* This header is required unconditionally by the ACPI core */
+
+#endif /* _ASM_CPU_H */
diff --git a/arch/riscv/include/asm/cpu_ops.h b/arch/riscv/include/asm/cpu_ops.h
new file mode 100644
index 0000000000..aa128466c4
--- /dev/null
+++ b/arch/riscv/include/asm/cpu_ops.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ * Based on arch/arm64/include/asm/cpu_ops.h
+ */
+#ifndef __ASM_CPU_OPS_H
+#define __ASM_CPU_OPS_H
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+/**
+ * struct cpu_operations - Callback operations for hotplugging CPUs.
+ *
+ * @name: Name of the boot protocol.
+ * @cpu_prepare: Early one-time preparation step for a cpu. If there
+ * is a mechanism for doing so, tests whether it is
+ * possible to boot the given HART.
+ * @cpu_start: Boots a cpu into the kernel.
+ * @cpu_disable: Prepares a cpu to die. May fail for some
+ * mechanism-specific reason, which will cause the hot
+ * unplug to be aborted. Called from the cpu to be killed.
+ * @cpu_stop: Makes a cpu leave the kernel. Must not fail. Called from
+ * the cpu being stopped.
+ * @cpu_is_stopped: Ensures a cpu has left the kernel. Called from another
+ * cpu.
+ */
+struct cpu_operations {
+ const char *name;
+ int (*cpu_prepare)(unsigned int cpu);
+ int (*cpu_start)(unsigned int cpu,
+ struct task_struct *tidle);
+#ifdef CONFIG_HOTPLUG_CPU
+ int (*cpu_disable)(unsigned int cpu);
+ void (*cpu_stop)(void);
+ int (*cpu_is_stopped)(unsigned int cpu);
+#endif
+};
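+
+/*
+ * A boot protocol supplies one of these; e.g. the spinwait method is wired
+ * up roughly as below (a sketch; the real definition lives in
+ * cpu_ops_spinwait.c):
+ *
+ *	const struct cpu_operations cpu_ops_spinwait = {
+ *		.name		= "spinwait",
+ *		.cpu_prepare	= spinwait_cpu_prepare,
+ *		.cpu_start	= spinwait_cpu_start,
+ *	};
+ */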
+
+extern const struct cpu_operations cpu_ops_spinwait;
+extern const struct cpu_operations *cpu_ops[NR_CPUS];
+void __init cpu_set_ops(int cpu);
+
+#endif /* ifndef __ASM_CPU_OPS_H */
diff --git a/arch/riscv/include/asm/cpu_ops_sbi.h b/arch/riscv/include/asm/cpu_ops_sbi.h
new file mode 100644
index 0000000000..d6e4665b31
--- /dev/null
+++ b/arch/riscv/include/asm/cpu_ops_sbi.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 by Rivos Inc.
+ */
+#ifndef __ASM_CPU_OPS_SBI_H
+#define __ASM_CPU_OPS_SBI_H
+
+#ifndef __ASSEMBLY__
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/threads.h>
+
+extern const struct cpu_operations cpu_ops_sbi;
+
+/**
+ * struct sbi_hart_boot_data - Hart-specific boot data used during booting
+ * and cpu hotplug.
+ * @task_ptr: A pointer to the hart specific tp
+ * @stack_ptr: A pointer to the hart specific sp
+ */
+struct sbi_hart_boot_data {
+ void *task_ptr;
+ void *stack_ptr;
+};
+#endif
+
+#endif /* ifndef __ASM_CPU_OPS_SBI_H */
diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
new file mode 100644
index 0000000000..d0345bd659
--- /dev/null
+++ b/arch/riscv/include/asm/cpufeature.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright 2022-2023 Rivos, Inc
+ */
+
+#ifndef _ASM_CPUFEATURE_H
+#define _ASM_CPUFEATURE_H
+
+#include <linux/bitmap.h>
+#include <asm/hwcap.h>
+
+/*
+ * These are probed via a device_initcall(), either through the SBI or
+ * directly from the corresponding CSRs.
+ */
+struct riscv_cpuinfo {
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+};
+
+struct riscv_isainfo {
+ DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
+};
+
+DECLARE_PER_CPU(struct riscv_cpuinfo, riscv_cpuinfo);
+
+DECLARE_PER_CPU(long, misaligned_access_speed);
+
+/* Per-cpu ISA extensions. */
+extern struct riscv_isainfo hart_isa[NR_CPUS];
+
+void check_unaligned_access(int cpu);
+
+#endif
diff --git a/arch/riscv/include/asm/cpuidle.h b/arch/riscv/include/asm/cpuidle.h
new file mode 100644
index 0000000000..71fdc607d4
--- /dev/null
+++ b/arch/riscv/include/asm/cpuidle.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2021 Allwinner Ltd
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_CPUIDLE_H
+#define _ASM_RISCV_CPUIDLE_H
+
+#include <asm/barrier.h>
+#include <asm/processor.h>
+
+static inline void cpu_do_idle(void)
+{
+ /*
+ * Add mb() here to ensure that all
+ * IO/MEM accesses are completed prior
+ * to entering WFI.
+ */
+ mb();
+ wait_for_interrupt();
+}
+
+#endif
diff --git a/arch/riscv/include/asm/csr.h b/arch/riscv/include/asm/csr.h
new file mode 100644
index 0000000000..777cb82995
--- /dev/null
+++ b/arch/riscv/include/asm/csr.h
@@ -0,0 +1,518 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_CSR_H
+#define _ASM_RISCV_CSR_H
+
+#include <asm/asm.h>
+#include <linux/bits.h>
+
+/* Status register flags */
+#define SR_SIE _AC(0x00000002, UL) /* Supervisor Interrupt Enable */
+#define SR_MIE _AC(0x00000008, UL) /* Machine Interrupt Enable */
+#define SR_SPIE _AC(0x00000020, UL) /* Previous Supervisor IE */
+#define SR_MPIE _AC(0x00000080, UL) /* Previous Machine IE */
+#define SR_SPP _AC(0x00000100, UL) /* Previously Supervisor */
+#define SR_MPP _AC(0x00001800, UL) /* Previously Machine */
+#define SR_SUM _AC(0x00040000, UL) /* Supervisor User Memory Access */
+
+#define SR_FS _AC(0x00006000, UL) /* Floating-point Status */
+#define SR_FS_OFF _AC(0x00000000, UL)
+#define SR_FS_INITIAL _AC(0x00002000, UL)
+#define SR_FS_CLEAN _AC(0x00004000, UL)
+#define SR_FS_DIRTY _AC(0x00006000, UL)
+
+#define SR_VS _AC(0x00000600, UL) /* Vector Status */
+#define SR_VS_OFF _AC(0x00000000, UL)
+#define SR_VS_INITIAL _AC(0x00000200, UL)
+#define SR_VS_CLEAN _AC(0x00000400, UL)
+#define SR_VS_DIRTY _AC(0x00000600, UL)
+
+#define SR_XS _AC(0x00018000, UL) /* Extension Status */
+#define SR_XS_OFF _AC(0x00000000, UL)
+#define SR_XS_INITIAL _AC(0x00008000, UL)
+#define SR_XS_CLEAN _AC(0x00010000, UL)
+#define SR_XS_DIRTY _AC(0x00018000, UL)
+
+#define SR_FS_VS (SR_FS | SR_VS) /* Vector and Floating-Point Unit */
+
+#ifndef CONFIG_64BIT
+#define SR_SD _AC(0x80000000, UL) /* FS/VS/XS dirty */
+#else
+#define SR_SD _AC(0x8000000000000000, UL) /* FS/VS/XS dirty */
+#endif
+
+#ifdef CONFIG_64BIT
+#define SR_UXL _AC(0x300000000, UL) /* XLEN mask for U-mode */
+#define SR_UXL_32 _AC(0x100000000, UL) /* XLEN = 32 for U-mode */
+#define SR_UXL_64 _AC(0x200000000, UL) /* XLEN = 64 for U-mode */
+#endif
+
+/* SATP flags */
+#ifndef CONFIG_64BIT
+#define SATP_PPN _AC(0x003FFFFF, UL)
+#define SATP_MODE_32 _AC(0x80000000, UL)
+#define SATP_MODE_SHIFT 31
+#define SATP_ASID_BITS 9
+#define SATP_ASID_SHIFT 22
+#define SATP_ASID_MASK _AC(0x1FF, UL)
+#else
+#define SATP_PPN _AC(0x00000FFFFFFFFFFF, UL)
+#define SATP_MODE_39 _AC(0x8000000000000000, UL)
+#define SATP_MODE_48 _AC(0x9000000000000000, UL)
+#define SATP_MODE_57 _AC(0xa000000000000000, UL)
+#define SATP_MODE_SHIFT 60
+#define SATP_ASID_BITS 16
+#define SATP_ASID_SHIFT 44
+#define SATP_ASID_MASK _AC(0xFFFF, UL)
+#endif
+
+/* Exception cause high bit - is an interrupt if set */
+#define CAUSE_IRQ_FLAG (_AC(1, UL) << (__riscv_xlen - 1))
+
+/* Interrupt causes (minus the high bit) */
+#define IRQ_S_SOFT 1
+#define IRQ_VS_SOFT 2
+#define IRQ_M_SOFT 3
+#define IRQ_S_TIMER 5
+#define IRQ_VS_TIMER 6
+#define IRQ_M_TIMER 7
+#define IRQ_S_EXT 9
+#define IRQ_VS_EXT 10
+#define IRQ_M_EXT 11
+#define IRQ_S_GEXT 12
+#define IRQ_PMU_OVF 13
+#define IRQ_LOCAL_MAX (IRQ_PMU_OVF + 1)
+#define IRQ_LOCAL_MASK GENMASK((IRQ_LOCAL_MAX - 1), 0)
+
+/* Exception causes */
+#define EXC_INST_MISALIGNED 0
+#define EXC_INST_ACCESS 1
+#define EXC_INST_ILLEGAL 2
+#define EXC_BREAKPOINT 3
+#define EXC_LOAD_MISALIGNED 4
+#define EXC_LOAD_ACCESS 5
+#define EXC_STORE_MISALIGNED 6
+#define EXC_STORE_ACCESS 7
+#define EXC_SYSCALL 8
+#define EXC_HYPERVISOR_SYSCALL 9
+#define EXC_SUPERVISOR_SYSCALL 10
+#define EXC_INST_PAGE_FAULT 12
+#define EXC_LOAD_PAGE_FAULT 13
+#define EXC_STORE_PAGE_FAULT 15
+#define EXC_INST_GUEST_PAGE_FAULT 20
+#define EXC_LOAD_GUEST_PAGE_FAULT 21
+#define EXC_VIRTUAL_INST_FAULT 22
+#define EXC_STORE_GUEST_PAGE_FAULT 23
+
+/* PMP configuration */
+#define PMP_R 0x01
+#define PMP_W 0x02
+#define PMP_X 0x04
+#define PMP_A 0x18
+#define PMP_A_TOR 0x08
+#define PMP_A_NA4 0x10
+#define PMP_A_NAPOT 0x18
+#define PMP_L 0x80
+
+/* HSTATUS flags */
+#ifdef CONFIG_64BIT
+#define HSTATUS_VSXL _AC(0x300000000, UL)
+#define HSTATUS_VSXL_SHIFT 32
+#endif
+#define HSTATUS_VTSR _AC(0x00400000, UL)
+#define HSTATUS_VTW _AC(0x00200000, UL)
+#define HSTATUS_VTVM _AC(0x00100000, UL)
+#define HSTATUS_VGEIN _AC(0x0003f000, UL)
+#define HSTATUS_VGEIN_SHIFT 12
+#define HSTATUS_HU _AC(0x00000200, UL)
+#define HSTATUS_SPVP _AC(0x00000100, UL)
+#define HSTATUS_SPV _AC(0x00000080, UL)
+#define HSTATUS_GVA _AC(0x00000040, UL)
+#define HSTATUS_VSBE _AC(0x00000020, UL)
+
+/* HGATP flags */
+#define HGATP_MODE_OFF _AC(0, UL)
+#define HGATP_MODE_SV32X4 _AC(1, UL)
+#define HGATP_MODE_SV39X4 _AC(8, UL)
+#define HGATP_MODE_SV48X4 _AC(9, UL)
+#define HGATP_MODE_SV57X4 _AC(10, UL)
+
+#define HGATP32_MODE_SHIFT 31
+#define HGATP32_VMID_SHIFT 22
+#define HGATP32_VMID GENMASK(28, 22)
+#define HGATP32_PPN GENMASK(21, 0)
+
+#define HGATP64_MODE_SHIFT 60
+#define HGATP64_VMID_SHIFT 44
+#define HGATP64_VMID GENMASK(57, 44)
+#define HGATP64_PPN GENMASK(43, 0)
+
+#define HGATP_PAGE_SHIFT 12
+
+#ifdef CONFIG_64BIT
+#define HGATP_PPN HGATP64_PPN
+#define HGATP_VMID_SHIFT HGATP64_VMID_SHIFT
+#define HGATP_VMID HGATP64_VMID
+#define HGATP_MODE_SHIFT HGATP64_MODE_SHIFT
+#else
+#define HGATP_PPN HGATP32_PPN
+#define HGATP_VMID_SHIFT HGATP32_VMID_SHIFT
+#define HGATP_VMID HGATP32_VMID
+#define HGATP_MODE_SHIFT HGATP32_MODE_SHIFT
+#endif
+
+/* VSIP & HVIP relation */
+#define VSIP_TO_HVIP_SHIFT (IRQ_VS_SOFT - IRQ_S_SOFT)
+#define VSIP_VALID_MASK ((_AC(1, UL) << IRQ_S_SOFT) | \
+ (_AC(1, UL) << IRQ_S_TIMER) | \
+ (_AC(1, UL) << IRQ_S_EXT))
+
+/* AIA CSR bits */
+#define TOPI_IID_SHIFT 16
+#define TOPI_IID_MASK GENMASK(11, 0)
+#define TOPI_IPRIO_MASK GENMASK(7, 0)
+#define TOPI_IPRIO_BITS 8
+
+#define TOPEI_ID_SHIFT 16
+#define TOPEI_ID_MASK GENMASK(10, 0)
+#define TOPEI_PRIO_MASK GENMASK(10, 0)
+
+#define ISELECT_IPRIO0 0x30
+#define ISELECT_IPRIO15 0x3f
+#define ISELECT_MASK GENMASK(8, 0)
+
+#define HVICTL_VTI BIT(30)
+#define HVICTL_IID GENMASK(27, 16)
+#define HVICTL_IID_SHIFT 16
+#define HVICTL_DPR BIT(9)
+#define HVICTL_IPRIOM BIT(8)
+#define HVICTL_IPRIO GENMASK(7, 0)
+
+/* xENVCFG flags */
+#define ENVCFG_STCE (_AC(1, ULL) << 63)
+#define ENVCFG_PBMTE (_AC(1, ULL) << 62)
+#define ENVCFG_CBZE (_AC(1, UL) << 7)
+#define ENVCFG_CBCFE (_AC(1, UL) << 6)
+#define ENVCFG_CBIE_SHIFT 4
+#define ENVCFG_CBIE (_AC(0x3, UL) << ENVCFG_CBIE_SHIFT)
+#define ENVCFG_CBIE_ILL _AC(0x0, UL)
+#define ENVCFG_CBIE_FLUSH _AC(0x1, UL)
+#define ENVCFG_CBIE_INV _AC(0x3, UL)
+#define ENVCFG_FIOM _AC(0x1, UL)
+
+/* symbolic CSR names: */
+#define CSR_CYCLE 0xc00
+#define CSR_TIME 0xc01
+#define CSR_INSTRET 0xc02
+#define CSR_HPMCOUNTER3 0xc03
+#define CSR_HPMCOUNTER4 0xc04
+#define CSR_HPMCOUNTER5 0xc05
+#define CSR_HPMCOUNTER6 0xc06
+#define CSR_HPMCOUNTER7 0xc07
+#define CSR_HPMCOUNTER8 0xc08
+#define CSR_HPMCOUNTER9 0xc09
+#define CSR_HPMCOUNTER10 0xc0a
+#define CSR_HPMCOUNTER11 0xc0b
+#define CSR_HPMCOUNTER12 0xc0c
+#define CSR_HPMCOUNTER13 0xc0d
+#define CSR_HPMCOUNTER14 0xc0e
+#define CSR_HPMCOUNTER15 0xc0f
+#define CSR_HPMCOUNTER16 0xc10
+#define CSR_HPMCOUNTER17 0xc11
+#define CSR_HPMCOUNTER18 0xc12
+#define CSR_HPMCOUNTER19 0xc13
+#define CSR_HPMCOUNTER20 0xc14
+#define CSR_HPMCOUNTER21 0xc15
+#define CSR_HPMCOUNTER22 0xc16
+#define CSR_HPMCOUNTER23 0xc17
+#define CSR_HPMCOUNTER24 0xc18
+#define CSR_HPMCOUNTER25 0xc19
+#define CSR_HPMCOUNTER26 0xc1a
+#define CSR_HPMCOUNTER27 0xc1b
+#define CSR_HPMCOUNTER28 0xc1c
+#define CSR_HPMCOUNTER29 0xc1d
+#define CSR_HPMCOUNTER30 0xc1e
+#define CSR_HPMCOUNTER31 0xc1f
+#define CSR_CYCLEH 0xc80
+#define CSR_TIMEH 0xc81
+#define CSR_INSTRETH 0xc82
+#define CSR_HPMCOUNTER3H 0xc83
+#define CSR_HPMCOUNTER4H 0xc84
+#define CSR_HPMCOUNTER5H 0xc85
+#define CSR_HPMCOUNTER6H 0xc86
+#define CSR_HPMCOUNTER7H 0xc87
+#define CSR_HPMCOUNTER8H 0xc88
+#define CSR_HPMCOUNTER9H 0xc89
+#define CSR_HPMCOUNTER10H 0xc8a
+#define CSR_HPMCOUNTER11H 0xc8b
+#define CSR_HPMCOUNTER12H 0xc8c
+#define CSR_HPMCOUNTER13H 0xc8d
+#define CSR_HPMCOUNTER14H 0xc8e
+#define CSR_HPMCOUNTER15H 0xc8f
+#define CSR_HPMCOUNTER16H 0xc90
+#define CSR_HPMCOUNTER17H 0xc91
+#define CSR_HPMCOUNTER18H 0xc92
+#define CSR_HPMCOUNTER19H 0xc93
+#define CSR_HPMCOUNTER20H 0xc94
+#define CSR_HPMCOUNTER21H 0xc95
+#define CSR_HPMCOUNTER22H 0xc96
+#define CSR_HPMCOUNTER23H 0xc97
+#define CSR_HPMCOUNTER24H 0xc98
+#define CSR_HPMCOUNTER25H 0xc99
+#define CSR_HPMCOUNTER26H 0xc9a
+#define CSR_HPMCOUNTER27H 0xc9b
+#define CSR_HPMCOUNTER28H 0xc9c
+#define CSR_HPMCOUNTER29H 0xc9d
+#define CSR_HPMCOUNTER30H 0xc9e
+#define CSR_HPMCOUNTER31H 0xc9f
+
+#define CSR_SSCOUNTOVF 0xda0
+
+#define CSR_SSTATUS 0x100
+#define CSR_SIE 0x104
+#define CSR_STVEC 0x105
+#define CSR_SCOUNTEREN 0x106
+#define CSR_SSCRATCH 0x140
+#define CSR_SEPC 0x141
+#define CSR_SCAUSE 0x142
+#define CSR_STVAL 0x143
+#define CSR_SIP 0x144
+#define CSR_SATP 0x180
+
+#define CSR_STIMECMP 0x14D
+#define CSR_STIMECMPH 0x15D
+
+/* Supervisor-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_SISELECT 0x150
+#define CSR_SIREG 0x151
+
+/* Supervisor-Level Interrupts (AIA) */
+#define CSR_STOPEI 0x15c
+#define CSR_STOPI 0xdb0
+
+/* Supervisor-Level High-Half CSRs (AIA) */
+#define CSR_SIEH 0x114
+#define CSR_SIPH 0x154
+
+#define CSR_VSSTATUS 0x200
+#define CSR_VSIE 0x204
+#define CSR_VSTVEC 0x205
+#define CSR_VSSCRATCH 0x240
+#define CSR_VSEPC 0x241
+#define CSR_VSCAUSE 0x242
+#define CSR_VSTVAL 0x243
+#define CSR_VSIP 0x244
+#define CSR_VSATP 0x280
+#define CSR_VSTIMECMP 0x24D
+#define CSR_VSTIMECMPH 0x25D
+
+#define CSR_HSTATUS 0x600
+#define CSR_HEDELEG 0x602
+#define CSR_HIDELEG 0x603
+#define CSR_HIE 0x604
+#define CSR_HTIMEDELTA 0x605
+#define CSR_HCOUNTEREN 0x606
+#define CSR_HGEIE 0x607
+#define CSR_HENVCFG 0x60a
+#define CSR_HTIMEDELTAH 0x615
+#define CSR_HENVCFGH 0x61a
+#define CSR_HTVAL 0x643
+#define CSR_HIP 0x644
+#define CSR_HVIP 0x645
+#define CSR_HTINST 0x64a
+#define CSR_HGATP 0x680
+#define CSR_HGEIP 0xe12
+
+/* Virtual Interrupts and Interrupt Priorities (H-extension with AIA) */
+#define CSR_HVIEN 0x608
+#define CSR_HVICTL 0x609
+#define CSR_HVIPRIO1 0x646
+#define CSR_HVIPRIO2 0x647
+
+/* VS-Level Window to Indirectly Accessed Registers (H-extension with AIA) */
+#define CSR_VSISELECT 0x250
+#define CSR_VSIREG 0x251
+
+/* VS-Level Interrupts (H-extension with AIA) */
+#define CSR_VSTOPEI 0x25c
+#define CSR_VSTOPI 0xeb0
+
+/* Hypervisor and VS-Level High-Half CSRs (H-extension with AIA) */
+#define CSR_HIDELEGH 0x613
+#define CSR_HVIENH 0x618
+#define CSR_HVIPH 0x655
+#define CSR_HVIPRIO1H 0x656
+#define CSR_HVIPRIO2H 0x657
+#define CSR_VSIEH 0x214
+#define CSR_VSIPH 0x254
+
+#define CSR_MSTATUS 0x300
+#define CSR_MISA 0x301
+#define CSR_MIDELEG 0x303
+#define CSR_MIE 0x304
+#define CSR_MTVEC 0x305
+#define CSR_MENVCFG 0x30a
+#define CSR_MENVCFGH 0x31a
+#define CSR_MSCRATCH 0x340
+#define CSR_MEPC 0x341
+#define CSR_MCAUSE 0x342
+#define CSR_MTVAL 0x343
+#define CSR_MIP 0x344
+#define CSR_PMPCFG0 0x3a0
+#define CSR_PMPADDR0 0x3b0
+#define CSR_MVENDORID 0xf11
+#define CSR_MARCHID 0xf12
+#define CSR_MIMPID 0xf13
+#define CSR_MHARTID 0xf14
+
+/* Machine-Level Window to Indirectly Accessed Registers (AIA) */
+#define CSR_MISELECT 0x350
+#define CSR_MIREG 0x351
+
+/* Machine-Level Interrupts (AIA) */
+#define CSR_MTOPEI 0x35c
+#define CSR_MTOPI 0xfb0
+
+/* Virtual Interrupts for Supervisor Level (AIA) */
+#define CSR_MVIEN 0x308
+#define CSR_MVIP 0x309
+
+/* Machine-Level High-Half CSRs (AIA) */
+#define CSR_MIDELEGH 0x313
+#define CSR_MIEH 0x314
+#define CSR_MVIENH 0x318
+#define CSR_MVIPH 0x319
+#define CSR_MIPH 0x354
+
+#define CSR_VSTART 0x8
+#define CSR_VCSR 0xf
+#define CSR_VL 0xc20
+#define CSR_VTYPE 0xc21
+#define CSR_VLENB 0xc22
+
+#ifdef CONFIG_RISCV_M_MODE
+# define CSR_STATUS CSR_MSTATUS
+# define CSR_IE CSR_MIE
+# define CSR_TVEC CSR_MTVEC
+# define CSR_SCRATCH CSR_MSCRATCH
+# define CSR_EPC CSR_MEPC
+# define CSR_CAUSE CSR_MCAUSE
+# define CSR_TVAL CSR_MTVAL
+# define CSR_IP CSR_MIP
+
+# define CSR_IEH CSR_MIEH
+# define CSR_ISELECT CSR_MISELECT
+# define CSR_IREG CSR_MIREG
+# define CSR_IPH CSR_MIPH
+# define CSR_TOPEI CSR_MTOPEI
+# define CSR_TOPI CSR_MTOPI
+
+# define SR_IE SR_MIE
+# define SR_PIE SR_MPIE
+# define SR_PP SR_MPP
+
+# define RV_IRQ_SOFT IRQ_M_SOFT
+# define RV_IRQ_TIMER IRQ_M_TIMER
+# define RV_IRQ_EXT IRQ_M_EXT
+#else /* CONFIG_RISCV_M_MODE */
+# define CSR_STATUS CSR_SSTATUS
+# define CSR_IE CSR_SIE
+# define CSR_TVEC CSR_STVEC
+# define CSR_SCRATCH CSR_SSCRATCH
+# define CSR_EPC CSR_SEPC
+# define CSR_CAUSE CSR_SCAUSE
+# define CSR_TVAL CSR_STVAL
+# define CSR_IP CSR_SIP
+
+# define CSR_IEH CSR_SIEH
+# define CSR_ISELECT CSR_SISELECT
+# define CSR_IREG CSR_SIREG
+# define CSR_IPH CSR_SIPH
+# define CSR_TOPEI CSR_STOPEI
+# define CSR_TOPI CSR_STOPI
+
+# define SR_IE SR_SIE
+# define SR_PIE SR_SPIE
+# define SR_PP SR_SPP
+
+# define RV_IRQ_SOFT IRQ_S_SOFT
+# define RV_IRQ_TIMER IRQ_S_TIMER
+# define RV_IRQ_EXT IRQ_S_EXT
+# define RV_IRQ_PMU IRQ_PMU_OVF
+# define SIP_LCOFIP (_AC(0x1, UL) << IRQ_PMU_OVF)
+
+#endif /* !CONFIG_RISCV_M_MODE */
+
+/* IE/IP (Supervisor/Machine Interrupt Enable/Pending) flags */
+#define IE_SIE (_AC(0x1, UL) << RV_IRQ_SOFT)
+#define IE_TIE (_AC(0x1, UL) << RV_IRQ_TIMER)
+#define IE_EIE (_AC(0x1, UL) << RV_IRQ_EXT)
+
+#ifndef __ASSEMBLY__
+
+#define csr_swap(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrw %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_read(csr) \
+({ \
+ register unsigned long __v; \
+ __asm__ __volatile__ ("csrr %0, " __ASM_STR(csr) \
+ : "=r" (__v) : \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_write(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrw " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrs %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_set(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrs " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
+
+#define csr_read_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrrc %0, " __ASM_STR(csr) ", %1"\
+ : "=r" (__v) : "rK" (__v) \
+ : "memory"); \
+ __v; \
+})
+
+#define csr_clear(csr, val) \
+({ \
+ unsigned long __v = (unsigned long)(val); \
+ __asm__ __volatile__ ("csrc " __ASM_STR(csr) ", %0" \
+ : : "rK" (__v) \
+ : "memory"); \
+})
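+
+/*
+ * Illustrative (hypothetical) trap-handler fragment using the accessors
+ * above:
+ *
+ *	unsigned long cause = csr_read(CSR_CAUSE);
+ *
+ *	csr_clear(CSR_IE, IE_TIE);	(mask the timer interrupt)
+ *	csr_write(CSR_EPC, epc);
+ */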
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CSR_H */
diff --git a/arch/riscv/include/asm/current.h b/arch/riscv/include/asm/current.h
new file mode 100644
index 0000000000..21774d868c
--- /dev/null
+++ b/arch/riscv/include/asm/current.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Based on arm/arm64/include/asm/current.h
+ *
+ * Copyright (C) 2016 ARM
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_CURRENT_H
+#define _ASM_RISCV_CURRENT_H
+
+#include <linux/bug.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+
+register struct task_struct *riscv_current_is_tp __asm__("tp");
+
+/*
+ * This only works because "struct thread_info" is at offset 0 from "struct
+ * task_struct". This constraint seems to be necessary on other architectures
+ * as well, but __switch_to enforces it. We can't check TASK_TI here because
+ * <asm/asm-offsets.h> includes this, and I can't get the definition of "struct
+ * task_struct" here due to some header ordering problems.
+ */
+static __always_inline struct task_struct *get_current(void)
+{
+ return riscv_current_is_tp;
+}
+
+#define current get_current()
+
+register unsigned long current_stack_pointer __asm__("sp");
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_CURRENT_H */
diff --git a/arch/riscv/include/asm/delay.h b/arch/riscv/include/asm/delay.h
new file mode 100644
index 0000000000..524f8ef7c8
--- /dev/null
+++ b/arch/riscv/include/asm/delay.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2016 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_DELAY_H
+#define _ASM_RISCV_DELAY_H
+
+extern unsigned long riscv_timebase;
+
+#define udelay udelay
+extern void udelay(unsigned long usecs);
+
+#define ndelay ndelay
+extern void ndelay(unsigned long nsecs);
+
+extern void __delay(unsigned long cycles);
+
+#endif /* _ASM_RISCV_DELAY_H */
diff --git a/arch/riscv/include/asm/dma-noncoherent.h b/arch/riscv/include/asm/dma-noncoherent.h
new file mode 100644
index 0000000000..312cfa0858
--- /dev/null
+++ b/arch/riscv/include/asm/dma-noncoherent.h
@@ -0,0 +1,28 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2023 Renesas Electronics Corp.
+ */
+
+#ifndef __ASM_DMA_NONCOHERENT_H
+#define __ASM_DMA_NONCOHERENT_H
+
+#include <linux/dma-direct.h>
+
+/*
+ * struct riscv_nonstd_cache_ops - Structure for non-standard CMO function pointers
+ *
+ * @wback: Function pointer for cache writeback
+ * @inv: Function pointer for invalidating cache
+ * @wback_inv: Function pointer for flushing the cache (writeback + invalidating)
+ */
+struct riscv_nonstd_cache_ops {
+ void (*wback)(phys_addr_t paddr, size_t size);
+ void (*inv)(phys_addr_t paddr, size_t size);
+ void (*wback_inv)(phys_addr_t paddr, size_t size);
+};
+
+extern struct riscv_nonstd_cache_ops noncoherent_cache_ops;
+
+void riscv_noncoherent_register_cache_ops(const struct riscv_nonstd_cache_ops *ops);
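+
+/*
+ * A platform with vendor-specific CMOs registers its callbacks roughly as
+ * below (a sketch; the "my_*" names are hypothetical):
+ *
+ *	static const struct riscv_nonstd_cache_ops my_cache_ops = {
+ *		.wback		= my_cache_wback,
+ *		.inv		= my_cache_inv,
+ *		.wback_inv	= my_cache_wback_inv,
+ *	};
+ *
+ *	riscv_noncoherent_register_cache_ops(&my_cache_ops);
+ */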
+
+#endif /* __ASM_DMA_NONCOHERENT_H */
diff --git a/arch/riscv/include/asm/efi.h b/arch/riscv/include/asm/efi.h
new file mode 100644
index 0000000000..46a355913b
--- /dev/null
+++ b/arch/riscv/include/asm/efi.h
@@ -0,0 +1,50 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef _ASM_EFI_H
+#define _ASM_EFI_H
+
+#include <asm/csr.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/ptrace.h>
+#include <asm/tlbflush.h>
+#include <asm/pgalloc.h>
+
+#ifdef CONFIG_EFI
+extern void efi_init(void);
+#else
+#define efi_init()
+#endif
+
+int efi_create_mapping(struct mm_struct *mm, efi_memory_desc_t *md);
+int efi_set_mapping_permissions(struct mm_struct *mm, efi_memory_desc_t *md, bool);
+
+#define ARCH_EFI_IRQ_FLAGS_MASK (SR_IE | SR_SPIE)
+
+/* Load initrd anywhere in system RAM */
+static inline unsigned long efi_get_max_initrd_addr(unsigned long image_addr)
+{
+ return ULONG_MAX;
+}
+
+static inline unsigned long efi_get_kimg_min_align(void)
+{
+ /*
+	 * RISC-V requires the kernel image to be placed at a 2 MB aligned base
+	 * for 64-bit and at a 4 MB aligned base for 32-bit.
+ */
+ return IS_ENABLED(CONFIG_64BIT) ? SZ_2M : SZ_4M;
+}
+
+#define EFI_KIMG_PREFERRED_ADDRESS efi_get_kimg_min_align()
+
+void arch_efi_call_virt_setup(void);
+void arch_efi_call_virt_teardown(void);
+
+unsigned long stext_offset(void);
+
+void efi_icache_sync(unsigned long start, unsigned long end);
+
+#endif /* _ASM_EFI_H */
diff --git a/arch/riscv/include/asm/elf.h b/arch/riscv/include/asm/elf.h
new file mode 100644
index 0000000000..b3b2dfbdf9
--- /dev/null
+++ b/arch/riscv/include/asm/elf.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
+ * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_ELF_H
+#define _ASM_RISCV_ELF_H
+
+#include <uapi/linux/elf.h>
+#include <linux/compat.h>
+#include <uapi/asm/elf.h>
+#include <asm/auxvec.h>
+#include <asm/byteorder.h>
+#include <asm/cacheinfo.h>
+#include <asm/hwcap.h>
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_ARCH EM_RISCV
+
+#ifndef ELF_CLASS
+#ifdef CONFIG_64BIT
+#define ELF_CLASS ELFCLASS64
+#else
+#define ELF_CLASS ELFCLASS32
+#endif
+#endif
+
+#define ELF_DATA ELFDATA2LSB
+
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch(x) (((x)->e_machine == EM_RISCV) && \
+ ((x)->e_ident[EI_CLASS] == ELF_CLASS))
+
+extern bool compat_elf_check_arch(Elf32_Ehdr *hdr);
+#define compat_elf_check_arch compat_elf_check_arch
+
+#define CORE_DUMP_USE_REGSET
+#define ELF_FDPIC_CORE_EFLAGS 0
+#define ELF_EXEC_PAGESIZE (PAGE_SIZE)
+
+/*
+ * This is the location that an ET_DYN program is loaded if exec'ed. Typical
+ * use of this is to invoke "./ld.so someprog" to test out a new version of
+ * the loader. We need to make sure that it is out of the way of the program
+ * that it will "exec", and that there is sufficient room for the brk.
+ */
+#define ELF_ET_DYN_BASE ((DEFAULT_MAP_WINDOW / 3) * 2)
+
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_COMPAT
+#define STACK_RND_MASK (test_thread_flag(TIF_32BIT) ? \
+ 0x7ff >> (PAGE_SHIFT - 12) : \
+ 0x3ffff >> (PAGE_SHIFT - 12))
+#else
+#define STACK_RND_MASK (0x3ffff >> (PAGE_SHIFT - 12))
+#endif
+#endif
+
+/*
+ * Provides information on the available set of ISA extensions to userspace,
+ * via a bitmap that corresponds to each single-letter ISA extension. This is
+ * essentially defunct, but will remain for compatibility with userspace.
+ */
+#define ELF_HWCAP riscv_get_elf_hwcap()
+extern unsigned long elf_hwcap;
+
+#define ELF_FDPIC_PLAT_INIT(_r, _exec_map_addr, _interp_map_addr, dynamic_addr) \
+ do { \
+ (_r)->a1 = _exec_map_addr; \
+ (_r)->a2 = _interp_map_addr; \
+ (_r)->a3 = dynamic_addr; \
+ } while (0)
+
+/*
+ * This yields a string that ld.so will use to load implementation
+ * specific libraries for optimization. This is more specific in
+ * intent than poking at uname or /proc/cpuinfo.
+ */
+#define ELF_PLATFORM (NULL)
+
+#define COMPAT_ELF_PLATFORM (NULL)
+
+#define ARCH_DLINFO \
+do { \
+ /* \
+ * Note that we add ulong after elf_addr_t because \
+ * casting current->mm->context.vdso triggers a cast \
+ * warning of cast from pointer to integer for \
+ * COMPAT ELFCLASS32. \
+ */ \
+ NEW_AUX_ENT(AT_SYSINFO_EHDR, \
+ (elf_addr_t)(ulong)current->mm->context.vdso); \
+ NEW_AUX_ENT(AT_L1I_CACHESIZE, \
+ get_cache_size(1, CACHE_TYPE_INST)); \
+ NEW_AUX_ENT(AT_L1I_CACHEGEOMETRY, \
+ get_cache_geometry(1, CACHE_TYPE_INST)); \
+ NEW_AUX_ENT(AT_L1D_CACHESIZE, \
+ get_cache_size(1, CACHE_TYPE_DATA)); \
+ NEW_AUX_ENT(AT_L1D_CACHEGEOMETRY, \
+ get_cache_geometry(1, CACHE_TYPE_DATA)); \
+ NEW_AUX_ENT(AT_L2_CACHESIZE, \
+ get_cache_size(2, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L2_CACHEGEOMETRY, \
+ get_cache_geometry(2, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L3_CACHESIZE, \
+ get_cache_size(3, CACHE_TYPE_UNIFIED)); \
+ NEW_AUX_ENT(AT_L3_CACHEGEOMETRY, \
+ get_cache_geometry(3, CACHE_TYPE_UNIFIED)); \
+ /* \
+ * Should always be nonzero unless there's a kernel bug. \
+ * If we haven't determined a sensible value to give to \
+ * userspace, omit the entry: \
+ */ \
+ if (likely(signal_minsigstksz)) \
+ NEW_AUX_ENT(AT_MINSIGSTKSZ, signal_minsigstksz); \
+ else \
+ NEW_AUX_ENT(AT_IGNORE, 0); \
+} while (0)
+
+#ifdef CONFIG_MMU
+#define ARCH_HAS_SETUP_ADDITIONAL_PAGES
+struct linux_binprm;
+extern int arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#endif /* CONFIG_MMU */
+
+#define ELF_CORE_COPY_REGS(dest, regs) \
+do { \
+ *(struct user_regs_struct *)&(dest) = \
+ *(struct user_regs_struct *)regs; \
+} while (0);
+
+#ifdef CONFIG_COMPAT
+
+#define SET_PERSONALITY(ex) \
+do { if ((ex).e_ident[EI_CLASS] == ELFCLASS32) \
+ set_thread_flag(TIF_32BIT); \
+ else \
+ clear_thread_flag(TIF_32BIT); \
+ if (personality(current->personality) != PER_LINUX32) \
+ set_personality(PER_LINUX | \
+ (current->personality & (~PER_MASK))); \
+} while (0)
+
+#define COMPAT_ELF_ET_DYN_BASE ((TASK_SIZE_32 / 3) * 2)
+
+/* rv32 registers */
+typedef compat_ulong_t compat_elf_greg_t;
+typedef compat_elf_greg_t compat_elf_gregset_t[ELF_NGREG];
+
+extern int compat_arch_setup_additional_pages(struct linux_binprm *bprm,
+ int uses_interp);
+#define compat_arch_setup_additional_pages \
+ compat_arch_setup_additional_pages
+
+#endif /* CONFIG_COMPAT */
+#endif /* _ASM_RISCV_ELF_H */
diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
new file mode 100644
index 0000000000..6e4dee49d8
--- /dev/null
+++ b/arch/riscv/include/asm/entry-common.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_ENTRY_COMMON_H
+#define _ASM_RISCV_ENTRY_COMMON_H
+
+#include <asm/stacktrace.h>
+
+void handle_page_fault(struct pt_regs *regs);
+void handle_break(struct pt_regs *regs);
+
+#endif /* _ASM_RISCV_ENTRY_COMMON_H */
diff --git a/arch/riscv/include/asm/errata_list.h b/arch/riscv/include/asm/errata_list.h
new file mode 100644
index 0000000000..b55b434f00
--- /dev/null
+++ b/arch/riscv/include/asm/errata_list.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Sifive.
+ */
+#ifndef ASM_ERRATA_LIST_H
+#define ASM_ERRATA_LIST_H
+
+#include <asm/alternative.h>
+#include <asm/csr.h>
+#include <asm/insn-def.h>
+#include <asm/hwcap.h>
+#include <asm/vendorid_list.h>
+
+#ifdef CONFIG_ERRATA_ANDES
+#define ERRATA_ANDESTECH_NO_IOCP 0
+#define ERRATA_ANDESTECH_NUMBER 1
+#endif
+
+#ifdef CONFIG_ERRATA_SIFIVE
+#define ERRATA_SIFIVE_CIP_453 0
+#define ERRATA_SIFIVE_CIP_1200 1
+#define ERRATA_SIFIVE_NUMBER 2
+#endif
+
+#ifdef CONFIG_ERRATA_THEAD
+#define ERRATA_THEAD_PBMT 0
+#define ERRATA_THEAD_CMO 1
+#define ERRATA_THEAD_PMU 2
+#define ERRATA_THEAD_NUMBER 3
+#endif
+
+#ifdef __ASSEMBLY__
+
+#define ALT_INSN_FAULT(x) \
+ALTERNATIVE(__stringify(RISCV_PTR do_trap_insn_fault), \
+ __stringify(RISCV_PTR sifive_cip_453_insn_fault_trp), \
+ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+
+#define ALT_PAGE_FAULT(x) \
+ALTERNATIVE(__stringify(RISCV_PTR do_page_fault), \
+ __stringify(RISCV_PTR sifive_cip_453_page_fault_trp), \
+ SIFIVE_VENDOR_ID, ERRATA_SIFIVE_CIP_453, \
+ CONFIG_ERRATA_SIFIVE_CIP_453)
+#else /* !__ASSEMBLY__ */
+
+#define ALT_FLUSH_TLB_PAGE(x) \
+asm(ALTERNATIVE("sfence.vma %0", "sfence.vma", SIFIVE_VENDOR_ID, \
+ ERRATA_SIFIVE_CIP_1200, CONFIG_ERRATA_SIFIVE_CIP_1200) \
+ : : "r" (addr) : "memory")
+
+/*
+ * _val is marked as "will be overwritten", so it needs to be set to 0
+ * in the default case.
+ */
+#define ALT_SVPBMT_SHIFT 61
+#define ALT_THEAD_PBMT_SHIFT 59
+#define ALT_SVPBMT(_val, prot) \
+asm(ALTERNATIVE_2("li %0, 0\t\nnop", \
+ "li %0, %1\t\nslli %0,%0,%3", 0, \
+ RISCV_ISA_EXT_SVPBMT, CONFIG_RISCV_ISA_SVPBMT, \
+ "li %0, %2\t\nslli %0,%0,%4", THEAD_VENDOR_ID, \
+ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ : "=r"(_val) \
+ : "I"(prot##_SVPBMT >> ALT_SVPBMT_SHIFT), \
+ "I"(prot##_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(ALT_SVPBMT_SHIFT), \
+ "I"(ALT_THEAD_PBMT_SHIFT))
+
+#ifdef CONFIG_ERRATA_THEAD_PBMT
+/*
+ * IO/NOCACHE memory types are handled together with svpbmt,
+ * so on T-Head chips, check if no other memory type is set,
+ * and set the non-0 PMA type if applicable.
+ */
+#define ALT_THEAD_PMA(_val) \
+asm volatile(ALTERNATIVE( \
+ __nops(7), \
+ "li t3, %1\n\t" \
+ "slli t3, t3, %3\n\t" \
+ "and t3, %0, t3\n\t" \
+ "bne t3, zero, 2f\n\t" \
+ "li t3, %2\n\t" \
+ "slli t3, t3, %3\n\t" \
+ "or %0, %0, t3\n\t" \
+ "2:", THEAD_VENDOR_ID, \
+ ERRATA_THEAD_PBMT, CONFIG_ERRATA_THEAD_PBMT) \
+ : "+r"(_val) \
+ : "I"(_PAGE_MTMASK_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(_PAGE_PMA_THEAD >> ALT_THEAD_PBMT_SHIFT), \
+ "I"(ALT_THEAD_PBMT_SHIFT) \
+ : "t3")
+#else
+#define ALT_THEAD_PMA(_val)
+#endif
+
+/*
+ * dcache.ipa rs1 (invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01010 rs1 000 00000 0001011
+ * dcache.iva rs1 (invalidate, virtual address)
+ * 0000001 00110 rs1 000 00000 0001011
+ *
+ * dcache.cpa rs1 (clean, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01001 rs1 000 00000 0001011
+ * dcache.cva rs1 (clean, virtual address)
+ * 0000001 00101 rs1 000 00000 0001011
+ *
+ * dcache.cipa rs1 (clean then invalidate, physical address)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000001 01011 rs1 000 00000 0001011
+ * dcache.civa rs1 (clean then invalidate, virtual address)
+ * 0000001 00111 rs1 000 00000 0001011
+ *
+ * sync.s (make sure all cache operations finished)
+ * | 31 - 25 | 24 - 20 | 19 - 15 | 14 - 12 | 11 - 7 | 6 - 0 |
+ * 0000000 11001 00000 000 00000 0001011
+ */
+#define THEAD_inval_A0 ".long 0x0265000b"
+#define THEAD_clean_A0 ".long 0x0255000b"
+#define THEAD_flush_A0 ".long 0x0275000b"
+#define THEAD_SYNC_S ".long 0x0190000b"
+
+#define ALT_CMO_OP(_op, _start, _size, _cachesize) \
+asm volatile(ALTERNATIVE_2( \
+ __nops(6), \
+ "mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ CBO_##_op(a0) \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ "nop", 0, RISCV_ISA_EXT_ZICBOM, CONFIG_RISCV_ISA_ZICBOM, \
+ "mv a0, %1\n\t" \
+ "j 2f\n\t" \
+ "3:\n\t" \
+ THEAD_##_op##_A0 "\n\t" \
+ "add a0, a0, %0\n\t" \
+ "2:\n\t" \
+ "bltu a0, %2, 3b\n\t" \
+ THEAD_SYNC_S, THEAD_VENDOR_ID, \
+ ERRATA_THEAD_CMO, CONFIG_ERRATA_THEAD_CMO) \
+ : : "r"(_cachesize), \
+ "r"((unsigned long)(_start) & ~((_cachesize) - 1UL)), \
+ "r"((unsigned long)(_start) + (_size)) \
+ : "a0")
+
+#define THEAD_C9XX_RV_IRQ_PMU 17
+#define THEAD_C9XX_CSR_SCOUNTEROF 0x5c5
+
+#define ALT_SBI_PMU_OVERFLOW(__ovl) \
+asm volatile(ALTERNATIVE( \
+ "csrr %0, " __stringify(CSR_SSCOUNTOVF), \
+ "csrr %0, " __stringify(THEAD_C9XX_CSR_SCOUNTEROF), \
+ THEAD_VENDOR_ID, ERRATA_THEAD_PMU, \
+ CONFIG_ERRATA_THEAD_PMU) \
+ : "=r" (__ovl) : \
+ : "memory")
+
+#endif /* __ASSEMBLY__ */
+
+#endif
diff --git a/arch/riscv/include/asm/extable.h b/arch/riscv/include/asm/extable.h
new file mode 100644
index 0000000000..3eb5c1f7bf
--- /dev/null
+++ b/arch/riscv/include/asm/extable.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_EXTABLE_H
+#define _ASM_RISCV_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of relative offsets: the first
+ * is the relative offset to an instruction that is allowed to fault,
+ * and the second is the relative offset at which the program should
+ * continue. No registers are modified, so it is entirely up to the
+ * continuation code to figure out what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
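+
+/*
+ * A sketch of how an entry is attached to a faulting instruction via the
+ * _ASM_EXTABLE helper from asm-extable.h (labels are illustrative):
+ *
+ *	1:	lw	a0, (a1)	faulting access
+ *	2:	...			continuation
+ *	_ASM_EXTABLE(1b, 2b)
+ */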
+
+struct exception_table_entry {
+ int insn, fixup;
+ short type, data;
+};
+
+#define ARCH_HAS_RELATIVE_EXTABLE
+
+#define swap_ex_entry_fixup(a, b, tmp, delta) \
+do { \
+ (a)->fixup = (b)->fixup + (delta); \
+ (b)->fixup = (tmp).fixup - (delta); \
+ (a)->type = (b)->type; \
+ (b)->type = (tmp).type; \
+ (a)->data = (b)->data; \
+ (b)->data = (tmp).data; \
+} while (0)
+
+#ifdef CONFIG_MMU
+bool fixup_exception(struct pt_regs *regs);
+#else
+static inline bool fixup_exception(struct pt_regs *regs) { return false; }
+#endif
+
+#if defined(CONFIG_BPF_JIT) && defined(CONFIG_ARCH_RV64I)
+bool ex_handler_bpf(const struct exception_table_entry *ex, struct pt_regs *regs);
+#else
+static inline bool
+ex_handler_bpf(const struct exception_table_entry *ex,
+ struct pt_regs *regs)
+{
+ return false;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/fence.h b/arch/riscv/include/asm/fence.h
new file mode 100644
index 0000000000..2b443a3a48
--- /dev/null
+++ b/arch/riscv/include/asm/fence.h
@@ -0,0 +1,12 @@
+#ifndef _ASM_RISCV_FENCE_H
+#define _ASM_RISCV_FENCE_H
+
+#ifdef CONFIG_SMP
+#define RISCV_ACQUIRE_BARRIER "\tfence r , rw\n"
+#define RISCV_RELEASE_BARRIER "\tfence rw, w\n"
+#else
+#define RISCV_ACQUIRE_BARRIER
+#define RISCV_RELEASE_BARRIER
+#endif
+
+#endif /* _ASM_RISCV_FENCE_H */
diff --git a/arch/riscv/include/asm/fixmap.h b/arch/riscv/include/asm/fixmap.h
new file mode 100644
index 0000000000..0a55099bb7
--- /dev/null
+++ b/arch/riscv/include/asm/fixmap.h
@@ -0,0 +1,67 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_FIXMAP_H
+#define _ASM_RISCV_FIXMAP_H
+
+#include <linux/kernel.h>
+#include <linux/sizes.h>
+#include <linux/pgtable.h>
+#include <asm/page.h>
+
+#ifdef CONFIG_MMU
+/*
+ * Here we define all the compile-time 'special' virtual addresses.
+ * The point is to have a constant address at compile time, but to
+ * set the physical address only in the boot process.
+ *
+ * These 'compile-time allocated' memory buffers are page-sized. Use
+ * set_fixmap(idx,phys) to associate physical memory with fixmap indices.
+ */
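+/*
+ * A minimal sketch of the usual pattern (index and addresses illustrative):
+ *
+ *	__set_fixmap(FIX_EARLYCON_MEM_BASE, paddr & PAGE_MASK, FIXMAP_PAGE_IO);
+ *	vaddr = fix_to_virt(FIX_EARLYCON_MEM_BASE) + (paddr & ~PAGE_MASK);
+ */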
+enum fixed_addresses {
+ FIX_HOLE,
+ /*
+ * The fdt fixmap mapping must be PMD aligned and will be mapped
+ * using PMD entries in fixmap_pmd in 64-bit and a PGD entry in 32-bit.
+ */
+ FIX_FDT_END,
+ FIX_FDT = FIX_FDT_END + FIX_FDT_SIZE / PAGE_SIZE - 1,
+
+ /* Below fixmaps will be mapped using fixmap_pte */
+ FIX_PTE,
+ FIX_PMD,
+ FIX_PUD,
+ FIX_P4D,
+ FIX_TEXT_POKE1,
+ FIX_TEXT_POKE0,
+ FIX_EARLYCON_MEM_BASE,
+
+ __end_of_permanent_fixed_addresses,
+ /*
+ * Temporary boot-time mappings, used by early_ioremap(),
+ * before ioremap() is functional.
+ */
+#define NR_FIX_BTMAPS (SZ_256K / PAGE_SIZE)
+#define FIX_BTMAPS_SLOTS 7
+#define TOTAL_FIX_BTMAPS (NR_FIX_BTMAPS * FIX_BTMAPS_SLOTS)
+
+ FIX_BTMAP_END = __end_of_permanent_fixed_addresses,
+ FIX_BTMAP_BEGIN = FIX_BTMAP_END + TOTAL_FIX_BTMAPS - 1,
+
+ __end_of_fixed_addresses
+};
+
+#define __early_set_fixmap __set_fixmap
+
+#define __late_set_fixmap __set_fixmap
+#define __late_clear_fixmap(idx) __set_fixmap((idx), 0, FIXMAP_PAGE_CLEAR)
+
+extern void __set_fixmap(enum fixed_addresses idx,
+ phys_addr_t phys, pgprot_t prot);
+
+#include <asm-generic/fixmap.h>
+
+#endif /* CONFIG_MMU */
+#endif /* _ASM_RISCV_FIXMAP_H */
diff --git a/arch/riscv/include/asm/ftrace.h b/arch/riscv/include/asm/ftrace.h
new file mode 100644
index 0000000000..2b2f5df7ef
--- /dev/null
+++ b/arch/riscv/include/asm/ftrace.h
@@ -0,0 +1,156 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_FTRACE_H
+#define _ASM_RISCV_FTRACE_H
+
+/*
+ * The graph frame test is not possible if CONFIG_FRAME_POINTER is not enabled.
+ * Check arch/riscv/kernel/mcount.S for detail.
+ */
+#if defined(CONFIG_FUNCTION_GRAPH_TRACER) && defined(CONFIG_FRAME_POINTER)
+#define HAVE_FUNCTION_GRAPH_FP_TEST
+#endif
+#define HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
+
+/*
+ * Clang prior to 13 had "mcount" instead of "_mcount":
+ * https://reviews.llvm.org/D98881
+ */
+#if defined(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 130000
+#define MCOUNT_NAME _mcount
+#else
+#define MCOUNT_NAME mcount
+#endif
+
+#define ARCH_SUPPORTS_FTRACE_OPS 1
+#ifndef __ASSEMBLY__
+void MCOUNT_NAME(void);
+static inline unsigned long ftrace_call_adjust(unsigned long addr)
+{
+ return addr;
+}
+
+/*
+ * Let's do as x86/arm64 do and ignore the compat syscalls.
+ */
+#define ARCH_TRACE_IGNORE_COMPAT_SYSCALLS
+static inline bool arch_trace_is_compat_syscall(struct pt_regs *regs)
+{
+ return is_compat_task();
+}
+
+#define ARCH_HAS_SYSCALL_MATCH_SYM_NAME
+static inline bool arch_syscall_match_sym_name(const char *sym,
+ const char *name)
+{
+ /*
+ * Since all syscall functions have __riscv_ prefix, we must skip it.
+ * However, as we described above, we decided to ignore compat
+ * syscalls, so we don't care about __riscv_compat_ prefix here.
+ */
+ return !strcmp(sym + 8, name);
+}
+
+struct dyn_arch_ftrace {
+};
+#endif
+
+#ifdef CONFIG_DYNAMIC_FTRACE
+/*
+ * A general call in RISC-V is a pair of insts:
+ * 1) auipc: set the upper 20 bits of the PC-relative offset in a register
+ * 2) jalr: add the lower 12 bits of the offset, jump there, and set ra to
+ * the return address (original pc + 4)
+ *
+ *<ftrace enable>:
+ * 0: auipc t0/ra, 0x?
+ * 4: jalr t0/ra, ?(t0/ra)
+ *
+ *<ftrace disable>:
+ * 0: nop
+ * 4: nop
+ *
+ * Dynamic ftrace generates probes to call sites, so we must deal with
+ * both auipc and jalr at the same time.
+ */
+
+#define MCOUNT_ADDR ((unsigned long)MCOUNT_NAME)
+#define JALR_SIGN_MASK (0x00000800)
+#define JALR_OFFSET_MASK (0x00000fff)
+#define AUIPC_OFFSET_MASK (0xfffff000)
+#define AUIPC_PAD (0x00001000)
+#define JALR_SHIFT 20
+#define JALR_RA (0x000080e7)
+#define AUIPC_RA (0x00000097)
+#define JALR_T0 (0x000282e7)
+#define AUIPC_T0 (0x00000297)
+#define NOP4 (0x00000013)
+
+#define to_jalr_t0(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_T0)
+
+#define to_auipc_t0(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_T0) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_T0))
+
+#define make_call_t0(caller, callee, call) \
+do { \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_t0(offset); \
+ call[1] = to_jalr_t0(offset); \
+} while (0)
+
+#define to_jalr_ra(offset) \
+ (((offset & JALR_OFFSET_MASK) << JALR_SHIFT) | JALR_RA)
+
+#define to_auipc_ra(offset) \
+ ((offset & JALR_SIGN_MASK) ? \
+ (((offset & AUIPC_OFFSET_MASK) + AUIPC_PAD) | AUIPC_RA) : \
+ ((offset & AUIPC_OFFSET_MASK) | AUIPC_RA))
+
+#define make_call_ra(caller, callee, call) \
+do { \
+ unsigned int offset = \
+ (unsigned long) callee - (unsigned long) caller; \
+ call[0] = to_auipc_ra(offset); \
+ call[1] = to_jalr_ra(offset); \
+} while (0)
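+
+/*
+ * Illustrative (hypothetical) use when enabling a call site: build the
+ * auipc+jalr pair and patch it over the two nops:
+ *
+ *	unsigned int call[2];
+ *
+ *	make_call_t0(rec->ip, ftrace_caller, call);
+ *	patch_text_nosync((void *)rec->ip, call, MCOUNT_INSN_SIZE);
+ */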
+
+/*
+ * Let auipc+jalr be the basic *mcount unit*, so we make it 8 bytes here.
+ */
+#define MCOUNT_INSN_SIZE 8
+
+#ifndef __ASSEMBLY__
+struct dyn_ftrace;
+int ftrace_init_nop(struct module *mod, struct dyn_ftrace *rec);
+#define ftrace_init_nop ftrace_init_nop
+#endif
+
+#endif /* CONFIG_DYNAMIC_FTRACE */
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_FUNCTION_GRAPH_TRACER
+struct fgraph_ret_regs {
+ unsigned long a1;
+ unsigned long a0;
+ unsigned long s0;
+ unsigned long ra;
+};
+
+static inline unsigned long fgraph_ret_regs_return_value(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->a0;
+}
+
+static inline unsigned long fgraph_ret_regs_frame_pointer(struct fgraph_ret_regs *ret_regs)
+{
+ return ret_regs->s0;
+}
+#endif /* ifdef CONFIG_FUNCTION_GRAPH_TRACER */
+#endif
+
+#endif /* _ASM_RISCV_FTRACE_H */
diff --git a/arch/riscv/include/asm/futex.h b/arch/riscv/include/asm/futex.h
new file mode 100644
index 0000000000..fc8130f995
--- /dev/null
+++ b/arch/riscv/include/asm/futex.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2006 Ralf Baechle (ralf@linux-mips.org)
+ * Copyright (c) 2018 Jim Wilson (jimw@sifive.com)
+ */
+
+#ifndef _ASM_RISCV_FUTEX_H
+#define _ASM_RISCV_FUTEX_H
+
+#include <linux/futex.h>
+#include <linux/uaccess.h>
+#include <linux/errno.h>
+#include <asm/asm.h>
+#include <asm/asm-extable.h>
+
+/* We don't even really need the extable code, but for now keep it simple */
+#ifndef CONFIG_MMU
+#define __enable_user_access() do { } while (0)
+#define __disable_user_access() do { } while (0)
+#endif
+
+#define __futex_atomic_op(insn, ret, oldval, uaddr, oparg) \
+{ \
+ __enable_user_access(); \
+ __asm__ __volatile__ ( \
+ "1: " insn " \n" \
+ "2: \n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %[r]) \
+ : [r] "+r" (ret), [ov] "=&r" (oldval), \
+ [u] "+m" (*uaddr) \
+ : [op] "Jr" (oparg) \
+ : "memory"); \
+ __disable_user_access(); \
+}
+
+static inline int
+arch_futex_atomic_op_inuser(int op, int oparg, int *oval, u32 __user *uaddr)
+{
+ int oldval = 0, ret = 0;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ switch (op) {
+ case FUTEX_OP_SET:
+ __futex_atomic_op("amoswap.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ADD:
+ __futex_atomic_op("amoadd.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_OR:
+ __futex_atomic_op("amoor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ case FUTEX_OP_ANDN:
+ __futex_atomic_op("amoand.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, ~oparg);
+ break;
+ case FUTEX_OP_XOR:
+ __futex_atomic_op("amoxor.w.aqrl %[ov],%z[op],%[u]",
+ ret, oldval, uaddr, oparg);
+ break;
+ default:
+ ret = -ENOSYS;
+ }
+
+ if (!ret)
+ *oval = oldval;
+
+ return ret;
+}
+
+static inline int
+futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr,
+ u32 oldval, u32 newval)
+{
+ int ret = 0;
+ u32 val;
+ uintptr_t tmp;
+
+ if (!access_ok(uaddr, sizeof(u32)))
+ return -EFAULT;
+
+ __enable_user_access();
+ __asm__ __volatile__ (
+ "1: lr.w.aqrl %[v],%[u] \n"
+ " bne %[v],%z[ov],3f \n"
+ "2: sc.w.aqrl %[t],%z[nv],%[u] \n"
+ " bnez %[t],1b \n"
+ "3: \n"
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %[r]) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %[r]) \
+ : [r] "+r" (ret), [v] "=&r" (val), [u] "+m" (*uaddr), [t] "=&r" (tmp)
+ : [ov] "Jr" (oldval), [nv] "Jr" (newval)
+ : "memory");
+ __disable_user_access();
+
+ *uval = val;
+ return ret;
+}
+
+#endif /* _ASM_RISCV_FUTEX_H */
diff --git a/arch/riscv/include/asm/gdb_xml.h b/arch/riscv/include/asm/gdb_xml.h
new file mode 100644
index 0000000000..09342111f2
--- /dev/null
+++ b/arch/riscv/include/asm/gdb_xml.h
@@ -0,0 +1,116 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_GDB_XML_H_
+#define __ASM_GDB_XML_H_
+
+const char riscv_gdb_stub_feature[64] =
+ "PacketSize=800;qXfer:features:read+;";
+
+static const char gdb_xfer_read_target[31] = "qXfer:features:read:target.xml:";
+
+#ifdef CONFIG_64BIT
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-64bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-64bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"64\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"64\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"64\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"64\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"64\" type=\"int\"/>"
+"</feature>";
+#else
+static const char gdb_xfer_read_cpuxml[39] =
+ "qXfer:features:read:riscv-32bit-cpu.xml";
+
+static const char riscv_gdb_stub_target_desc[256] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE target SYSTEM \"gdb-target.dtd\">"
+"<target>"
+"<xi:include href=\"riscv-32bit-cpu.xml\"/>"
+"</target>";
+
+static const char riscv_gdb_stub_cpuxml[2048] =
+"l<?xml version=\"1.0\"?>"
+"<!DOCTYPE feature SYSTEM \"gdb-target.dtd\">"
+"<feature name=\"org.gnu.gdb.riscv.cpu\">"
+"<reg name=\""DBG_REG_ZERO"\" bitsize=\"32\" type=\"int\" regnum=\"0\"/>"
+"<reg name=\""DBG_REG_RA"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_SP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_GP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_TP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_T0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_FP"\" bitsize=\"32\" type=\"data_ptr\"/>"
+"<reg name=\""DBG_REG_S1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A0"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A1"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_A7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S2"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S7"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S8"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S9"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S10"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_S11"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T3"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T4"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T5"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_T6"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_EPC"\" bitsize=\"32\" type=\"code_ptr\"/>"
+"<reg name=\""DBG_REG_STATUS"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_BADADDR"\" bitsize=\"32\" type=\"int\"/>"
+"<reg name=\""DBG_REG_CAUSE"\" bitsize=\"32\" type=\"int\"/>"
+"</feature>";
+#endif
+#endif
diff --git a/arch/riscv/include/asm/gpr-num.h b/arch/riscv/include/asm/gpr-num.h
new file mode 100644
index 0000000000..efeb5edf8a
--- /dev/null
+++ b/arch/riscv/include/asm/gpr-num.h
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_GPR_NUM_H
+#define __ASM_GPR_NUM_H
+
+#ifdef __ASSEMBLY__
+
+ .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31
+ .equ .L__gpr_num_x\num, \num
+ .endr
+
+ .equ .L__gpr_num_zero, 0
+ .equ .L__gpr_num_ra, 1
+ .equ .L__gpr_num_sp, 2
+ .equ .L__gpr_num_gp, 3
+ .equ .L__gpr_num_tp, 4
+ .equ .L__gpr_num_t0, 5
+ .equ .L__gpr_num_t1, 6
+ .equ .L__gpr_num_t2, 7
+ .equ .L__gpr_num_s0, 8
+ .equ .L__gpr_num_s1, 9
+ .equ .L__gpr_num_a0, 10
+ .equ .L__gpr_num_a1, 11
+ .equ .L__gpr_num_a2, 12
+ .equ .L__gpr_num_a3, 13
+ .equ .L__gpr_num_a4, 14
+ .equ .L__gpr_num_a5, 15
+ .equ .L__gpr_num_a6, 16
+ .equ .L__gpr_num_a7, 17
+ .equ .L__gpr_num_s2, 18
+ .equ .L__gpr_num_s3, 19
+ .equ .L__gpr_num_s4, 20
+ .equ .L__gpr_num_s5, 21
+ .equ .L__gpr_num_s6, 22
+ .equ .L__gpr_num_s7, 23
+ .equ .L__gpr_num_s8, 24
+ .equ .L__gpr_num_s9, 25
+ .equ .L__gpr_num_s10, 26
+ .equ .L__gpr_num_s11, 27
+ .equ .L__gpr_num_t3, 28
+ .equ .L__gpr_num_t4, 29
+ .equ .L__gpr_num_t5, 30
+ .equ .L__gpr_num_t6, 31
+
+#else /* __ASSEMBLY__ */
+
+#define __DEFINE_ASM_GPR_NUMS \
+" .irp num,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31\n" \
+" .equ .L__gpr_num_x\\num, \\num\n" \
+" .endr\n" \
+" .equ .L__gpr_num_zero, 0\n" \
+" .equ .L__gpr_num_ra, 1\n" \
+" .equ .L__gpr_num_sp, 2\n" \
+" .equ .L__gpr_num_gp, 3\n" \
+" .equ .L__gpr_num_tp, 4\n" \
+" .equ .L__gpr_num_t0, 5\n" \
+" .equ .L__gpr_num_t1, 6\n" \
+" .equ .L__gpr_num_t2, 7\n" \
+" .equ .L__gpr_num_s0, 8\n" \
+" .equ .L__gpr_num_s1, 9\n" \
+" .equ .L__gpr_num_a0, 10\n" \
+" .equ .L__gpr_num_a1, 11\n" \
+" .equ .L__gpr_num_a2, 12\n" \
+" .equ .L__gpr_num_a3, 13\n" \
+" .equ .L__gpr_num_a4, 14\n" \
+" .equ .L__gpr_num_a5, 15\n" \
+" .equ .L__gpr_num_a6, 16\n" \
+" .equ .L__gpr_num_a7, 17\n" \
+" .equ .L__gpr_num_s2, 18\n" \
+" .equ .L__gpr_num_s3, 19\n" \
+" .equ .L__gpr_num_s4, 20\n" \
+" .equ .L__gpr_num_s5, 21\n" \
+" .equ .L__gpr_num_s6, 22\n" \
+" .equ .L__gpr_num_s7, 23\n" \
+" .equ .L__gpr_num_s8, 24\n" \
+" .equ .L__gpr_num_s9, 25\n" \
+" .equ .L__gpr_num_s10, 26\n" \
+" .equ .L__gpr_num_s11, 27\n" \
+" .equ .L__gpr_num_t3, 28\n" \
+" .equ .L__gpr_num_t4, 29\n" \
+" .equ .L__gpr_num_t5, 30\n" \
+" .equ .L__gpr_num_t6, 31\n"
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_GPR_NUM_H */
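A hedged sketch of why the .L__gpr_num_* symbols exist: C code can prepend __DEFINE_ASM_GPR_NUMS to an inline-asm template so that a compiler-substituted register name resolves to its 5-bit encoding (insn-def.h below relies on this when the assembler lacks .insn). LI_ZERO is a hypothetical macro; 0x13 is the OP-IMM opcode and rd sits at bit 7.

/*
 * Hypothetical sketch: emit "addi <rd>, x0, 0" (i.e. li <rd>, 0) by
 * hand. GCC substitutes a register name such as "a5" for %0, and the
 * .L__gpr_num_* symbols defined above map that name to its number.
 */
#define LI_ZERO(rd)							\
	asm volatile(__DEFINE_ASM_GPR_NUMS				\
		     "	.4byte (0x13 | (.L__gpr_num_%0 << 7))\n"	\
		     : "=r" (rd))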
diff --git a/arch/riscv/include/asm/hugetlb.h b/arch/riscv/include/asm/hugetlb.h
new file mode 100644
index 0000000000..4c5b0e9298
--- /dev/null
+++ b/arch/riscv/include/asm/hugetlb.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_HUGETLB_H
+#define _ASM_RISCV_HUGETLB_H
+
+#include <asm/cacheflush.h>
+#include <asm/page.h>
+
+static inline void arch_clear_hugepage_flags(struct page *page)
+{
+ clear_bit(PG_dcache_clean, &page->flags);
+}
+#define arch_clear_hugepage_flags arch_clear_hugepage_flags
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define __HAVE_ARCH_HUGE_PTE_CLEAR
+void huge_pte_clear(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, unsigned long sz);
+
+#define __HAVE_ARCH_HUGE_SET_HUGE_PTE_AT
+void set_huge_pte_at(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep, pte_t pte,
+ unsigned long sz);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET_AND_CLEAR
+pte_t huge_ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_CLEAR_FLUSH
+pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_WRPROTECT
+void huge_ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep);
+
+#define __HAVE_ARCH_HUGE_PTEP_SET_ACCESS_FLAGS
+int huge_ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long addr, pte_t *ptep,
+ pte_t pte, int dirty);
+
+#define __HAVE_ARCH_HUGE_PTEP_GET
+pte_t huge_ptep_get(pte_t *ptep);
+
+pte_t arch_make_huge_pte(pte_t entry, unsigned int shift, vm_flags_t flags);
+#define arch_make_huge_pte arch_make_huge_pte
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+
+#include <asm-generic/hugetlb.h>
+
+#endif /* _ASM_RISCV_HUGETLB_H */
diff --git a/arch/riscv/include/asm/hwcap.h b/arch/riscv/include/asm/hwcap.h
new file mode 100644
index 0000000000..b7b58258f6
--- /dev/null
+++ b/arch/riscv/include/asm/hwcap.h
@@ -0,0 +1,142 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from arch/arm64/include/asm/hwcap.h
+ *
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2017 SiFive
+ */
+#ifndef _ASM_RISCV_HWCAP_H
+#define _ASM_RISCV_HWCAP_H
+
+#include <asm/alternative-macros.h>
+#include <asm/errno.h>
+#include <linux/bits.h>
+#include <uapi/asm/hwcap.h>
+
+#define RISCV_ISA_EXT_a ('a' - 'a')
+#define RISCV_ISA_EXT_b ('b' - 'a')
+#define RISCV_ISA_EXT_c ('c' - 'a')
+#define RISCV_ISA_EXT_d ('d' - 'a')
+#define RISCV_ISA_EXT_f ('f' - 'a')
+#define RISCV_ISA_EXT_h ('h' - 'a')
+#define RISCV_ISA_EXT_i ('i' - 'a')
+#define RISCV_ISA_EXT_j ('j' - 'a')
+#define RISCV_ISA_EXT_k ('k' - 'a')
+#define RISCV_ISA_EXT_m ('m' - 'a')
+#define RISCV_ISA_EXT_p ('p' - 'a')
+#define RISCV_ISA_EXT_q ('q' - 'a')
+#define RISCV_ISA_EXT_s ('s' - 'a')
+#define RISCV_ISA_EXT_u ('u' - 'a')
+#define RISCV_ISA_EXT_v ('v' - 'a')
+
+/*
+ * These macros represent the logical IDs of each multi-letter RISC-V ISA
+ * extension and are used in the ISA bitmap. The logical IDs start from
+ * RISCV_ISA_EXT_BASE, which allows the 0-25 range to be reserved for single
+ * letter extensions. The maximum, RISCV_ISA_EXT_MAX, is defined in order
+ * to allocate the bitmap and may be increased when necessary.
+ *
+ * New extensions should just be added to the bottom, rather than added
+ * alphabetically, in order to avoid unnecessary shuffling.
+ */
+#define RISCV_ISA_EXT_BASE 26
+
+#define RISCV_ISA_EXT_SSCOFPMF 26
+#define RISCV_ISA_EXT_SSTC 27
+#define RISCV_ISA_EXT_SVINVAL 28
+#define RISCV_ISA_EXT_SVPBMT 29
+#define RISCV_ISA_EXT_ZBB 30
+#define RISCV_ISA_EXT_ZICBOM 31
+#define RISCV_ISA_EXT_ZIHINTPAUSE 32
+#define RISCV_ISA_EXT_SVNAPOT 33
+#define RISCV_ISA_EXT_ZICBOZ 34
+#define RISCV_ISA_EXT_SMAIA 35
+#define RISCV_ISA_EXT_SSAIA 36
+#define RISCV_ISA_EXT_ZBA 37
+#define RISCV_ISA_EXT_ZBS 38
+#define RISCV_ISA_EXT_ZICNTR 39
+#define RISCV_ISA_EXT_ZICSR 40
+#define RISCV_ISA_EXT_ZIFENCEI 41
+#define RISCV_ISA_EXT_ZIHPM 42
+
+#define RISCV_ISA_EXT_MAX 64
+
+#ifdef CONFIG_RISCV_M_MODE
+#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SMAIA
+#else
+#define RISCV_ISA_EXT_SxAIA RISCV_ISA_EXT_SSAIA
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <linux/jump_label.h>
+
+unsigned long riscv_get_elf_hwcap(void);
+
+struct riscv_isa_ext_data {
+ const unsigned int id;
+ const char *name;
+ const char *property;
+};
+
+extern const struct riscv_isa_ext_data riscv_isa_ext[];
+extern const size_t riscv_isa_ext_count;
+extern bool riscv_isa_fallback;
+
+unsigned long riscv_isa_extension_base(const unsigned long *isa_bitmap);
+
+#define riscv_isa_extension_mask(ext) BIT_MASK(RISCV_ISA_EXT_##ext)
+
+bool __riscv_isa_extension_available(const unsigned long *isa_bitmap, int bit);
+#define riscv_isa_extension_available(isa_bitmap, ext) \
+ __riscv_isa_extension_available(isa_bitmap, RISCV_ISA_EXT_##ext)
+
+static __always_inline bool
+riscv_has_extension_likely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("j %l[l_no]", "nop", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_no);
+ } else {
+ if (!__riscv_isa_extension_available(NULL, ext))
+ goto l_no;
+ }
+
+ return true;
+l_no:
+ return false;
+}
+
+static __always_inline bool
+riscv_has_extension_unlikely(const unsigned long ext)
+{
+ compiletime_assert(ext < RISCV_ISA_EXT_MAX,
+ "ext must be < RISCV_ISA_EXT_MAX");
+
+ if (IS_ENABLED(CONFIG_RISCV_ALTERNATIVE)) {
+ asm_volatile_goto(
+ ALTERNATIVE("nop", "j %l[l_yes]", 0, %[ext], 1)
+ :
+ : [ext] "i" (ext)
+ :
+ : l_yes);
+ } else {
+ if (__riscv_isa_extension_available(NULL, ext))
+ goto l_yes;
+ }
+
+ return false;
+l_yes:
+ return true;
+}
+
+#endif
+
+#endif /* _ASM_RISCV_HWCAP_H */
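A short usage sketch for the helpers above, assuming kernel context; zbb_copy() and generic_copy() are hypothetical stubs standing in for real implementations.

/* Hypothetical sketch: select an optimized path at runtime. With
 * CONFIG_RISCV_ALTERNATIVE this compiles to a patched nop/jump rather
 * than a bitmap load-and-test. */
static void zbb_copy(void)     { /* hypothetical Zbb-accelerated body */ }
static void generic_copy(void) { /* hypothetical fallback body */ }

static void copy_something(void)
{
	if (riscv_has_extension_likely(RISCV_ISA_EXT_ZBB))
		zbb_copy();
	else
		generic_copy();
}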
diff --git a/arch/riscv/include/asm/hwprobe.h b/arch/riscv/include/asm/hwprobe.h
new file mode 100644
index 0000000000..7cad513538
--- /dev/null
+++ b/arch/riscv/include/asm/hwprobe.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+/*
+ * Copyright 2023 Rivos, Inc
+ */
+
+#ifndef _ASM_HWPROBE_H
+#define _ASM_HWPROBE_H
+
+#include <uapi/asm/hwprobe.h>
+
+#define RISCV_HWPROBE_MAX_KEY 5
+
+static inline bool riscv_hwprobe_key_is_valid(__s64 key)
+{
+ return key >= 0 && key <= RISCV_HWPROBE_MAX_KEY;
+}
+
+#endif
diff --git a/arch/riscv/include/asm/image.h b/arch/riscv/include/asm/image.h
new file mode 100644
index 0000000000..e0b319af36
--- /dev/null
+++ b/arch/riscv/include/asm/image.h
@@ -0,0 +1,65 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_IMAGE_H
+#define _ASM_RISCV_IMAGE_H
+
+#define RISCV_IMAGE_MAGIC "RISCV\0\0\0"
+#define RISCV_IMAGE_MAGIC2 "RSC\x05"
+
+#define RISCV_IMAGE_FLAG_BE_SHIFT 0
+#define RISCV_IMAGE_FLAG_BE_MASK 0x1
+
+#define RISCV_IMAGE_FLAG_LE 0
+#define RISCV_IMAGE_FLAG_BE 1
+
+#ifdef CONFIG_CPU_BIG_ENDIAN
+#error conversion of header fields to LE not yet implemented
+#else
+#define __HEAD_FLAG_BE RISCV_IMAGE_FLAG_LE
+#endif
+
+#define __HEAD_FLAG(field) (__HEAD_FLAG_##field << \
+ RISCV_IMAGE_FLAG_##field##_SHIFT)
+
+#define __HEAD_FLAGS (__HEAD_FLAG(BE))
+
+#define RISCV_HEADER_VERSION_MAJOR 0
+#define RISCV_HEADER_VERSION_MINOR 2
+
+#define RISCV_HEADER_VERSION (RISCV_HEADER_VERSION_MAJOR << 16 | \
+ RISCV_HEADER_VERSION_MINOR)
+
+#ifndef __ASSEMBLY__
+/**
+ * struct riscv_image_header - riscv kernel image header
+ * @code0: Executable code
+ * @code1: Executable code
+ * @text_offset: Image load offset (little endian)
+ * @image_size: Effective Image size (little endian)
+ * @flags: kernel flags (little endian)
+ * @version: version
+ * @res1: reserved
+ * @res2: reserved
+ * @magic: Magic number (RISC-V specific; deprecated)
+ * @magic2: Magic number 2 (to match the ARM64 'magic' field pos)
+ * @res3: reserved (will be used for PE COFF offset)
+ *
+ * The intention is for this header format to be shared between multiple
+ * architectures to avoid a proliferation of image header formats.
+ */
+
+struct riscv_image_header {
+ u32 code0;
+ u32 code1;
+ u64 text_offset;
+ u64 image_size;
+ u64 flags;
+ u32 version;
+ u32 res1;
+ u64 res2;
+ u64 magic;
+ u32 magic2;
+ u32 res3;
+};
+#endif /* __ASSEMBLY__ */
+#endif /* _ASM_RISCV_IMAGE_H */
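A minimal validation sketch, assuming a loader already has the header in memory; riscv_image_valid() is a hypothetical helper, with bool and memcmp() assumed from <linux/types.h> and <linux/string.h>.

/* Hypothetical helper: accept an image when the ARM64-compatible
 * magic2 field matches. magic2 is a u32 and RISCV_IMAGE_MAGIC2
 * ("RSC\x05") is exactly four bytes. */
static bool riscv_image_valid(const struct riscv_image_header *h)
{
	return !memcmp(&h->magic2, RISCV_IMAGE_MAGIC2, sizeof(h->magic2));
}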
diff --git a/arch/riscv/include/asm/insn-def.h b/arch/riscv/include/asm/insn-def.h
new file mode 100644
index 0000000000..6960beb75f
--- /dev/null
+++ b/arch/riscv/include/asm/insn-def.h
@@ -0,0 +1,199 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_INSN_DEF_H
+#define __ASM_INSN_DEF_H
+
+#include <asm/asm.h>
+
+#define INSN_R_FUNC7_SHIFT 25
+#define INSN_R_RS2_SHIFT 20
+#define INSN_R_RS1_SHIFT 15
+#define INSN_R_FUNC3_SHIFT 12
+#define INSN_R_RD_SHIFT 7
+#define INSN_R_OPCODE_SHIFT 0
+
+#define INSN_I_SIMM12_SHIFT 20
+#define INSN_I_RS1_SHIFT 15
+#define INSN_I_FUNC3_SHIFT 12
+#define INSN_I_RD_SHIFT 7
+#define INSN_I_OPCODE_SHIFT 0
+
+#ifdef __ASSEMBLY__
+
+#ifdef CONFIG_AS_HAS_INSN
+
+ .macro insn_r, opcode, func3, func7, rd, rs1, rs2
+ .insn r \opcode, \func3, \func7, \rd, \rs1, \rs2
+ .endm
+
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .insn i \opcode, \func3, \rd, \rs1, \simm12
+ .endm
+
+#else
+
+#include <asm/gpr-num.h>
+
+ .macro insn_r, opcode, func3, func7, rd, rs1, rs2
+ .4byte ((\opcode << INSN_R_OPCODE_SHIFT) | \
+ (\func3 << INSN_R_FUNC3_SHIFT) | \
+ (\func7 << INSN_R_FUNC7_SHIFT) | \
+ (.L__gpr_num_\rd << INSN_R_RD_SHIFT) | \
+ (.L__gpr_num_\rs1 << INSN_R_RS1_SHIFT) | \
+ (.L__gpr_num_\rs2 << INSN_R_RS2_SHIFT))
+ .endm
+
+ .macro insn_i, opcode, func3, rd, rs1, simm12
+ .4byte ((\opcode << INSN_I_OPCODE_SHIFT) | \
+ (\func3 << INSN_I_FUNC3_SHIFT) | \
+ (.L__gpr_num_\rd << INSN_I_RD_SHIFT) | \
+ (.L__gpr_num_\rs1 << INSN_I_RS1_SHIFT) | \
+ (\simm12 << INSN_I_SIMM12_SHIFT))
+ .endm
+
+#endif
+
+#define __INSN_R(...) insn_r __VA_ARGS__
+#define __INSN_I(...) insn_i __VA_ARGS__
+
+#else /* ! __ASSEMBLY__ */
+
+#ifdef CONFIG_AS_HAS_INSN
+
+#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
+ ".insn r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n"
+
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ ".insn i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n"
+
+#else
+
+#include <linux/stringify.h>
+#include <asm/gpr-num.h>
+
+#define DEFINE_INSN_R \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_r, opcode, func3, func7, rd, rs1, rs2\n" \
+" .4byte ((\\opcode << " __stringify(INSN_R_OPCODE_SHIFT) ") |" \
+" (\\func3 << " __stringify(INSN_R_FUNC3_SHIFT) ") |" \
+" (\\func7 << " __stringify(INSN_R_FUNC7_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " __stringify(INSN_R_RD_SHIFT) ") |" \
+" (.L__gpr_num_\\rs1 << " __stringify(INSN_R_RS1_SHIFT) ") |" \
+" (.L__gpr_num_\\rs2 << " __stringify(INSN_R_RS2_SHIFT) "))\n" \
+" .endm\n"
+
+#define DEFINE_INSN_I \
+ __DEFINE_ASM_GPR_NUMS \
+" .macro insn_i, opcode, func3, rd, rs1, simm12\n" \
+" .4byte ((\\opcode << " __stringify(INSN_I_OPCODE_SHIFT) ") |" \
+" (\\func3 << " __stringify(INSN_I_FUNC3_SHIFT) ") |" \
+" (.L__gpr_num_\\rd << " __stringify(INSN_I_RD_SHIFT) ") |" \
+" (.L__gpr_num_\\rs1 << " __stringify(INSN_I_RS1_SHIFT) ") |" \
+" (\\simm12 << " __stringify(INSN_I_SIMM12_SHIFT) "))\n" \
+" .endm\n"
+
+#define UNDEFINE_INSN_R \
+" .purgem insn_r\n"
+
+#define UNDEFINE_INSN_I \
+" .purgem insn_i\n"
+
+#define __INSN_R(opcode, func3, func7, rd, rs1, rs2) \
+ DEFINE_INSN_R \
+ "insn_r " opcode ", " func3 ", " func7 ", " rd ", " rs1 ", " rs2 "\n" \
+ UNDEFINE_INSN_R
+
+#define __INSN_I(opcode, func3, rd, rs1, simm12) \
+ DEFINE_INSN_I \
+ "insn_i " opcode ", " func3 ", " rd ", " rs1 ", " simm12 "\n" \
+ UNDEFINE_INSN_I
+
+#endif
+
+#endif /* ! __ASSEMBLY__ */
+
+#define INSN_R(opcode, func3, func7, rd, rs1, rs2) \
+ __INSN_R(RV_##opcode, RV_##func3, RV_##func7, \
+ RV_##rd, RV_##rs1, RV_##rs2)
+
+#define INSN_I(opcode, func3, rd, rs1, simm12) \
+ __INSN_I(RV_##opcode, RV_##func3, RV_##rd, \
+ RV_##rs1, RV_##simm12)
+
+#define RV_OPCODE(v) __ASM_STR(v)
+#define RV_FUNC3(v) __ASM_STR(v)
+#define RV_FUNC7(v) __ASM_STR(v)
+#define RV_SIMM12(v) __ASM_STR(v)
+#define RV_RD(v) __ASM_STR(v)
+#define RV_RS1(v) __ASM_STR(v)
+#define RV_RS2(v) __ASM_STR(v)
+#define __RV_REG(v) __ASM_STR(x ## v)
+#define RV___RD(v) __RV_REG(v)
+#define RV___RS1(v) __RV_REG(v)
+#define RV___RS2(v) __RV_REG(v)
+
+#define RV_OPCODE_MISC_MEM RV_OPCODE(15)
+#define RV_OPCODE_SYSTEM RV_OPCODE(115)
+
+#define HFENCE_VVMA(vaddr, asid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(17), \
+ __RD(0), RS1(vaddr), RS2(asid))
+
+#define HFENCE_GVMA(gaddr, vmid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(49), \
+ __RD(0), RS1(gaddr), RS2(vmid))
+
+#define HLVX_HU(dest, addr) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(50), \
+ RD(dest), RS1(addr), __RS2(3))
+
+#define HLV_W(dest, addr) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(52), \
+ RD(dest), RS1(addr), __RS2(0))
+
+#ifdef CONFIG_64BIT
+#define HLV_D(dest, addr) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(4), FUNC7(54), \
+ RD(dest), RS1(addr), __RS2(0))
+#else
+#define HLV_D(dest, addr) \
+ __ASM_STR(.error "hlv.d requires 64-bit support")
+#endif
+
+#define SINVAL_VMA(vaddr, asid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(11), \
+ __RD(0), RS1(vaddr), RS2(asid))
+
+#define SFENCE_W_INVAL() \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \
+ __RD(0), __RS1(0), __RS2(0))
+
+#define SFENCE_INVAL_IR() \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(12), \
+ __RD(0), __RS1(0), __RS2(1))
+
+#define HINVAL_VVMA(vaddr, asid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(19), \
+ __RD(0), RS1(vaddr), RS2(asid))
+
+#define HINVAL_GVMA(gaddr, vmid) \
+ INSN_R(OPCODE_SYSTEM, FUNC3(0), FUNC7(51), \
+ __RD(0), RS1(gaddr), RS2(vmid))
+
+#define CBO_inval(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(0))
+
+#define CBO_clean(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(1))
+
+#define CBO_flush(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(2))
+
+#define CBO_zero(base) \
+ INSN_I(OPCODE_MISC_MEM, FUNC3(2), __RD(0), \
+ RS1(base), SIMM12(4))
+
+#endif /* __ASM_INSN_DEF_H */
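A sketch of typical use from C, mirroring how KVM issues hypervisor fences: the "r" constraints let the compiler pick registers, and the substituted register names feed the rs1/rs2 encodings (via gpr-num.h on assemblers without .insn support). local_hfence_vvma_asid() is a hypothetical wrapper.

/* Hypothetical sketch: flush guest virtual mappings for one ASID. */
static void local_hfence_vvma_asid(unsigned long vaddr, unsigned long asid)
{
	asm volatile(HFENCE_VVMA(%0, %1)
		     : : "r" (vaddr), "r" (asid) : "memory");
}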
diff --git a/arch/riscv/include/asm/insn.h b/arch/riscv/include/asm/insn.h
new file mode 100644
index 0000000000..06e439eeef
--- /dev/null
+++ b/arch/riscv/include/asm/insn.h
@@ -0,0 +1,431 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_INSN_H
+#define _ASM_RISCV_INSN_H
+
+#include <linux/bits.h>
+
+#define RV_INSN_FUNCT3_MASK GENMASK(14, 12)
+#define RV_INSN_FUNCT3_OPOFF 12
+#define RV_INSN_OPCODE_MASK GENMASK(6, 0)
+#define RV_INSN_OPCODE_OPOFF 0
+#define RV_INSN_FUNCT12_OPOFF 20
+
+#define RV_ENCODE_FUNCT3(f_) (RVG_FUNCT3_##f_ << RV_INSN_FUNCT3_OPOFF)
+#define RV_ENCODE_FUNCT12(f_) (RVG_FUNCT12_##f_ << RV_INSN_FUNCT12_OPOFF)
+
+/* The bit field of immediate value in I-type instruction */
+#define RV_I_IMM_SIGN_OPOFF 31
+#define RV_I_IMM_11_0_OPOFF 20
+#define RV_I_IMM_SIGN_OFF 12
+#define RV_I_IMM_11_0_OFF 0
+#define RV_I_IMM_11_0_MASK GENMASK(11, 0)
+
+/* The bit field of immediate value in J-type instruction */
+#define RV_J_IMM_SIGN_OPOFF 31
+#define RV_J_IMM_10_1_OPOFF 21
+#define RV_J_IMM_11_OPOFF 20
+#define RV_J_IMM_19_12_OPOFF 12
+#define RV_J_IMM_SIGN_OFF 20
+#define RV_J_IMM_10_1_OFF 1
+#define RV_J_IMM_11_OFF 11
+#define RV_J_IMM_19_12_OFF 12
+#define RV_J_IMM_10_1_MASK GENMASK(9, 0)
+#define RV_J_IMM_11_MASK GENMASK(0, 0)
+#define RV_J_IMM_19_12_MASK GENMASK(7, 0)
+
+/*
+ * U-type IMMs contain the upper 20 bits [31:12] of an immediate with
+ * the rest filled in by zeros, so no shifting is required. Similarly,
+ * bit 31 contains the sign bit, so no sign extension is necessary.
+ */
+#define RV_U_IMM_SIGN_OPOFF 31
+#define RV_U_IMM_31_12_OPOFF 0
+#define RV_U_IMM_31_12_MASK GENMASK(31, 12)
+
+/* The bit field of immediate value in B-type instruction */
+#define RV_B_IMM_SIGN_OPOFF 31
+#define RV_B_IMM_10_5_OPOFF 25
+#define RV_B_IMM_4_1_OPOFF 8
+#define RV_B_IMM_11_OPOFF 7
+#define RV_B_IMM_SIGN_OFF 12
+#define RV_B_IMM_10_5_OFF 5
+#define RV_B_IMM_4_1_OFF 1
+#define RV_B_IMM_11_OFF 11
+#define RV_B_IMM_10_5_MASK GENMASK(5, 0)
+#define RV_B_IMM_4_1_MASK GENMASK(3, 0)
+#define RV_B_IMM_11_MASK GENMASK(0, 0)
+
+/* The register offset in RVG instruction */
+#define RVG_RS1_OPOFF 15
+#define RVG_RS2_OPOFF 20
+#define RVG_RD_OPOFF 7
+#define RVG_RS1_MASK GENMASK(4, 0)
+#define RVG_RD_MASK GENMASK(4, 0)
+
+/* The bit field of immediate value in RVC J instruction */
+#define RVC_J_IMM_SIGN_OPOFF 12
+#define RVC_J_IMM_4_OPOFF 11
+#define RVC_J_IMM_9_8_OPOFF 9
+#define RVC_J_IMM_10_OPOFF 8
+#define RVC_J_IMM_6_OPOFF 7
+#define RVC_J_IMM_7_OPOFF 6
+#define RVC_J_IMM_3_1_OPOFF 3
+#define RVC_J_IMM_5_OPOFF 2
+#define RVC_J_IMM_SIGN_OFF 11
+#define RVC_J_IMM_4_OFF 4
+#define RVC_J_IMM_9_8_OFF 8
+#define RVC_J_IMM_10_OFF 10
+#define RVC_J_IMM_6_OFF 6
+#define RVC_J_IMM_7_OFF 7
+#define RVC_J_IMM_3_1_OFF 1
+#define RVC_J_IMM_5_OFF 5
+#define RVC_J_IMM_4_MASK GENMASK(0, 0)
+#define RVC_J_IMM_9_8_MASK GENMASK(1, 0)
+#define RVC_J_IMM_10_MASK GENMASK(0, 0)
+#define RVC_J_IMM_6_MASK GENMASK(0, 0)
+#define RVC_J_IMM_7_MASK GENMASK(0, 0)
+#define RVC_J_IMM_3_1_MASK GENMASK(2, 0)
+#define RVC_J_IMM_5_MASK GENMASK(0, 0)
+
+/* The bit field of immediate value in RVC B instruction */
+#define RVC_B_IMM_SIGN_OPOFF 12
+#define RVC_B_IMM_4_3_OPOFF 10
+#define RVC_B_IMM_7_6_OPOFF 5
+#define RVC_B_IMM_2_1_OPOFF 3
+#define RVC_B_IMM_5_OPOFF 2
+#define RVC_B_IMM_SIGN_OFF 8
+#define RVC_B_IMM_4_3_OFF 3
+#define RVC_B_IMM_7_6_OFF 6
+#define RVC_B_IMM_2_1_OFF 1
+#define RVC_B_IMM_5_OFF 5
+#define RVC_B_IMM_4_3_MASK GENMASK(1, 0)
+#define RVC_B_IMM_7_6_MASK GENMASK(1, 0)
+#define RVC_B_IMM_2_1_MASK GENMASK(1, 0)
+#define RVC_B_IMM_5_MASK GENMASK(0, 0)
+
+#define RVC_INSN_FUNCT4_MASK GENMASK(15, 12)
+#define RVC_INSN_FUNCT4_OPOFF 12
+#define RVC_INSN_FUNCT3_MASK GENMASK(15, 13)
+#define RVC_INSN_FUNCT3_OPOFF 13
+#define RVC_INSN_J_RS1_MASK GENMASK(11, 7)
+#define RVC_INSN_J_RS2_MASK GENMASK(6, 2)
+#define RVC_INSN_OPCODE_MASK GENMASK(1, 0)
+#define RVC_ENCODE_FUNCT3(f_) (RVC_FUNCT3_##f_ << RVC_INSN_FUNCT3_OPOFF)
+#define RVC_ENCODE_FUNCT4(f_) (RVC_FUNCT4_##f_ << RVC_INSN_FUNCT4_OPOFF)
+
+/* The register offset in RVC op=C0 instruction */
+#define RVC_C0_RS1_OPOFF 7
+#define RVC_C0_RS2_OPOFF 2
+#define RVC_C0_RD_OPOFF 2
+
+/* The register offset in RVC op=C1 instruction */
+#define RVC_C1_RS1_OPOFF 7
+#define RVC_C1_RS2_OPOFF 2
+#define RVC_C1_RD_OPOFF 7
+
+/* The register offset in RVC op=C2 instruction */
+#define RVC_C2_RS1_OPOFF 7
+#define RVC_C2_RS2_OPOFF 2
+#define RVC_C2_RD_OPOFF 7
+#define RVC_C2_RS1_MASK GENMASK(4, 0)
+
+/* parts of opcode for RVG */
+#define RVG_OPCODE_FENCE 0x0f
+#define RVG_OPCODE_AUIPC 0x17
+#define RVG_OPCODE_BRANCH 0x63
+#define RVG_OPCODE_JALR 0x67
+#define RVG_OPCODE_JAL 0x6f
+#define RVG_OPCODE_SYSTEM 0x73
+#define RVG_SYSTEM_CSR_OFF 20
+#define RVG_SYSTEM_CSR_MASK GENMASK(12, 0)
+
+/* parts of opcode for RVF, RVD and RVQ */
+#define RVFDQ_FL_FS_WIDTH_OFF 12
+#define RVFDQ_FL_FS_WIDTH_MASK GENMASK(3, 0)
+#define RVFDQ_FL_FS_WIDTH_W 2
+#define RVFDQ_FL_FS_WIDTH_D 3
+#define RVFDQ_LS_FS_WIDTH_Q 4
+#define RVFDQ_OPCODE_FL 0x07
+#define RVFDQ_OPCODE_FS 0x27
+
+/* parts of opcode for RVV */
+#define RVV_OPCODE_VECTOR 0x57
+#define RVV_VL_VS_WIDTH_8 0
+#define RVV_VL_VS_WIDTH_16 5
+#define RVV_VL_VS_WIDTH_32 6
+#define RVV_VL_VS_WIDTH_64 7
+#define RVV_OPCODE_VL RVFDQ_OPCODE_FL
+#define RVV_OPCODE_VS RVFDQ_OPCODE_FS
+
+/* parts of opcode for RVC */
+#define RVC_OPCODE_C0 0x0
+#define RVC_OPCODE_C1 0x1
+#define RVC_OPCODE_C2 0x2
+
+/* parts of funct3 code for I, M, A extension */
+#define RVG_FUNCT3_JALR 0x0
+#define RVG_FUNCT3_BEQ 0x0
+#define RVG_FUNCT3_BNE 0x1
+#define RVG_FUNCT3_BLT 0x4
+#define RVG_FUNCT3_BGE 0x5
+#define RVG_FUNCT3_BLTU 0x6
+#define RVG_FUNCT3_BGEU 0x7
+
+/* parts of funct3 code for C extension */
+#define RVC_FUNCT3_C_BEQZ 0x6
+#define RVC_FUNCT3_C_BNEZ 0x7
+#define RVC_FUNCT3_C_J 0x5
+#define RVC_FUNCT3_C_JAL 0x1
+#define RVC_FUNCT4_C_JR 0x8
+#define RVC_FUNCT4_C_JALR 0x9
+#define RVC_FUNCT4_C_EBREAK 0x9
+
+#define RVG_FUNCT12_EBREAK 0x1
+#define RVG_FUNCT12_SRET 0x102
+
+#define RVG_MATCH_AUIPC (RVG_OPCODE_AUIPC)
+#define RVG_MATCH_JALR (RV_ENCODE_FUNCT3(JALR) | RVG_OPCODE_JALR)
+#define RVG_MATCH_JAL (RVG_OPCODE_JAL)
+#define RVG_MATCH_FENCE (RVG_OPCODE_FENCE)
+#define RVG_MATCH_BEQ (RV_ENCODE_FUNCT3(BEQ) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BNE (RV_ENCODE_FUNCT3(BNE) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BLT (RV_ENCODE_FUNCT3(BLT) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BGE (RV_ENCODE_FUNCT3(BGE) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BLTU (RV_ENCODE_FUNCT3(BLTU) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_BGEU (RV_ENCODE_FUNCT3(BGEU) | RVG_OPCODE_BRANCH)
+#define RVG_MATCH_EBREAK (RV_ENCODE_FUNCT12(EBREAK) | RVG_OPCODE_SYSTEM)
+#define RVG_MATCH_SRET (RV_ENCODE_FUNCT12(SRET) | RVG_OPCODE_SYSTEM)
+#define RVC_MATCH_C_BEQZ (RVC_ENCODE_FUNCT3(C_BEQZ) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_BNEZ (RVC_ENCODE_FUNCT3(C_BNEZ) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_J (RVC_ENCODE_FUNCT3(C_J) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_JAL (RVC_ENCODE_FUNCT3(C_JAL) | RVC_OPCODE_C1)
+#define RVC_MATCH_C_JR (RVC_ENCODE_FUNCT4(C_JR) | RVC_OPCODE_C2)
+#define RVC_MATCH_C_JALR (RVC_ENCODE_FUNCT4(C_JALR) | RVC_OPCODE_C2)
+#define RVC_MATCH_C_EBREAK (RVC_ENCODE_FUNCT4(C_EBREAK) | RVC_OPCODE_C2)
+
+#define RVG_MASK_AUIPC (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_JALR (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_JAL (RV_INSN_OPCODE_MASK)
+#define RVG_MASK_FENCE (RV_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JALR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JR (RVC_INSN_FUNCT4_MASK | RVC_INSN_J_RS2_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_JAL (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_J (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVG_MASK_BEQ (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BNE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BLT (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BGE (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BLTU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVG_MASK_BGEU (RV_INSN_FUNCT3_MASK | RV_INSN_OPCODE_MASK)
+#define RVC_MASK_C_BEQZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_BNEZ (RVC_INSN_FUNCT3_MASK | RVC_INSN_OPCODE_MASK)
+#define RVC_MASK_C_EBREAK 0xffff
+#define RVG_MASK_EBREAK 0xffffffff
+#define RVG_MASK_SRET 0xffffffff
+
+#define __INSN_LENGTH_MASK _UL(0x3)
+#define __INSN_LENGTH_GE_32 _UL(0x3)
+#define __INSN_OPCODE_MASK _UL(0x7F)
+#define __INSN_BRANCH_OPCODE _UL(RVG_OPCODE_BRANCH)
+
+#define __RISCV_INSN_FUNCS(name, mask, val) \
+static __always_inline bool riscv_insn_is_##name(u32 code) \
+{ \
+ BUILD_BUG_ON(~(mask) & (val)); \
+ return (code & (mask)) == (val); \
+} \
+
+#if __riscv_xlen == 32
+/* C.JAL is an RV32C-only instruction */
+__RISCV_INSN_FUNCS(c_jal, RVC_MASK_C_JAL, RVC_MATCH_C_JAL)
+#else
+#define riscv_insn_is_c_jal(opcode) 0
+#endif
+__RISCV_INSN_FUNCS(auipc, RVG_MASK_AUIPC, RVG_MATCH_AUIPC)
+__RISCV_INSN_FUNCS(jalr, RVG_MASK_JALR, RVG_MATCH_JALR)
+__RISCV_INSN_FUNCS(jal, RVG_MASK_JAL, RVG_MATCH_JAL)
+__RISCV_INSN_FUNCS(c_j, RVC_MASK_C_J, RVC_MATCH_C_J)
+__RISCV_INSN_FUNCS(beq, RVG_MASK_BEQ, RVG_MATCH_BEQ)
+__RISCV_INSN_FUNCS(bne, RVG_MASK_BNE, RVG_MATCH_BNE)
+__RISCV_INSN_FUNCS(blt, RVG_MASK_BLT, RVG_MATCH_BLT)
+__RISCV_INSN_FUNCS(bge, RVG_MASK_BGE, RVG_MATCH_BGE)
+__RISCV_INSN_FUNCS(bltu, RVG_MASK_BLTU, RVG_MATCH_BLTU)
+__RISCV_INSN_FUNCS(bgeu, RVG_MASK_BGEU, RVG_MATCH_BGEU)
+__RISCV_INSN_FUNCS(c_beqz, RVC_MASK_C_BEQZ, RVC_MATCH_C_BEQZ)
+__RISCV_INSN_FUNCS(c_bnez, RVC_MASK_C_BNEZ, RVC_MATCH_C_BNEZ)
+__RISCV_INSN_FUNCS(c_ebreak, RVC_MASK_C_EBREAK, RVC_MATCH_C_EBREAK)
+__RISCV_INSN_FUNCS(ebreak, RVG_MASK_EBREAK, RVG_MATCH_EBREAK)
+__RISCV_INSN_FUNCS(sret, RVG_MASK_SRET, RVG_MATCH_SRET)
+__RISCV_INSN_FUNCS(fence, RVG_MASK_FENCE, RVG_MATCH_FENCE)
+
+/* special case to catch _any_ system instruction */
+static __always_inline bool riscv_insn_is_system(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_SYSTEM;
+}
+
+/* special case to catch _any_ branch instruction */
+static __always_inline bool riscv_insn_is_branch(u32 code)
+{
+ return (code & RV_INSN_OPCODE_MASK) == RVG_OPCODE_BRANCH;
+}
+
+static __always_inline bool riscv_insn_is_c_jr(u32 code)
+{
+ return (code & RVC_MASK_C_JR) == RVC_MATCH_C_JR &&
+ (code & RVC_INSN_J_RS1_MASK) != 0;
+}
+
+static __always_inline bool riscv_insn_is_c_jalr(u32 code)
+{
+ return (code & RVC_MASK_C_JALR) == RVC_MATCH_C_JALR &&
+ (code & RVC_INSN_J_RS1_MASK) != 0;
+}
+
+#define RV_IMM_SIGN(x) (-(((x) >> 31) & 1))
+#define RVC_IMM_SIGN(x) (-(((x) >> 12) & 1))
+#define RV_X(X, s, mask) (((X) >> (s)) & (mask))
+#define RVC_X(X, s, mask) RV_X(X, s, mask)
+
+#define RV_EXTRACT_RS1_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVG_RS1_OPOFF, RVG_RS1_MASK)); })
+
+#define RV_EXTRACT_RD_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVG_RD_OPOFF, RVG_RD_MASK)); })
+
+#define RV_EXTRACT_UTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_U_IMM_31_12_OPOFF, RV_U_IMM_31_12_MASK)); })
+
+#define RV_EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_J_IMM_10_1_OPOFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OFF) | \
+ (RV_X(x_, RV_J_IMM_11_OPOFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OFF) | \
+ (RV_X(x_, RV_J_IMM_19_12_OPOFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OFF) | \
+ (RV_IMM_SIGN(x_) << RV_J_IMM_SIGN_OFF); })
+
+#define RV_EXTRACT_ITYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_I_IMM_11_0_OPOFF, RV_I_IMM_11_0_MASK)) | \
+ (RV_IMM_SIGN(x_) << RV_I_IMM_SIGN_OFF); })
+
+#define RV_EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RV_B_IMM_4_1_OPOFF, RV_B_IMM_4_1_MASK) << RV_B_IMM_4_1_OFF) | \
+ (RV_X(x_, RV_B_IMM_10_5_OPOFF, RV_B_IMM_10_5_MASK) << RV_B_IMM_10_5_OFF) | \
+ (RV_X(x_, RV_B_IMM_11_OPOFF, RV_B_IMM_11_MASK) << RV_B_IMM_11_OFF) | \
+ (RV_IMM_SIGN(x_) << RV_B_IMM_SIGN_OFF); })
+
+#define RVC_EXTRACT_C2_RS1_REG(x) \
+ ({typeof(x) x_ = (x); \
+ (RV_X(x_, RVC_C2_RS1_OPOFF, RVC_C2_RS1_MASK)); })
+
+#define RVC_EXTRACT_JTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_J_IMM_3_1_OPOFF, RVC_J_IMM_3_1_MASK) << RVC_J_IMM_3_1_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_4_OPOFF, RVC_J_IMM_4_MASK) << RVC_J_IMM_4_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_5_OPOFF, RVC_J_IMM_5_MASK) << RVC_J_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_6_OPOFF, RVC_J_IMM_6_MASK) << RVC_J_IMM_6_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_7_OPOFF, RVC_J_IMM_7_MASK) << RVC_J_IMM_7_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_9_8_OPOFF, RVC_J_IMM_9_8_MASK) << RVC_J_IMM_9_8_OFF) | \
+ (RVC_X(x_, RVC_J_IMM_10_OPOFF, RVC_J_IMM_10_MASK) << RVC_J_IMM_10_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_J_IMM_SIGN_OFF); })
+
+#define RVC_EXTRACT_BTYPE_IMM(x) \
+ ({typeof(x) x_ = (x); \
+ (RVC_X(x_, RVC_B_IMM_2_1_OPOFF, RVC_B_IMM_2_1_MASK) << RVC_B_IMM_2_1_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_4_3_OPOFF, RVC_B_IMM_4_3_MASK) << RVC_B_IMM_4_3_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_5_OPOFF, RVC_B_IMM_5_MASK) << RVC_B_IMM_5_OFF) | \
+ (RVC_X(x_, RVC_B_IMM_7_6_OPOFF, RVC_B_IMM_7_6_MASK) << RVC_B_IMM_7_6_OFF) | \
+ (RVC_IMM_SIGN(x_) << RVC_B_IMM_SIGN_OFF); })
+
+#define RVG_EXTRACT_SYSTEM_CSR(x) \
+ ({typeof(x) x_ = (x); RV_X(x_, RVG_SYSTEM_CSR_OFF, RVG_SYSTEM_CSR_MASK); })
+
+#define RVFDQ_EXTRACT_FL_FS_WIDTH(x) \
+ ({typeof(x) x_ = (x); RV_X(x_, RVFDQ_FL_FS_WIDTH_OFF, \
+ RVFDQ_FL_FS_WIDTH_MASK); })
+
+#define RVV_EXRACT_VL_VS_WIDTH(x) RVFDQ_EXTRACT_FL_FS_WIDTH(x)
+
+/*
+ * Get the immediate from a J-type instruction.
+ *
+ * @insn: instruction to process
+ * Return: immediate
+ */
+static inline s32 riscv_insn_extract_jtype_imm(u32 insn)
+{
+ return RV_EXTRACT_JTYPE_IMM(insn);
+}
+
+/*
+ * Update a J-type instruction with an immediate value.
+ *
+ * @insn: pointer to the jtype instruction
+ * @imm: the immediate to insert into the instruction
+ */
+static inline void riscv_insn_insert_jtype_imm(u32 *insn, s32 imm)
+{
+ /* drop the old IMMs, all jal IMM bits sit at 31:12 */
+ *insn &= ~GENMASK(31, 12);
+ *insn |= (RV_X(imm, RV_J_IMM_10_1_OFF, RV_J_IMM_10_1_MASK) << RV_J_IMM_10_1_OPOFF) |
+ (RV_X(imm, RV_J_IMM_11_OFF, RV_J_IMM_11_MASK) << RV_J_IMM_11_OPOFF) |
+ (RV_X(imm, RV_J_IMM_19_12_OFF, RV_J_IMM_19_12_MASK) << RV_J_IMM_19_12_OPOFF) |
+ (RV_X(imm, RV_J_IMM_SIGN_OFF, 1) << RV_J_IMM_SIGN_OPOFF);
+}
+
+/*
+ * Put together one immediate from a U-type and I-type instruction pair.
+ *
+ * The U-type contains an upper immediate, meaning bits[31:12] with [11:0]
+ * being zero, while the I-type contains a 12bit immediate.
+ * Combined, these can encode larger 32-bit values and are used, for
+ * example, in auipc + jalr pairs to allow larger jumps.
+ *
+ * @utype_insn: instruction containing the upper immediate
+ * @itype_insn: instruction containing the lower 12-bit immediate
+ * Return: combined immediate
+ */
+static inline s32 riscv_insn_extract_utype_itype_imm(u32 utype_insn, u32 itype_insn)
+{
+ s32 imm;
+
+ imm = RV_EXTRACT_UTYPE_IMM(utype_insn);
+ imm += RV_EXTRACT_ITYPE_IMM(itype_insn);
+
+ return imm;
+}
+
+/*
+ * Update a set of two instructions (U-type + I-type) with an immediate value.
+ *
+ * Used, for example, in auipc+jalr pairs: the U-type instruction contains
+ * a 20-bit upper immediate representing bits [31:12], while the I-type
+ * instruction contains a 12-bit immediate representing bits [11:0].
+ *
+ * This also takes into account that both separate immediates are
+ * considered as signed values, so if the I-type immediate becomes
+ * negative (BIT(11) set) the U-type part gets adjusted.
+ *
+ * @utype_insn: pointer to the utype instruction of the pair
+ * @itype_insn: pointer to the itype instruction of the pair
+ * @imm: the immediate to insert into the two instructions
+ */
+static inline void riscv_insn_insert_utype_itype_imm(u32 *utype_insn, u32 *itype_insn, s32 imm)
+{
+ /* drop possible old IMM values */
+ *utype_insn &= ~(RV_U_IMM_31_12_MASK);
+ *itype_insn &= ~(RV_I_IMM_11_0_MASK << RV_I_IMM_11_0_OPOFF);
+
+ /* add the adapted IMMs */
+ *utype_insn |= (imm & RV_U_IMM_31_12_MASK) + ((imm & BIT(11)) << 1);
+ *itype_insn |= ((imm & RV_I_IMM_11_0_MASK) << RV_I_IMM_11_0_OPOFF);
+}
+#endif /* _ASM_RISCV_INSN_H */
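A hedged sketch combining the predicates and extractors above; jal_target() is a hypothetical helper, not an API from this header.

/* Hypothetical helper: compute the target of a direct jump at @pc. */
static unsigned long jal_target(unsigned long pc, u32 insn)
{
	if (riscv_insn_is_jal(insn))
		return pc + riscv_insn_extract_jtype_imm(insn);
	if (riscv_insn_is_c_j(insn))
		/* the cast keeps the sign extension on 64-bit */
		return pc + (s32)RVC_EXTRACT_JTYPE_IMM(insn);
	return 0;	/* not a direct jump */
}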
diff --git a/arch/riscv/include/asm/io.h b/arch/riscv/include/asm/io.h
new file mode 100644
index 0000000000..42497d487a
--- /dev/null
+++ b/arch/riscv/include/asm/io.h
@@ -0,0 +1,143 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_IO_H
+#define _ASM_RISCV_IO_H
+
+#include <linux/types.h>
+#include <linux/pgtable.h>
+#include <asm/mmiowb.h>
+#include <asm/early_ioremap.h>
+
+/*
+ * MMIO access functions are separated out to break dependency cycles
+ * when using {read,write}* fns in low-level headers
+ */
+#include <asm/mmio.h>
+
+/*
+ * I/O port access constants.
+ */
+#ifdef CONFIG_MMU
+#define IO_SPACE_LIMIT (PCI_IO_SIZE - 1)
+#define PCI_IOBASE ((void __iomem *)PCI_IO_START)
+#endif /* CONFIG_MMU */
+
+/*
+ * Emulation routines for the port-mapped IO space used by some PCI drivers.
+ * These are defined as being "fully synchronous", but also "not guaranteed to
+ * be fully ordered with respect to other memory and I/O operations". We're
+ * going to be on the safe side here and just make them:
+ * - Fully ordered WRT each other, by bracketing them with two fences. The
+ * outer set contains both I/O so inX is ordered with outX, while the inner just
+ * needs the type of the access (I for inX and O for outX).
+ * - Ordered in the same manner as readX/writeX WRT memory by subsuming their
+ * fences.
+ * - Ordered WRT timer reads, so udelay and friends don't get elided by the
+ * implementation.
+ * Note that there is no way to actually enforce that outX is a non-posted
+ * operation on RISC-V, but hopefully the timer ordering constraint is
+ * sufficient to ensure this works sanely on controllers that support I/O
+ * writes.
+ */
+#define __io_pbr() __asm__ __volatile__ ("fence io,i" : : : "memory");
+#define __io_par(v) __asm__ __volatile__ ("fence i,ior" : : : "memory");
+#define __io_pbw() __asm__ __volatile__ ("fence iow,o" : : : "memory");
+#define __io_paw() __asm__ __volatile__ ("fence o,io" : : : "memory");
+
+/*
+ * Accesses from a single hart to a single I/O address must be ordered. This
+ * allows us to use the raw read macros, but we still need to fence before and
+ * after the block to ensure ordering WRT other macros. These are defined to
+ * perform host-endian accesses so we use __raw instead of __cpu.
+ */
+#define __io_reads_ins(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(const volatile void __iomem *addr, \
+ void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ ctype *buf = buffer; \
+ \
+ do { \
+ ctype x = __raw_read ## len(addr); \
+ *buf++ = x; \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+#define __io_writes_outs(port, ctype, len, bfence, afence) \
+ static inline void __ ## port ## len(volatile void __iomem *addr, \
+ const void *buffer, \
+ unsigned int count) \
+ { \
+ bfence; \
+ if (count) { \
+ const ctype *buf = buffer; \
+ \
+ do { \
+ __raw_write ## len(*buf++, addr); \
+ } while (--count); \
+ } \
+ afence; \
+ }
+
+__io_reads_ins(reads, u8, b, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u16, w, __io_br(), __io_ar(addr))
+__io_reads_ins(reads, u32, l, __io_br(), __io_ar(addr))
+#define readsb(addr, buffer, count) __readsb(addr, buffer, count)
+#define readsw(addr, buffer, count) __readsw(addr, buffer, count)
+#define readsl(addr, buffer, count) __readsl(addr, buffer, count)
+
+__io_reads_ins(ins, u8, b, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u16, w, __io_pbr(), __io_par(addr))
+__io_reads_ins(ins, u32, l, __io_pbr(), __io_par(addr))
+#define insb(addr, buffer, count) __insb(PCI_IOBASE + (addr), buffer, count)
+#define insw(addr, buffer, count) __insw(PCI_IOBASE + (addr), buffer, count)
+#define insl(addr, buffer, count) __insl(PCI_IOBASE + (addr), buffer, count)
+
+__io_writes_outs(writes, u8, b, __io_bw(), __io_aw())
+__io_writes_outs(writes, u16, w, __io_bw(), __io_aw())
+__io_writes_outs(writes, u32, l, __io_bw(), __io_aw())
+#define writesb(addr, buffer, count) __writesb(addr, buffer, count)
+#define writesw(addr, buffer, count) __writesw(addr, buffer, count)
+#define writesl(addr, buffer, count) __writesl(addr, buffer, count)
+
+__io_writes_outs(outs, u8, b, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u16, w, __io_pbw(), __io_paw())
+__io_writes_outs(outs, u32, l, __io_pbw(), __io_paw())
+#define outsb(addr, buffer, count) __outsb(PCI_IOBASE + (addr), buffer, count)
+#define outsw(addr, buffer, count) __outsw(PCI_IOBASE + (addr), buffer, count)
+#define outsl(addr, buffer, count) __outsl(PCI_IOBASE + (addr), buffer, count)
+
+#ifdef CONFIG_64BIT
+__io_reads_ins(reads, u64, q, __io_br(), __io_ar(addr))
+#define readsq(addr, buffer, count) __readsq(addr, buffer, count)
+
+__io_reads_ins(ins, u64, q, __io_pbr(), __io_par(addr))
+#define insq(addr, buffer, count) __insq(PCI_IOBASE + (addr), buffer, count)
+
+__io_writes_outs(writes, u64, q, __io_bw(), __io_aw())
+#define writesq(addr, buffer, count) __writesq(addr, buffer, count)
+
+__io_writes_outs(outs, u64, q, __io_pbw(), __io_paw())
+#define outsq(addr, buffer, count) __outsq(PCI_IOBASE + (addr), buffer, count)
+#endif
+
+#include <asm-generic/io.h>
+
+#ifdef CONFIG_MMU
+#define arch_memremap_wb(addr, size) \
+ ((__force void *)ioremap_prot((addr), (size), _PAGE_KERNEL))
+#endif
+
+#endif /* _ASM_RISCV_IO_H */
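A hypothetical driver fragment showing the string accessors in use, assuming fifo points at an ioremap()ed device register; drain_fifo() is illustrative only.

/* Hypothetical sketch: drain a 32-bit-wide MMIO FIFO. readsl()
 * brackets the whole burst with one fence pair rather than fencing
 * each word, per the ordering comment above. */
static void drain_fifo(void __iomem *fifo, u32 *buf, unsigned int words)
{
	readsl(fifo, buf, words);
}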
diff --git a/arch/riscv/include/asm/irq.h b/arch/riscv/include/asm/irq.h
new file mode 100644
index 0000000000..8e10a94430
--- /dev/null
+++ b/arch/riscv/include/asm/irq.h
@@ -0,0 +1,19 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_IRQ_H
+#define _ASM_RISCV_IRQ_H
+
+#include <linux/interrupt.h>
+#include <linux/linkage.h>
+
+#include <asm-generic/irq.h>
+
+void riscv_set_intc_hwnode_fn(struct fwnode_handle *(*fn)(void));
+
+struct fwnode_handle *riscv_get_intc_hwnode(void);
+
+#endif /* _ASM_RISCV_IRQ_H */
diff --git a/arch/riscv/include/asm/irq_stack.h b/arch/riscv/include/asm/irq_stack.h
new file mode 100644
index 0000000000..e4042d2975
--- /dev/null
+++ b/arch/riscv/include/asm/irq_stack.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_IRQ_STACK_H
+#define _ASM_RISCV_IRQ_STACK_H
+
+#include <linux/bug.h>
+#include <linux/gfp.h>
+#include <linux/kconfig.h>
+#include <linux/vmalloc.h>
+#include <linux/pgtable.h>
+#include <asm/thread_info.h>
+
+DECLARE_PER_CPU(ulong *, irq_stack_ptr);
+
+#ifdef CONFIG_VMAP_STACK
+/*
+ * To ensure that VMAP'd stack overflow detection works correctly, all VMAP'd
+ * stacks need to have the same alignment.
+ */
+static inline unsigned long *arch_alloc_vmap_stack(size_t stack_size, int node)
+{
+ void *p;
+
+ p = __vmalloc_node(stack_size, THREAD_ALIGN, THREADINFO_GFP, node,
+ __builtin_return_address(0));
+ return kasan_reset_tag(p);
+}
+#endif /* CONFIG_VMAP_STACK */
+
+#endif /* _ASM_RISCV_IRQ_STACK_H */
diff --git a/arch/riscv/include/asm/irq_work.h b/arch/riscv/include/asm/irq_work.h
new file mode 100644
index 0000000000..b53891964a
--- /dev/null
+++ b/arch/riscv/include/asm/irq_work.h
@@ -0,0 +1,10 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_IRQ_WORK_H
+#define _ASM_RISCV_IRQ_WORK_H
+
+static inline bool arch_irq_work_has_interrupt(void)
+{
+ return IS_ENABLED(CONFIG_SMP);
+}
+extern void arch_irq_work_raise(void);
+#endif /* _ASM_RISCV_IRQ_WORK_H */
diff --git a/arch/riscv/include/asm/irqflags.h b/arch/riscv/include/asm/irqflags.h
new file mode 100644
index 0000000000..08d4d6a5b7
--- /dev/null
+++ b/arch/riscv/include/asm/irqflags.h
@@ -0,0 +1,55 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_IRQFLAGS_H
+#define _ASM_RISCV_IRQFLAGS_H
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/* read interrupt enabled status */
+static inline unsigned long arch_local_save_flags(void)
+{
+ return csr_read(CSR_STATUS);
+}
+
+/* unconditionally enable interrupts */
+static inline void arch_local_irq_enable(void)
+{
+ csr_set(CSR_STATUS, SR_IE);
+}
+
+/* unconditionally disable interrupts */
+static inline void arch_local_irq_disable(void)
+{
+ csr_clear(CSR_STATUS, SR_IE);
+}
+
+/* get status and disable interrupts */
+static inline unsigned long arch_local_irq_save(void)
+{
+ return csr_read_clear(CSR_STATUS, SR_IE);
+}
+
+/* test flags */
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+ return !(flags & SR_IE);
+}
+
+/* test hardware interrupt enable bit */
+static inline int arch_irqs_disabled(void)
+{
+ return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+/* set interrupt enabled status */
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+ csr_set(CSR_STATUS, flags & SR_IE);
+}
+
+#endif /* _ASM_RISCV_IRQFLAGS_H */
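A sketch of the pairing these hooks enable; real kernel code reaches them through the generic local_irq_save()/local_irq_restore() wrappers, and critical_section() is a hypothetical example shown raw for illustration.

/* Hypothetical sketch: mask interrupts around a critical section by
 * clearing and then conditionally restoring SR_IE in CSR_STATUS. */
static void critical_section(void)
{
	unsigned long flags = arch_local_irq_save();

	/* ... code that must not be interrupted on this hart ... */

	arch_local_irq_restore(flags);
}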
diff --git a/arch/riscv/include/asm/jump_label.h b/arch/riscv/include/asm/jump_label.h
new file mode 100644
index 0000000000..14a5ea8d8e
--- /dev/null
+++ b/arch/riscv/include/asm/jump_label.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Emil Renner Berthing
+ *
+ * Based on arch/arm64/include/asm/jump_label.h
+ */
+#ifndef __ASM_JUMP_LABEL_H
+#define __ASM_JUMP_LABEL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/types.h>
+#include <asm/asm.h>
+
+#define JUMP_LABEL_NOP_SIZE 4
+
+static __always_inline bool arch_static_branch(struct static_key * const key,
+ const bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: nop \n\t"
+ " .option pop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align " RISCV_LGPTR " \n\t"
+ " .long 1b - ., %l[label] - . \n\t"
+ " " RISCV_PTR " %0 - . \n\t"
+ " .popsection \n\t"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+static __always_inline bool arch_static_branch_jump(struct static_key * const key,
+ const bool branch)
+{
+ asm_volatile_goto(
+ " .align 2 \n\t"
+ " .option push \n\t"
+ " .option norelax \n\t"
+ " .option norvc \n\t"
+ "1: jal zero, %l[label] \n\t"
+ " .option pop \n\t"
+ " .pushsection __jump_table, \"aw\" \n\t"
+ " .align " RISCV_LGPTR " \n\t"
+ " .long 1b - ., %l[label] - . \n\t"
+ " " RISCV_PTR " %0 - . \n\t"
+ " .popsection \n\t"
+ : : "i"(&((char *)key)[branch]) : : label);
+
+ return false;
+label:
+ return true;
+}
+
+#endif /* __ASSEMBLY__ */
+#endif /* __ASM_JUMP_LABEL_H */
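A usage sketch via the generic <linux/jump_label.h> API, which is what ultimately exercises the two helpers above; feature_key and hot_path() are hypothetical names.

/* Hypothetical sketch: a default-off static key. The branch body costs
 * a single nop until the key is enabled, at which point the nop is
 * patched to the jal emitted by arch_static_branch_jump(). */
static DEFINE_STATIC_KEY_FALSE(feature_key);

static void hot_path(void)
{
	if (static_branch_unlikely(&feature_key))
		pr_info("feature enabled\n");
}

/* elsewhere, e.g. at init time: static_branch_enable(&feature_key); */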
diff --git a/arch/riscv/include/asm/kasan.h b/arch/riscv/include/asm/kasan.h
new file mode 100644
index 0000000000..0b85e363e7
--- /dev/null
+++ b/arch/riscv/include/asm/kasan.h
@@ -0,0 +1,45 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2019 Andes Technology Corporation */
+
+#ifndef __ASM_KASAN_H
+#define __ASM_KASAN_H
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_KASAN
+
+/*
+ * The following comment was copied from arm64:
+ * KASAN_SHADOW_START: beginning of the kernel virtual addresses.
+ * KASAN_SHADOW_END: KASAN_SHADOW_START + 1/N of kernel virtual addresses,
+ * where N = (1 << KASAN_SHADOW_SCALE_SHIFT).
+ *
+ * KASAN_SHADOW_OFFSET:
+ * This value is used to map an address to the corresponding shadow
+ * address by the following formula:
+ * shadow_addr = (address >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
+ *
+ * (1 << (64 - KASAN_SHADOW_SCALE_SHIFT)) shadow addresses that lie in range
+ * [KASAN_SHADOW_OFFSET, KASAN_SHADOW_END) cover all 64-bits of virtual
+ * addresses. So KASAN_SHADOW_OFFSET should satisfy the following equation:
+ * KASAN_SHADOW_OFFSET = KASAN_SHADOW_END -
+ * (1ULL << (64 - KASAN_SHADOW_SCALE_SHIFT))
+ */
+#define KASAN_SHADOW_SCALE_SHIFT 3
+
+#define KASAN_SHADOW_SIZE (UL(1) << ((VA_BITS - 1) - KASAN_SHADOW_SCALE_SHIFT))
+/*
+ * Depending on the size of the virtual address space, the region may not be
+ * aligned on PGDIR_SIZE, so force its alignment to ease its population.
+ */
+#define KASAN_SHADOW_START ((KASAN_SHADOW_END - KASAN_SHADOW_SIZE) & PGDIR_MASK)
+#define KASAN_SHADOW_END MODULES_LOWEST_VADDR
+#define KASAN_SHADOW_OFFSET _AC(CONFIG_KASAN_SHADOW_OFFSET, UL)
+
+void kasan_init(void);
+asmlinkage void kasan_early_init(void);
+void kasan_swapper_init(void);
+
+#endif
+#endif
+#endif /* __ASM_KASAN_H */
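A sketch of the shadow-mapping formula from the comment above, which is what the generic kasan_mem_to_shadow() evaluates to with these constants; shadow_of() is a hypothetical name.

/* Hypothetical sketch: map a kernel address to its KASAN shadow byte,
 * i.e. shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET. */
static void *shadow_of(const void *addr)
{
	return (void *)(((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
			+ KASAN_SHADOW_OFFSET);
}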
diff --git a/arch/riscv/include/asm/kdebug.h b/arch/riscv/include/asm/kdebug.h
new file mode 100644
index 0000000000..85ac00411f
--- /dev/null
+++ b/arch/riscv/include/asm/kdebug.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_RISCV_KDEBUG_H
+#define _ASM_RISCV_KDEBUG_H
+
+enum die_val {
+ DIE_UNUSED,
+ DIE_TRAP,
+ DIE_OOPS
+};
+
+#endif
diff --git a/arch/riscv/include/asm/kexec.h b/arch/riscv/include/asm/kexec.h
new file mode 100644
index 0000000000..2b56769cb5
--- /dev/null
+++ b/arch/riscv/include/asm/kexec.h
@@ -0,0 +1,72 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 FORTH-ICS/CARV
+ * Nick Kossifidis <mick@ics.forth.gr>
+ */
+
+#ifndef _RISCV_KEXEC_H
+#define _RISCV_KEXEC_H
+
+#include <asm/page.h> /* For PAGE_SIZE */
+
+/* Maximum physical address we can use pages from */
+#define KEXEC_SOURCE_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can reach in physical address mode */
+#define KEXEC_DESTINATION_MEMORY_LIMIT (-1UL)
+
+/* Maximum address we can use for the control code buffer */
+#define KEXEC_CONTROL_MEMORY_LIMIT (-1UL)
+
+/* Reserve a page for the control code buffer */
+#define KEXEC_CONTROL_PAGE_SIZE PAGE_SIZE
+
+#define KEXEC_ARCH KEXEC_ARCH_RISCV
+
+extern void riscv_crash_save_regs(struct pt_regs *newregs);
+
+static inline void
+crash_setup_regs(struct pt_regs *newregs,
+ struct pt_regs *oldregs)
+{
+ if (oldregs)
+ memcpy(newregs, oldregs, sizeof(struct pt_regs));
+ else
+ riscv_crash_save_regs(newregs);
+}
+
+
+#define ARCH_HAS_KIMAGE_ARCH
+
+struct kimage_arch {
+ void *fdt; /* For CONFIG_KEXEC_FILE */
+ unsigned long fdt_addr;
+};
+
+extern const unsigned char riscv_kexec_relocate[];
+extern const unsigned int riscv_kexec_relocate_size;
+
+typedef void (*riscv_kexec_method)(unsigned long first_ind_entry,
+ unsigned long jump_addr,
+ unsigned long fdt_addr,
+ unsigned long hartid,
+ unsigned long va_pa_off);
+
+extern riscv_kexec_method riscv_kexec_norelocate;
+
+#ifdef CONFIG_KEXEC_FILE
+extern const struct kexec_file_ops elf_kexec_ops;
+
+struct purgatory_info;
+int arch_kexec_apply_relocations_add(struct purgatory_info *pi,
+ Elf_Shdr *section,
+ const Elf_Shdr *relsec,
+ const Elf_Shdr *symtab);
+#define arch_kexec_apply_relocations_add arch_kexec_apply_relocations_add
+
+struct kimage;
+int arch_kimage_file_post_load_cleanup(struct kimage *image);
+#define arch_kimage_file_post_load_cleanup arch_kimage_file_post_load_cleanup
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/kfence.h b/arch/riscv/include/asm/kfence.h
new file mode 100644
index 0000000000..0bbffd5280
--- /dev/null
+++ b/arch/riscv/include/asm/kfence.h
@@ -0,0 +1,30 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_KFENCE_H
+#define _ASM_RISCV_KFENCE_H
+
+#include <linux/kfence.h>
+#include <linux/pfn.h>
+#include <asm-generic/pgalloc.h>
+#include <asm/pgtable.h>
+
+static inline bool arch_kfence_init_pool(void)
+{
+ return true;
+}
+
+static inline bool kfence_protect_page(unsigned long addr, bool protect)
+{
+ pte_t *pte = virt_to_kpte(addr);
+
+ if (protect)
+ set_pte(pte, __pte(pte_val(*pte) & ~_PAGE_PRESENT));
+ else
+ set_pte(pte, __pte(pte_val(*pte) | _PAGE_PRESENT));
+
+ flush_tlb_kernel_range(addr, addr + PAGE_SIZE);
+
+ return true;
+}
+
+#endif /* _ASM_RISCV_KFENCE_H */
diff --git a/arch/riscv/include/asm/kgdb.h b/arch/riscv/include/asm/kgdb.h
new file mode 100644
index 0000000000..46677daf70
--- /dev/null
+++ b/arch/riscv/include/asm/kgdb.h
@@ -0,0 +1,113 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_KGDB_H_
+#define __ASM_KGDB_H_
+
+#ifdef __KERNEL__
+
+#define GDB_SIZEOF_REG sizeof(unsigned long)
+
+#define DBG_MAX_REG_NUM (36)
+#define NUMREGBYTES ((DBG_MAX_REG_NUM) * GDB_SIZEOF_REG)
+#define CACHE_FLUSH_IS_SAFE 1
+#define BUFMAX 2048
+#ifdef CONFIG_RISCV_ISA_C
+#define BREAK_INSTR_SIZE 2
+#else
+#define BREAK_INSTR_SIZE 4
+#endif
+
+#ifndef __ASSEMBLY__
+
+extern unsigned long kgdb_compiled_break;
+
+static inline void arch_kgdb_breakpoint(void)
+{
+ asm(".global kgdb_compiled_break\n"
+ ".option norvc\n"
+ "kgdb_compiled_break: ebreak\n"
+ ".option rvc\n");
+}
+
+#endif /* !__ASSEMBLY__ */
+
+#define DBG_REG_ZERO "zero"
+#define DBG_REG_RA "ra"
+#define DBG_REG_SP "sp"
+#define DBG_REG_GP "gp"
+#define DBG_REG_TP "tp"
+#define DBG_REG_T0 "t0"
+#define DBG_REG_T1 "t1"
+#define DBG_REG_T2 "t2"
+#define DBG_REG_FP "fp"
+#define DBG_REG_S1 "s1"
+#define DBG_REG_A0 "a0"
+#define DBG_REG_A1 "a1"
+#define DBG_REG_A2 "a2"
+#define DBG_REG_A3 "a3"
+#define DBG_REG_A4 "a4"
+#define DBG_REG_A5 "a5"
+#define DBG_REG_A6 "a6"
+#define DBG_REG_A7 "a7"
+#define DBG_REG_S2 "s2"
+#define DBG_REG_S3 "s3"
+#define DBG_REG_S4 "s4"
+#define DBG_REG_S5 "s5"
+#define DBG_REG_S6 "s6"
+#define DBG_REG_S7 "s7"
+#define DBG_REG_S8 "s8"
+#define DBG_REG_S9 "s9"
+#define DBG_REG_S10 "s10"
+#define DBG_REG_S11 "s11"
+#define DBG_REG_T3 "t3"
+#define DBG_REG_T4 "t4"
+#define DBG_REG_T5 "t5"
+#define DBG_REG_T6 "t6"
+#define DBG_REG_EPC "pc"
+#define DBG_REG_STATUS "sstatus"
+#define DBG_REG_BADADDR "stval"
+#define DBG_REG_CAUSE "scause"
+
+#define DBG_REG_ZERO_OFF 0
+#define DBG_REG_RA_OFF 1
+#define DBG_REG_SP_OFF 2
+#define DBG_REG_GP_OFF 3
+#define DBG_REG_TP_OFF 4
+#define DBG_REG_T0_OFF 5
+#define DBG_REG_T1_OFF 6
+#define DBG_REG_T2_OFF 7
+#define DBG_REG_FP_OFF 8
+#define DBG_REG_S1_OFF 9
+#define DBG_REG_A0_OFF 10
+#define DBG_REG_A1_OFF 11
+#define DBG_REG_A2_OFF 12
+#define DBG_REG_A3_OFF 13
+#define DBG_REG_A4_OFF 14
+#define DBG_REG_A5_OFF 15
+#define DBG_REG_A6_OFF 16
+#define DBG_REG_A7_OFF 17
+#define DBG_REG_S2_OFF 18
+#define DBG_REG_S3_OFF 19
+#define DBG_REG_S4_OFF 20
+#define DBG_REG_S5_OFF 21
+#define DBG_REG_S6_OFF 22
+#define DBG_REG_S7_OFF 23
+#define DBG_REG_S8_OFF 24
+#define DBG_REG_S9_OFF 25
+#define DBG_REG_S10_OFF 26
+#define DBG_REG_S11_OFF 27
+#define DBG_REG_T3_OFF 28
+#define DBG_REG_T4_OFF 29
+#define DBG_REG_T5_OFF 30
+#define DBG_REG_T6_OFF 31
+#define DBG_REG_EPC_OFF 32
+#define DBG_REG_STATUS_OFF 33
+#define DBG_REG_BADADDR_OFF 34
+#define DBG_REG_CAUSE_OFF 35
+
+extern const char riscv_gdb_stub_feature[64];
+
+#define kgdb_arch_gdb_stub_feature riscv_gdb_stub_feature
+
+#endif /* __KERNEL__ */
+#endif /* __ASM_KGDB_H_ */
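The DBG_REG_*_OFF constants above are slot indices into the flat register array the GDB stub exchanges with the debugger: x0-x31 in slots 0-31, then pc, sstatus, stval and scause. A compilable sketch of how such offsets are used to pack registers into that array (struct fake_pt_regs and its fields are illustrative stand-ins; the real copy is done from struct pt_regs in the kernel's kgdb code):

#include <stdio.h>
#include <string.h>

#define DBG_MAX_REG_NUM 36

/* stand-in for the kernel's struct pt_regs; only a few fields shown */
struct fake_pt_regs {
    unsigned long ra, sp, epc;
};

static void pack_gdb_regs(unsigned long *gdb_regs, const struct fake_pt_regs *r)
{
    memset(gdb_regs, 0, DBG_MAX_REG_NUM * sizeof(*gdb_regs));
    gdb_regs[1]  = r->ra;   /* DBG_REG_RA_OFF  */
    gdb_regs[2]  = r->sp;   /* DBG_REG_SP_OFF  */
    gdb_regs[32] = r->epc;  /* DBG_REG_EPC_OFF */
}

int main(void)
{
    struct fake_pt_regs regs = { .ra = 0x80201234, .sp = 0x82000000,
                                 .epc = 0x80200000 };
    unsigned long gdb_regs[DBG_MAX_REG_NUM];

    pack_gdb_regs(gdb_regs, &regs);
    printf("pc slot (32) = %#lx\n", gdb_regs[32]);
    return 0;
}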
diff --git a/arch/riscv/include/asm/kprobes.h b/arch/riscv/include/asm/kprobes.h
new file mode 100644
index 0000000000..78ea44f767
--- /dev/null
+++ b/arch/riscv/include/asm/kprobes.h
@@ -0,0 +1,54 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copied from arch/arm64/include/asm/kprobes.h
+ *
+ * Copyright (C) 2013 Linaro Limited
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_KPROBES_H
+#define _ASM_RISCV_KPROBES_H
+
+#include <asm-generic/kprobes.h>
+
+#ifdef CONFIG_KPROBES
+#include <linux/types.h>
+#include <linux/ptrace.h>
+#include <linux/percpu.h>
+
+#define __ARCH_WANT_KPROBES_INSN_SLOT
+#define MAX_INSN_SIZE 2
+
+#define flush_insn_slot(p) do { } while (0)
+#define kretprobe_blacklist_size 0
+
+#include <asm/probes.h>
+
+struct prev_kprobe {
+ struct kprobe *kp;
+ unsigned int status;
+};
+
+/* per-cpu kprobe control block */
+struct kprobe_ctlblk {
+ unsigned int kprobe_status;
+ unsigned long saved_status;
+ struct prev_kprobe prev_kprobe;
+};
+
+void arch_remove_kprobe(struct kprobe *p);
+int kprobe_fault_handler(struct pt_regs *regs, unsigned int trapnr);
+bool kprobe_breakpoint_handler(struct pt_regs *regs);
+bool kprobe_single_step_handler(struct pt_regs *regs);
+#else
+static inline bool kprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline bool kprobe_single_step_handler(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_KPROBES */
+#endif /* _ASM_RISCV_KPROBES_H */
diff --git a/arch/riscv/include/asm/kvm_aia.h b/arch/riscv/include/asm/kvm_aia.h
new file mode 100644
index 0000000000..1f37b600ca
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_aia.h
@@ -0,0 +1,174 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ *
+ * Authors:
+ * Anup Patel <apatel@ventanamicro.com>
+ */
+
+#ifndef __KVM_RISCV_AIA_H
+#define __KVM_RISCV_AIA_H
+
+#include <linux/jump_label.h>
+#include <linux/kvm_types.h>
+#include <asm/csr.h>
+
+struct kvm_aia {
+ /* In-kernel irqchip created */
+ bool in_kernel;
+
+ /* In-kernel irqchip initialized */
+ bool initialized;
+
+ /* Virtualization mode (Emulation, HW Accelerated, or Auto) */
+ u32 mode;
+
+ /* Number of MSIs */
+ u32 nr_ids;
+
+ /* Number of wired IRQs */
+ u32 nr_sources;
+
+ /* Number of group bits in IMSIC address */
+ u32 nr_group_bits;
+
+ /* Position of group bits in IMSIC address */
+ u32 nr_group_shift;
+
+ /* Number of hart bits in IMSIC address */
+ u32 nr_hart_bits;
+
+ /* Number of guest bits in IMSIC address */
+ u32 nr_guest_bits;
+
+ /* Guest physical address of APLIC */
+ gpa_t aplic_addr;
+
+ /* Internal state of APLIC */
+ void *aplic_state;
+};
+
+struct kvm_vcpu_aia_csr {
+ unsigned long vsiselect;
+ unsigned long hviprio1;
+ unsigned long hviprio2;
+ unsigned long vsieh;
+ unsigned long hviph;
+ unsigned long hviprio1h;
+ unsigned long hviprio2h;
+};
+
+struct kvm_vcpu_aia {
+ /* CPU AIA CSR context of Guest VCPU */
+ struct kvm_vcpu_aia_csr guest_csr;
+
+ /* CPU AIA CSR context upon Guest VCPU reset */
+ struct kvm_vcpu_aia_csr guest_reset_csr;
+
+ /* Guest physical address of IMSIC for this VCPU */
+ gpa_t imsic_addr;
+
+ /* HART index of IMSIC extracted from guest physical address */
+ u32 hart_index;
+
+ /* Internal state of IMSIC for this VCPU */
+ void *imsic_state;
+};
+
+#define KVM_RISCV_AIA_UNDEF_ADDR (-1)
+
+#define kvm_riscv_aia_initialized(k) ((k)->arch.aia.initialized)
+
+#define irqchip_in_kernel(k) ((k)->arch.aia.in_kernel)
+
+extern unsigned int kvm_riscv_aia_nr_hgei;
+extern unsigned int kvm_riscv_aia_max_ids;
+DECLARE_STATIC_KEY_FALSE(kvm_riscv_aia_available);
+#define kvm_riscv_aia_available() \
+ static_branch_unlikely(&kvm_riscv_aia_available)
+
+extern struct kvm_device_ops kvm_riscv_aia_device_ops;
+
+void kvm_riscv_vcpu_aia_imsic_release(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_imsic_update(struct kvm_vcpu *vcpu);
+
+#define KVM_RISCV_AIA_IMSIC_TOPEI (ISELECT_MASK + 1)
+int kvm_riscv_vcpu_aia_imsic_rmw(struct kvm_vcpu *vcpu, unsigned long isel,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+int kvm_riscv_aia_imsic_rw_attr(struct kvm *kvm, unsigned long type,
+ bool write, unsigned long *val);
+int kvm_riscv_aia_imsic_has_attr(struct kvm *kvm, unsigned long type);
+void kvm_riscv_vcpu_aia_imsic_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_imsic_inject(struct kvm_vcpu *vcpu,
+ u32 guest_index, u32 offset, u32 iid);
+int kvm_riscv_vcpu_aia_imsic_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_imsic_cleanup(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_aia_aplic_set_attr(struct kvm *kvm, unsigned long type, u32 v);
+int kvm_riscv_aia_aplic_get_attr(struct kvm *kvm, unsigned long type, u32 *v);
+int kvm_riscv_aia_aplic_has_attr(struct kvm *kvm, unsigned long type);
+int kvm_riscv_aia_aplic_inject(struct kvm *kvm, u32 source, bool level);
+int kvm_riscv_aia_aplic_init(struct kvm *kvm);
+void kvm_riscv_aia_aplic_cleanup(struct kvm *kvm);
+
+#ifdef CONFIG_32BIT
+void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu);
+#else
+static inline void kvm_riscv_vcpu_aia_flush_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+static inline void kvm_riscv_vcpu_aia_sync_interrupts(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+bool kvm_riscv_vcpu_aia_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+
+void kvm_riscv_vcpu_aia_update_hvip(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_load(struct kvm_vcpu *vcpu, int cpu);
+void kvm_riscv_vcpu_aia_put(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_get_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long *out_val);
+int kvm_riscv_vcpu_aia_set_csr(struct kvm_vcpu *vcpu,
+ unsigned long reg_num,
+ unsigned long val);
+
+int kvm_riscv_vcpu_aia_rmw_topei(struct kvm_vcpu *vcpu,
+ unsigned int csr_num,
+ unsigned long *val,
+ unsigned long new_val,
+ unsigned long wr_mask);
+int kvm_riscv_vcpu_aia_rmw_ireg(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+#define KVM_RISCV_VCPU_AIA_CSR_FUNCS \
+{ .base = CSR_SIREG, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_ireg }, \
+{ .base = CSR_STOPEI, .count = 1, .func = kvm_riscv_vcpu_aia_rmw_topei },
+
+int kvm_riscv_vcpu_aia_update(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_reset(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_aia_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_aia_deinit(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_aia_inject_msi_by_id(struct kvm *kvm, u32 hart_index,
+ u32 guest_index, u32 iid);
+int kvm_riscv_aia_inject_msi(struct kvm *kvm, struct kvm_msi *msi);
+int kvm_riscv_aia_inject_irq(struct kvm *kvm, unsigned int irq, bool level);
+
+void kvm_riscv_aia_init_vm(struct kvm *kvm);
+void kvm_riscv_aia_destroy_vm(struct kvm *kvm);
+
+int kvm_riscv_aia_alloc_hgei(int cpu, struct kvm_vcpu *owner,
+ void __iomem **hgei_va, phys_addr_t *hgei_pa);
+void kvm_riscv_aia_free_hgei(int cpu, int hgei);
+void kvm_riscv_aia_wakeon_hgei(struct kvm_vcpu *owner, bool enable);
+
+void kvm_riscv_aia_enable(void);
+void kvm_riscv_aia_disable(void);
+int kvm_riscv_aia_init(void);
+void kvm_riscv_aia_exit(void);
+
+#endif
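The group/hart/guest geometry fields in struct kvm_aia above describe where each VCPU's IMSIC interrupt file sits in guest physical address space. A standalone sketch of the address composition those fields imply under the AIA specification's layout (base address and indices below are made up; the real computation lives in KVM's AIA code, not in this header):

#include <stdint.h>
#include <stdio.h>

#define IMSIC_PAGE_SHIFT 12

static uint64_t imsic_gpa(uint64_t base, uint32_t group, uint32_t group_shift,
                          uint32_t hart, uint32_t guest_bits, uint32_t guest)
{
    /* Each interrupt file occupies one 4 KiB page; harts within a group are
     * laid out consecutively, with 2^guest_bits pages per hart. */
    uint64_t page = ((uint64_t)hart << guest_bits) + guest;

    return base | ((uint64_t)group << group_shift) | (page << IMSIC_PAGE_SHIFT);
}

int main(void)
{
    /* group 1 at bit 24, hart 2 with 1 guest bit, guest interrupt file 1 */
    printf("gpa = %#llx\n",
           (unsigned long long)imsic_gpa(0x28000000ULL, 1, 24, 2, 1, 1));
    return 0;
}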
diff --git a/arch/riscv/include/asm/kvm_aia_aplic.h b/arch/riscv/include/asm/kvm_aia_aplic.h
new file mode 100644
index 0000000000..6dd1a4809e
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_aia_aplic.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __KVM_RISCV_AIA_APLIC_H
+#define __KVM_RISCV_AIA_APLIC_H
+
+#include <linux/bitops.h>
+
+#define APLIC_MAX_IDC BIT(14)
+#define APLIC_MAX_SOURCE 1024
+
+#define APLIC_DOMAINCFG 0x0000
+#define APLIC_DOMAINCFG_RDONLY 0x80000000
+#define APLIC_DOMAINCFG_IE BIT(8)
+#define APLIC_DOMAINCFG_DM BIT(2)
+#define APLIC_DOMAINCFG_BE BIT(0)
+
+#define APLIC_SOURCECFG_BASE 0x0004
+#define APLIC_SOURCECFG_D BIT(10)
+#define APLIC_SOURCECFG_CHILDIDX_MASK 0x000003ff
+#define APLIC_SOURCECFG_SM_MASK 0x00000007
+#define APLIC_SOURCECFG_SM_INACTIVE 0x0
+#define APLIC_SOURCECFG_SM_DETACH 0x1
+#define APLIC_SOURCECFG_SM_EDGE_RISE 0x4
+#define APLIC_SOURCECFG_SM_EDGE_FALL 0x5
+#define APLIC_SOURCECFG_SM_LEVEL_HIGH 0x6
+#define APLIC_SOURCECFG_SM_LEVEL_LOW 0x7
+
+#define APLIC_IRQBITS_PER_REG 32
+
+#define APLIC_SETIP_BASE 0x1c00
+#define APLIC_SETIPNUM 0x1cdc
+
+#define APLIC_CLRIP_BASE 0x1d00
+#define APLIC_CLRIPNUM 0x1ddc
+
+#define APLIC_SETIE_BASE 0x1e00
+#define APLIC_SETIENUM 0x1edc
+
+#define APLIC_CLRIE_BASE 0x1f00
+#define APLIC_CLRIENUM 0x1fdc
+
+#define APLIC_SETIPNUM_LE 0x2000
+#define APLIC_SETIPNUM_BE 0x2004
+
+#define APLIC_GENMSI 0x3000
+
+#define APLIC_TARGET_BASE 0x3004
+#define APLIC_TARGET_HART_IDX_SHIFT 18
+#define APLIC_TARGET_HART_IDX_MASK 0x3fff
+#define APLIC_TARGET_GUEST_IDX_SHIFT 12
+#define APLIC_TARGET_GUEST_IDX_MASK 0x3f
+#define APLIC_TARGET_IPRIO_MASK 0xff
+#define APLIC_TARGET_EIID_MASK 0x7ff
+
+#endif
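The APLIC_TARGET_* shift/mask constants above carve a target register into hart index, guest index and external interrupt identity fields (MSI delivery mode). A small sketch of encoding and decoding such a register with those constants; the register value itself is made up for illustration:

#include <stdint.h>
#include <stdio.h>

#define APLIC_TARGET_HART_IDX_SHIFT  18
#define APLIC_TARGET_HART_IDX_MASK   0x3fff
#define APLIC_TARGET_GUEST_IDX_SHIFT 12
#define APLIC_TARGET_GUEST_IDX_MASK  0x3f
#define APLIC_TARGET_EIID_MASK       0x7ff

int main(void)
{
    /* route source to hart 3, guest interrupt file 1, interrupt identity 42 */
    uint32_t target = (3u << APLIC_TARGET_HART_IDX_SHIFT) |
                      (1u << APLIC_TARGET_GUEST_IDX_SHIFT) | 42u;

    printf("hart=%u guest=%u eiid=%u\n",
           (target >> APLIC_TARGET_HART_IDX_SHIFT) & APLIC_TARGET_HART_IDX_MASK,
           (target >> APLIC_TARGET_GUEST_IDX_SHIFT) & APLIC_TARGET_GUEST_IDX_MASK,
           target & APLIC_TARGET_EIID_MASK);
    return 0;
}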
diff --git a/arch/riscv/include/asm/kvm_aia_imsic.h b/arch/riscv/include/asm/kvm_aia_imsic.h
new file mode 100644
index 0000000000..da5881d2bd
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_aia_imsic.h
@@ -0,0 +1,38 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2022 Ventana Micro Systems Inc.
+ */
+#ifndef __KVM_RISCV_AIA_IMSIC_H
+#define __KVM_RISCV_AIA_IMSIC_H
+
+#include <linux/types.h>
+#include <asm/csr.h>
+
+#define IMSIC_MMIO_PAGE_SHIFT 12
+#define IMSIC_MMIO_PAGE_SZ (1UL << IMSIC_MMIO_PAGE_SHIFT)
+#define IMSIC_MMIO_PAGE_LE 0x00
+#define IMSIC_MMIO_PAGE_BE 0x04
+
+#define IMSIC_MIN_ID 63
+#define IMSIC_MAX_ID 2048
+
+#define IMSIC_EIDELIVERY 0x70
+
+#define IMSIC_EITHRESHOLD 0x72
+
+#define IMSIC_EIP0 0x80
+#define IMSIC_EIP63 0xbf
+#define IMSIC_EIPx_BITS 32
+
+#define IMSIC_EIE0 0xc0
+#define IMSIC_EIE63 0xff
+#define IMSIC_EIEx_BITS 32
+
+#define IMSIC_FIRST IMSIC_EIDELIVERY
+#define IMSIC_LAST IMSIC_EIE63
+
+#define IMSIC_MMIO_SETIPNUM_LE 0x00
+#define IMSIC_MMIO_SETIPNUM_BE 0x04
+
+#endif
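The EIP0..EIP63 (and EIE0..EIE63) indirect registers above each hold IMSIC_EIPx_BITS pending (or enable) bits, so an interrupt identity maps to a register index and a bit position by simple division. A sketch of that arithmetic, assuming the 32-bit register view; on RV64 the indirect registers pair up into 64-bit accesses, which is ignored here for brevity:

#include <stdio.h>

#define IMSIC_EIP0      0x80
#define IMSIC_EIPx_BITS 32

int main(void)
{
    unsigned int id = 100;
    unsigned int reg = IMSIC_EIP0 + id / IMSIC_EIPx_BITS;  /* iselect value */
    unsigned int bit = id % IMSIC_EIPx_BITS;               /* bit within it */

    printf("id %u -> eip register %#x, bit %u\n", id, reg, bit);
    return 0;
}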
diff --git a/arch/riscv/include/asm/kvm_host.h b/arch/riscv/include/asm/kvm_host.h
new file mode 100644
index 0000000000..1ebf20dfba
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_host.h
@@ -0,0 +1,357 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_HOST_H__
+#define __RISCV_KVM_HOST_H__
+
+#include <linux/types.h>
+#include <linux/kvm.h>
+#include <linux/kvm_types.h>
+#include <linux/spinlock.h>
+#include <asm/hwcap.h>
+#include <asm/kvm_aia.h>
+#include <asm/ptrace.h>
+#include <asm/kvm_vcpu_fp.h>
+#include <asm/kvm_vcpu_insn.h>
+#include <asm/kvm_vcpu_sbi.h>
+#include <asm/kvm_vcpu_timer.h>
+#include <asm/kvm_vcpu_pmu.h>
+
+#define KVM_MAX_VCPUS 1024
+
+#define KVM_HALT_POLL_NS_DEFAULT 500000
+
+#define KVM_VCPU_MAX_FEATURES 0
+
+#define KVM_IRQCHIP_NUM_PINS 1024
+
+#define KVM_REQ_SLEEP \
+ KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_VCPU_RESET KVM_ARCH_REQ(1)
+#define KVM_REQ_UPDATE_HGATP KVM_ARCH_REQ(2)
+#define KVM_REQ_FENCE_I \
+ KVM_ARCH_REQ_FLAGS(3, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE_GVMA_VMID_ALL KVM_REQ_TLB_FLUSH
+#define KVM_REQ_HFENCE_VVMA_ALL \
+ KVM_ARCH_REQ_FLAGS(4, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_HFENCE \
+ KVM_ARCH_REQ_FLAGS(5, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+
+enum kvm_riscv_hfence_type {
+ KVM_RISCV_HFENCE_UNKNOWN = 0,
+ KVM_RISCV_HFENCE_GVMA_VMID_GPA,
+ KVM_RISCV_HFENCE_VVMA_ASID_GVA,
+ KVM_RISCV_HFENCE_VVMA_ASID_ALL,
+ KVM_RISCV_HFENCE_VVMA_GVA,
+};
+
+struct kvm_riscv_hfence {
+ enum kvm_riscv_hfence_type type;
+ unsigned long asid;
+ unsigned long order;
+ gpa_t addr;
+ gpa_t size;
+};
+
+#define KVM_RISCV_VCPU_MAX_HFENCE 64
+
+struct kvm_vm_stat {
+ struct kvm_vm_stat_generic generic;
+};
+
+struct kvm_vcpu_stat {
+ struct kvm_vcpu_stat_generic generic;
+ u64 ecall_exit_stat;
+ u64 wfi_exit_stat;
+ u64 mmio_exit_user;
+ u64 mmio_exit_kernel;
+ u64 csr_exit_user;
+ u64 csr_exit_kernel;
+ u64 signal_exits;
+ u64 exits;
+};
+
+struct kvm_arch_memory_slot {
+};
+
+struct kvm_vmid {
+ /*
+ * Writes to vmid_version and vmid happen with vmid_lock held
+ * whereas reads happen without any lock held.
+ */
+ unsigned long vmid_version;
+ unsigned long vmid;
+};
+
+struct kvm_arch {
+ /* G-stage vmid */
+ struct kvm_vmid vmid;
+
+ /* G-stage page table */
+ pgd_t *pgd;
+ phys_addr_t pgd_phys;
+
+ /* Guest Timer */
+ struct kvm_guest_timer timer;
+
+ /* AIA Guest/VM context */
+ struct kvm_aia aia;
+};
+
+struct kvm_cpu_trap {
+ unsigned long sepc;
+ unsigned long scause;
+ unsigned long stval;
+ unsigned long htval;
+ unsigned long htinst;
+};
+
+struct kvm_cpu_context {
+ unsigned long zero;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ unsigned long sepc;
+ unsigned long sstatus;
+ unsigned long hstatus;
+ union __riscv_fp_state fp;
+ struct __riscv_v_ext_state vector;
+};
+
+struct kvm_vcpu_csr {
+ unsigned long vsstatus;
+ unsigned long vsie;
+ unsigned long vstvec;
+ unsigned long vsscratch;
+ unsigned long vsepc;
+ unsigned long vscause;
+ unsigned long vstval;
+ unsigned long hvip;
+ unsigned long vsatp;
+ unsigned long scounteren;
+};
+
+struct kvm_vcpu_arch {
+ /* VCPU ran at least once */
+ bool ran_atleast_once;
+
+ /* Last Host CPU on which Guest VCPU exited */
+ int last_exit_cpu;
+
+ /* ISA feature bits (similar to MISA) */
+ DECLARE_BITMAP(isa, RISCV_ISA_EXT_MAX);
+
+ /* Vendor, Arch, and Implementation details */
+ unsigned long mvendorid;
+ unsigned long marchid;
+ unsigned long mimpid;
+
+ /* SSCRATCH, STVEC, and SCOUNTEREN of Host */
+ unsigned long host_sscratch;
+ unsigned long host_stvec;
+ unsigned long host_scounteren;
+
+ /* CPU context of Host */
+ struct kvm_cpu_context host_context;
+
+ /* CPU context of Guest VCPU */
+ struct kvm_cpu_context guest_context;
+
+ /* CPU CSR context of Guest VCPU */
+ struct kvm_vcpu_csr guest_csr;
+
+ /* CPU context upon Guest VCPU reset */
+ struct kvm_cpu_context guest_reset_context;
+
+ /* CPU CSR context upon Guest VCPU reset */
+ struct kvm_vcpu_csr guest_reset_csr;
+
+ /*
+ * VCPU interrupts
+ *
+ * We have a lockless approach for tracking pending VCPU interrupts
+ * implemented using atomic bitops. The irqs_pending bitmap represents
+ * pending interrupts, whereas irqs_pending_mask represents the bits that
+ * changed in irqs_pending. The approach is modeled on a multiple-producer,
+ * single-consumer problem, where the consumer is the VCPU itself.
+ */
+#define KVM_RISCV_VCPU_NR_IRQS 64
+ DECLARE_BITMAP(irqs_pending, KVM_RISCV_VCPU_NR_IRQS);
+ DECLARE_BITMAP(irqs_pending_mask, KVM_RISCV_VCPU_NR_IRQS);
+
+ /* VCPU Timer */
+ struct kvm_vcpu_timer timer;
+
+ /* HFENCE request queue */
+ spinlock_t hfence_lock;
+ unsigned long hfence_head;
+ unsigned long hfence_tail;
+ struct kvm_riscv_hfence hfence_queue[KVM_RISCV_VCPU_MAX_HFENCE];
+
+ /* MMIO instruction details */
+ struct kvm_mmio_decode mmio_decode;
+
+ /* CSR instruction details */
+ struct kvm_csr_decode csr_decode;
+
+ /* SBI context */
+ struct kvm_vcpu_sbi_context sbi_context;
+
+ /* AIA VCPU context */
+ struct kvm_vcpu_aia aia_context;
+
+ /* Cache pages needed to program page tables with spinlock held */
+ struct kvm_mmu_memory_cache mmu_page_cache;
+
+ /* VCPU power-off state */
+ bool power_off;
+
+ /* Don't run the VCPU (blocked) */
+ bool pause;
+
+ /* Performance monitoring context */
+ struct kvm_pmu pmu_context;
+};
+
+static inline void kvm_arch_sync_events(struct kvm *kvm) {}
+static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
+
+#define KVM_ARCH_WANT_MMU_NOTIFIER
+
+#define KVM_RISCV_GSTAGE_TLB_MIN_ORDER 12
+
+void kvm_riscv_local_hfence_gvma_vmid_gpa(unsigned long vmid,
+ gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_gvma_vmid_all(unsigned long vmid);
+void kvm_riscv_local_hfence_gvma_gpa(gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_gvma_all(void);
+void kvm_riscv_local_hfence_vvma_asid_gva(unsigned long vmid,
+ unsigned long asid,
+ unsigned long gva,
+ unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_vvma_asid_all(unsigned long vmid,
+ unsigned long asid);
+void kvm_riscv_local_hfence_vvma_gva(unsigned long vmid,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_local_hfence_vvma_all(unsigned long vmid);
+
+void kvm_riscv_local_tlb_sanitize(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_gvma_vmid_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_vvma_all_process(struct kvm_vcpu *vcpu);
+void kvm_riscv_hfence_process(struct kvm_vcpu *vcpu);
+
+void kvm_riscv_fence_i(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_gvma_vmid_gpa(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ gpa_t gpa, gpa_t gpsz,
+ unsigned long order);
+void kvm_riscv_hfence_gvma_vmid_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+void kvm_riscv_hfence_vvma_asid_gva(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order, unsigned long asid);
+void kvm_riscv_hfence_vvma_asid_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long asid);
+void kvm_riscv_hfence_vvma_gva(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask,
+ unsigned long gva, unsigned long gvsz,
+ unsigned long order);
+void kvm_riscv_hfence_vvma_all(struct kvm *kvm,
+ unsigned long hbase, unsigned long hmask);
+
+int kvm_riscv_gstage_ioremap(struct kvm *kvm, gpa_t gpa,
+ phys_addr_t hpa, unsigned long size,
+ bool writable, bool in_atomic);
+void kvm_riscv_gstage_iounmap(struct kvm *kvm, gpa_t gpa,
+ unsigned long size);
+int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
+ struct kvm_memory_slot *memslot,
+ gpa_t gpa, unsigned long hva, bool is_write);
+int kvm_riscv_gstage_alloc_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_free_pgd(struct kvm *kvm);
+void kvm_riscv_gstage_update_hgatp(struct kvm_vcpu *vcpu);
+void __init kvm_riscv_gstage_mode_detect(void);
+unsigned long __init kvm_riscv_gstage_mode(void);
+int kvm_riscv_gstage_gpa_bits(void);
+
+void __init kvm_riscv_gstage_vmid_detect(void);
+unsigned long kvm_riscv_gstage_vmid_bits(void);
+int kvm_riscv_gstage_vmid_init(struct kvm *kvm);
+bool kvm_riscv_gstage_vmid_ver_changed(struct kvm_vmid *vmid);
+void kvm_riscv_gstage_vmid_update(struct kvm_vcpu *vcpu);
+
+int kvm_riscv_setup_default_irq_routing(struct kvm *kvm, u32 lines);
+
+void __kvm_riscv_unpriv_trap(void);
+
+unsigned long kvm_riscv_vcpu_unpriv_read(struct kvm_vcpu *vcpu,
+ bool read_insn,
+ unsigned long guest_addr,
+ struct kvm_cpu_trap *trap);
+void kvm_riscv_vcpu_trap_redirect(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_trap *trap);
+int kvm_riscv_vcpu_exit(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_cpu_trap *trap);
+
+void __kvm_riscv_switch_to(struct kvm_vcpu_arch *vcpu_arch);
+
+void kvm_riscv_vcpu_setup_isa(struct kvm_vcpu *vcpu);
+unsigned long kvm_riscv_vcpu_num_regs(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *uindices);
+int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+
+int kvm_riscv_vcpu_set_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+int kvm_riscv_vcpu_unset_interrupt(struct kvm_vcpu *vcpu, unsigned int irq);
+void kvm_riscv_vcpu_flush_interrupts(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_sync_interrupts(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_has_interrupts(struct kvm_vcpu *vcpu, u64 mask);
+void kvm_riscv_vcpu_power_off(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_power_on(struct kvm_vcpu *vcpu);
+
+#endif /* __RISCV_KVM_HOST_H__ */
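The comment in struct kvm_vcpu_arch above describes a lockless multiple-producer, single-consumer scheme for pending interrupts. A simplified standalone sketch of that scheme using C11 atomics in place of the kernel's atomic bitops, with one 64-bit word standing in for each bitmap (the kernel's consumer walks the mask bit by bit; a single exchange is used here for brevity):

#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

static _Atomic uint64_t irqs_pending;
static _Atomic uint64_t irqs_pending_mask;

static void set_interrupt(unsigned int irq)     /* any producer */
{
    atomic_fetch_or(&irqs_pending, 1ULL << irq);
    /* the mask bit is set last, so the consumer never sees a flagged
     * change before the new pending value is visible */
    atomic_fetch_or(&irqs_pending_mask, 1ULL << irq);
}

static void unset_interrupt(unsigned int irq)   /* any producer */
{
    atomic_fetch_and(&irqs_pending, ~(1ULL << irq));
    atomic_fetch_or(&irqs_pending_mask, 1ULL << irq);
}

static uint64_t flush_interrupts(uint64_t hw_view)  /* single consumer */
{
    uint64_t changed = atomic_exchange(&irqs_pending_mask, 0);
    uint64_t pending = atomic_load(&irqs_pending);

    /* adopt the producers' value for every bit flagged as changed */
    return (hw_view & ~changed) | (pending & changed);
}

int main(void)
{
    uint64_t hvip = 0;

    set_interrupt(9);               /* e.g. a VS-level external interrupt */
    hvip = flush_interrupts(hvip);
    printf("hvip = %#llx\n", (unsigned long long)hvip);

    unset_interrupt(9);
    hvip = flush_interrupts(hvip);
    printf("hvip = %#llx\n", (unsigned long long)hvip);
    return 0;
}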
diff --git a/arch/riscv/include/asm/kvm_types.h b/arch/riscv/include/asm/kvm_types.h
new file mode 100644
index 0000000000..e15765f98d
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_types.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_KVM_TYPES_H
+#define _ASM_RISCV_KVM_TYPES_H
+
+#define KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE 32
+
+#endif /* _ASM_RISCV_KVM_TYPES_H */
diff --git a/arch/riscv/include/asm/kvm_vcpu_fp.h b/arch/riscv/include/asm/kvm_vcpu_fp.h
new file mode 100644
index 0000000000..b554014740
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_fp.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ * Anup Patel <anup.patel@wdc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_FP_H
+#define __KVM_VCPU_RISCV_FP_H
+
+#include <linux/types.h>
+
+struct kvm_cpu_context;
+
+#ifdef CONFIG_FPU
+void __kvm_riscv_fp_f_save(struct kvm_cpu_context *context);
+void __kvm_riscv_fp_f_restore(struct kvm_cpu_context *context);
+void __kvm_riscv_fp_d_save(struct kvm_cpu_context *context);
+void __kvm_riscv_fp_d_restore(struct kvm_cpu_context *context);
+
+void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
+ const unsigned long *isa);
+void kvm_riscv_vcpu_guest_fp_restore(struct kvm_cpu_context *cntx,
+ const unsigned long *isa);
+void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_host_fp_restore(struct kvm_cpu_context *cntx);
+#else
+static inline void kvm_riscv_vcpu_fp_reset(struct kvm_vcpu *vcpu)
+{
+}
+static inline void kvm_riscv_vcpu_guest_fp_save(struct kvm_cpu_context *cntx,
+ const unsigned long *isa)
+{
+}
+static inline void kvm_riscv_vcpu_guest_fp_restore(
+ struct kvm_cpu_context *cntx,
+ const unsigned long *isa)
+{
+}
+static inline void kvm_riscv_vcpu_host_fp_save(struct kvm_cpu_context *cntx)
+{
+}
+static inline void kvm_riscv_vcpu_host_fp_restore(
+ struct kvm_cpu_context *cntx)
+{
+}
+#endif
+
+int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ unsigned long rtype);
+int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg,
+ unsigned long rtype);
+
+#endif
diff --git a/arch/riscv/include/asm/kvm_vcpu_insn.h b/arch/riscv/include/asm/kvm_vcpu_insn.h
new file mode 100644
index 0000000000..350011c835
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_insn.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef __KVM_VCPU_RISCV_INSN_H
+#define __KVM_VCPU_RISCV_INSN_H
+
+struct kvm_vcpu;
+struct kvm_run;
+struct kvm_cpu_trap;
+
+struct kvm_mmio_decode {
+ unsigned long insn;
+ int insn_len;
+ int len;
+ int shift;
+ int return_handled;
+};
+
+struct kvm_csr_decode {
+ unsigned long insn;
+ int return_handled;
+};
+
+/* Return values used by function emulating a particular instruction */
+enum kvm_insn_return {
+ KVM_INSN_EXIT_TO_USER_SPACE = 0,
+ KVM_INSN_CONTINUE_NEXT_SEPC,
+ KVM_INSN_CONTINUE_SAME_SEPC,
+ KVM_INSN_ILLEGAL_TRAP,
+ KVM_INSN_VIRTUAL_TRAP
+};
+
+void kvm_riscv_vcpu_wfi(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_csr_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_virtual_insn(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_cpu_trap *trap);
+
+int kvm_riscv_vcpu_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst);
+int kvm_riscv_vcpu_mmio_store(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ unsigned long fault_addr,
+ unsigned long htinst);
+int kvm_riscv_vcpu_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#endif
diff --git a/arch/riscv/include/asm/kvm_vcpu_pmu.h b/arch/riscv/include/asm/kvm_vcpu_pmu.h
new file mode 100644
index 0000000000..395518a166
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_pmu.h
@@ -0,0 +1,107 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023 Rivos Inc
+ *
+ * Authors:
+ * Atish Patra <atishp@rivosinc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_PMU_H
+#define __KVM_VCPU_RISCV_PMU_H
+
+#include <linux/perf/riscv_pmu.h>
+#include <asm/sbi.h>
+
+#ifdef CONFIG_RISCV_PMU_SBI
+#define RISCV_KVM_MAX_FW_CTRS 32
+#define RISCV_KVM_MAX_HW_CTRS 32
+#define RISCV_KVM_MAX_COUNTERS (RISCV_KVM_MAX_HW_CTRS + RISCV_KVM_MAX_FW_CTRS)
+static_assert(RISCV_KVM_MAX_COUNTERS <= 64);
+
+struct kvm_fw_event {
+ /* Current value of the event */
+ unsigned long value;
+
+ /* Event monitoring status */
+ bool started;
+};
+
+/* Per virtual pmu counter data */
+struct kvm_pmc {
+ u8 idx;
+ struct perf_event *perf_event;
+ u64 counter_val;
+ union sbi_pmu_ctr_info cinfo;
+ /* Event monitoring status */
+ bool started;
+ /* Monitoring event ID */
+ unsigned long event_idx;
+};
+
+/* PMU data structure per vcpu */
+struct kvm_pmu {
+ struct kvm_pmc pmc[RISCV_KVM_MAX_COUNTERS];
+ struct kvm_fw_event fw_event[RISCV_KVM_MAX_FW_CTRS];
+ /* Number of virtual firmware counters available */
+ int num_fw_ctrs;
+ /* Number of virtual hardware counters available */
+ int num_hw_ctrs;
+ /* Flag indicating that PMU initialization is done */
+ bool init_done;
+ /* Bitmap of all the virtual counters in use */
+ DECLARE_BITMAP(pmc_in_use, RISCV_KVM_MAX_COUNTERS);
+};
+
+#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu_context)
+#define pmu_to_vcpu(pmu) (container_of((pmu), struct kvm_vcpu, arch.pmu_context))
+
+#if defined(CONFIG_32BIT)
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLEH, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm }, \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#else
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = CSR_CYCLE, .count = 31, .func = kvm_riscv_vcpu_pmu_read_hpm },
+#endif
+
+int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid);
+int kvm_riscv_vcpu_pmu_read_hpm(struct kvm_vcpu *vcpu, unsigned int csr_num,
+ unsigned long *val, unsigned long new_val,
+ unsigned long wr_mask);
+
+int kvm_riscv_vcpu_pmu_num_ctrs(struct kvm_vcpu *vcpu, struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_info(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_start(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags, u64 ival,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_stop(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_cfg_match(struct kvm_vcpu *vcpu, unsigned long ctr_base,
+ unsigned long ctr_mask, unsigned long flags,
+ unsigned long eidx, u64 evtdata,
+ struct kvm_vcpu_sbi_return *retdata);
+int kvm_riscv_vcpu_pmu_ctr_read(struct kvm_vcpu *vcpu, unsigned long cidx,
+ struct kvm_vcpu_sbi_return *retdata);
+void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu);
+
+#else
+struct kvm_pmu {
+};
+
+#define KVM_RISCV_VCPU_HPMCOUNTER_CSR_FUNCS \
+{.base = 0, .count = 0, .func = NULL },
+
+static inline void kvm_riscv_vcpu_pmu_init(struct kvm_vcpu *vcpu) {}
+static inline int kvm_riscv_vcpu_pmu_incr_fw(struct kvm_vcpu *vcpu, unsigned long fid)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_pmu_deinit(struct kvm_vcpu *vcpu) {}
+static inline void kvm_riscv_vcpu_pmu_reset(struct kvm_vcpu *vcpu) {}
+#endif /* CONFIG_RISCV_PMU_SBI */
+#endif /* !__KVM_VCPU_RISCV_PMU_H */
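struct kvm_pmu above keeps two pools: up to RISCV_KVM_MAX_HW_CTRS counters backed by perf events, followed by RISCV_KVM_MAX_FW_CTRS firmware counters that kvm_riscv_vcpu_pmu_incr_fw() bumps in software. A minimal sketch of how a virtual counter index would split between the pools, assuming the hardware counters occupy the low indices (the numbers below are illustrative):

#include <stdbool.h>
#include <stdio.h>

static bool is_fw_counter(int cidx, int num_hw_ctrs)
{
    /* indices below num_hw_ctrs map to perf-backed hardware counters,
     * the rest to software-maintained firmware counters */
    return cidx >= num_hw_ctrs;
}

int main(void)
{
    int num_hw_ctrs = 32;

    printf("counter 3  -> %s\n",
           is_fw_counter(3, num_hw_ctrs) ? "firmware" : "hardware");
    printf("counter 33 -> %s\n",
           is_fw_counter(33, num_hw_ctrs) ? "firmware" : "hardware");
    return 0;
}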
diff --git a/arch/riscv/include/asm/kvm_vcpu_sbi.h b/arch/riscv/include/asm/kvm_vcpu_sbi.h
new file mode 100644
index 0000000000..cdcf0ff07b
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_sbi.h
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __RISCV_KVM_VCPU_SBI_H__
+#define __RISCV_KVM_VCPU_SBI_H__
+
+#define KVM_SBI_IMPID 3
+
+#define KVM_SBI_VERSION_MAJOR 1
+#define KVM_SBI_VERSION_MINOR 0
+
+enum kvm_riscv_sbi_ext_status {
+ KVM_RISCV_SBI_EXT_UNINITIALIZED,
+ KVM_RISCV_SBI_EXT_AVAILABLE,
+ KVM_RISCV_SBI_EXT_UNAVAILABLE,
+};
+
+struct kvm_vcpu_sbi_context {
+ int return_handled;
+ enum kvm_riscv_sbi_ext_status ext_status[KVM_RISCV_SBI_EXT_MAX];
+};
+
+struct kvm_vcpu_sbi_return {
+ unsigned long out_val;
+ unsigned long err_val;
+ struct kvm_cpu_trap *utrap;
+ bool uexit;
+};
+
+struct kvm_vcpu_sbi_extension {
+ unsigned long extid_start;
+ unsigned long extid_end;
+ /*
+ * SBI extension handler. It can be defined for a single extension or a
+ * group of extensions, but it should always return Linux error codes
+ * rather than SBI-specific error codes.
+ */
+ int (*handler)(struct kvm_vcpu *vcpu, struct kvm_run *run,
+ struct kvm_vcpu_sbi_return *retdata);
+
+ /* Extension specific probe function */
+ unsigned long (*probe)(struct kvm_vcpu *vcpu);
+};
+
+void kvm_riscv_vcpu_sbi_forward(struct kvm_vcpu *vcpu, struct kvm_run *run);
+void kvm_riscv_vcpu_sbi_system_reset(struct kvm_vcpu *vcpu,
+ struct kvm_run *run,
+ u32 type, u64 flags);
+int kvm_riscv_vcpu_sbi_return(struct kvm_vcpu *vcpu, struct kvm_run *run);
+int kvm_riscv_vcpu_set_reg_sbi_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_get_reg_sbi_ext(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+const struct kvm_vcpu_sbi_extension *kvm_vcpu_sbi_find_ext(
+ struct kvm_vcpu *vcpu, unsigned long extid);
+int kvm_riscv_vcpu_sbi_ecall(struct kvm_vcpu *vcpu, struct kvm_run *run);
+
+#ifdef CONFIG_RISCV_SBI_V01
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_v01;
+#endif
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_base;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_time;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_ipi;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_rfence;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_srst;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_hsm;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_experimental;
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_vendor;
+
+#ifdef CONFIG_RISCV_PMU_SBI
+extern const struct kvm_vcpu_sbi_extension vcpu_sbi_ext_pmu;
+#endif
+#endif /* __RISCV_KVM_VCPU_SBI_H__ */
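Each struct kvm_vcpu_sbi_extension above claims a contiguous [extid_start, extid_end] range, so dispatching an SBI ecall reduces to a range lookup over the registered extensions. A standalone sketch of that lookup; the table contents are made up, though 0x10 (base) and 0x54494D45 ("TIME") are real SBI extension IDs. The in-kernel equivalent is kvm_vcpu_sbi_find_ext():

#include <stddef.h>
#include <stdio.h>

struct ext { unsigned long start, end; const char *name; };

static const struct ext exts[] = {
    { 0x10,       0x10,       "base" },
    { 0x54494D45, 0x54494D45, "time" },  /* ASCII "TIME" */
};

static const struct ext *find_ext(unsigned long extid)
{
    for (size_t i = 0; i < sizeof(exts) / sizeof(exts[0]); i++)
        if (exts[i].start <= extid && extid <= exts[i].end)
            return &exts[i];
    return NULL;  /* forwarded to userspace / reported unavailable */
}

int main(void)
{
    const struct ext *e = find_ext(0x54494D45);

    printf("extid 0x54494D45 -> %s\n", e ? e->name : "unavailable");
    return 0;
}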
diff --git a/arch/riscv/include/asm/kvm_vcpu_timer.h b/arch/riscv/include/asm/kvm_vcpu_timer.h
new file mode 100644
index 0000000000..82f7260301
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_timer.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 Western Digital Corporation or its affiliates.
+ *
+ * Authors:
+ * Atish Patra <atish.patra@wdc.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_TIMER_H
+#define __KVM_VCPU_RISCV_TIMER_H
+
+#include <linux/hrtimer.h>
+
+struct kvm_guest_timer {
+ /* Mult & Shift values to get nanoseconds from cycles */
+ u32 nsec_mult;
+ u32 nsec_shift;
+ /* Time delta value */
+ u64 time_delta;
+};
+
+struct kvm_vcpu_timer {
+ /* Flag for whether init is done */
+ bool init_done;
+ /* Flag for whether timer event is configured */
+ bool next_set;
+ /* Next timer event cycles */
+ u64 next_cycles;
+ /* Underlying hrtimer instance */
+ struct hrtimer hrt;
+
+ /* Flag indicating whether sstc is enabled */
+ bool sstc_enabled;
+ /* Function pointer to switch between stimecmp and hrtimer at runtime */
+ int (*timer_next_event)(struct kvm_vcpu *vcpu, u64 ncycles);
+};
+
+int kvm_riscv_vcpu_timer_next_event(struct kvm_vcpu *vcpu, u64 ncycles);
+int kvm_riscv_vcpu_get_reg_timer(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg_timer(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_timer_init(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_deinit(struct kvm_vcpu *vcpu);
+int kvm_riscv_vcpu_timer_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_timer_restore(struct kvm_vcpu *vcpu);
+void kvm_riscv_guest_timer_init(struct kvm *kvm);
+void kvm_riscv_vcpu_timer_sync(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_timer_save(struct kvm_vcpu *vcpu);
+bool kvm_riscv_vcpu_timer_pending(struct kvm_vcpu *vcpu);
+
+#endif
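The nsec_mult/nsec_shift pair in struct kvm_guest_timer above supports the standard fixed-point conversion ns = (cycles * mult) >> shift, which avoids a division on the hot path. A worked sketch, assuming a hypothetical 10 MHz timebase (100 ns per cycle) purely for illustration:

#include <stdint.h>
#include <stdio.h>

static uint64_t cycles_to_ns(uint64_t cycles, uint32_t mult, uint32_t shift)
{
    return (cycles * mult) >> shift;
}

int main(void)
{
    /* 100 ns/cycle encoded as mult = 100 << 8, shift = 8 */
    uint32_t shift = 8, mult = 100u << shift;

    printf("12345 cycles = %llu ns\n",
           (unsigned long long)cycles_to_ns(12345, mult, shift));
    return 0;
}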
diff --git a/arch/riscv/include/asm/kvm_vcpu_vector.h b/arch/riscv/include/asm/kvm_vcpu_vector.h
new file mode 100644
index 0000000000..27f5bccdd8
--- /dev/null
+++ b/arch/riscv/include/asm/kvm_vcpu_vector.h
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2022 SiFive
+ *
+ * Authors:
+ * Vincent Chen <vincent.chen@sifive.com>
+ * Greentime Hu <greentime.hu@sifive.com>
+ */
+
+#ifndef __KVM_VCPU_RISCV_VECTOR_H
+#define __KVM_VCPU_RISCV_VECTOR_H
+
+#include <linux/types.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+#include <asm/vector.h>
+#include <asm/kvm_host.h>
+
+static __always_inline void __kvm_riscv_vector_save(struct kvm_cpu_context *context)
+{
+ __riscv_v_vstate_save(&context->vector, context->vector.datap);
+}
+
+static __always_inline void __kvm_riscv_vector_restore(struct kvm_cpu_context *context)
+{
+ __riscv_v_vstate_restore(&context->vector, context->vector.datap);
+}
+
+void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu);
+void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+ unsigned long *isa);
+void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+ unsigned long *isa);
+void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx);
+int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_context *cntx);
+void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu);
+#else
+
+struct kvm_cpu_context;
+
+static inline void kvm_riscv_vcpu_vector_reset(struct kvm_vcpu *vcpu)
+{
+}
+
+static inline void kvm_riscv_vcpu_guest_vector_save(struct kvm_cpu_context *cntx,
+ unsigned long *isa)
+{
+}
+
+static inline void kvm_riscv_vcpu_guest_vector_restore(struct kvm_cpu_context *cntx,
+ unsigned long *isa)
+{
+}
+
+static inline void kvm_riscv_vcpu_host_vector_save(struct kvm_cpu_context *cntx)
+{
+}
+
+static inline void kvm_riscv_vcpu_host_vector_restore(struct kvm_cpu_context *cntx)
+{
+}
+
+static inline int kvm_riscv_vcpu_alloc_vector_context(struct kvm_vcpu *vcpu,
+ struct kvm_cpu_context *cntx)
+{
+ return 0;
+}
+
+static inline void kvm_riscv_vcpu_free_vector_context(struct kvm_vcpu *vcpu)
+{
+}
+#endif
+
+int kvm_riscv_vcpu_get_reg_vector(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+int kvm_riscv_vcpu_set_reg_vector(struct kvm_vcpu *vcpu,
+ const struct kvm_one_reg *reg);
+#endif
diff --git a/arch/riscv/include/asm/linkage.h b/arch/riscv/include/asm/linkage.h
new file mode 100644
index 0000000000..9e88ba23cd
--- /dev/null
+++ b/arch/riscv/include/asm/linkage.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_LINKAGE_H
+#define _ASM_RISCV_LINKAGE_H
+
+#define __ALIGN .balign 4
+#define __ALIGN_STR ".balign 4"
+
+#endif /* _ASM_RISCV_LINKAGE_H */
diff --git a/arch/riscv/include/asm/mmio.h b/arch/riscv/include/asm/mmio.h
new file mode 100644
index 0000000000..4c58ee7f95
--- /dev/null
+++ b/arch/riscv/include/asm/mmio.h
@@ -0,0 +1,151 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * {read,write}{b,w,l,q} based on arch/arm64/include/asm/io.h
+ * which was based on arch/arm/include/io.h
+ *
+ * Copyright (C) 1996-2000 Russell King
+ * Copyright (C) 2012 ARM Ltd.
+ * Copyright (C) 2014 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_MMIO_H
+#define _ASM_RISCV_MMIO_H
+
+#include <linux/types.h>
+#include <asm/mmiowb.h>
+
+/* Generic IO read/write. These perform native-endian accesses. */
+#define __raw_writeb __raw_writeb
+static inline void __raw_writeb(u8 val, volatile void __iomem *addr)
+{
+ asm volatile("sb %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writew __raw_writew
+static inline void __raw_writew(u16 val, volatile void __iomem *addr)
+{
+ asm volatile("sh %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#define __raw_writel __raw_writel
+static inline void __raw_writel(u32 val, volatile void __iomem *addr)
+{
+ asm volatile("sw %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_writeq __raw_writeq
+static inline void __raw_writeq(u64 val, volatile void __iomem *addr)
+{
+ asm volatile("sd %0, 0(%1)" : : "r" (val), "r" (addr));
+}
+#endif
+
+#define __raw_readb __raw_readb
+static inline u8 __raw_readb(const volatile void __iomem *addr)
+{
+ u8 val;
+
+ asm volatile("lb %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readw __raw_readw
+static inline u16 __raw_readw(const volatile void __iomem *addr)
+{
+ u16 val;
+
+ asm volatile("lh %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#define __raw_readl __raw_readl
+static inline u32 __raw_readl(const volatile void __iomem *addr)
+{
+ u32 val;
+
+ asm volatile("lw %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+
+#ifdef CONFIG_64BIT
+#define __raw_readq __raw_readq
+static inline u64 __raw_readq(const volatile void __iomem *addr)
+{
+ u64 val;
+
+ asm volatile("ld %0, 0(%1)" : "=r" (val) : "r" (addr));
+ return val;
+}
+#endif
+
+/*
+ * Unordered I/O memory access primitives. These are even more relaxed than
+ * the relaxed versions, as they don't even order accesses between successive
+ * operations to the I/O regions.
+ */
+#define readb_cpu(c) ({ u8 __r = __raw_readb(c); __r; })
+#define readw_cpu(c) ({ u16 __r = le16_to_cpu((__force __le16)__raw_readw(c)); __r; })
+#define readl_cpu(c) ({ u32 __r = le32_to_cpu((__force __le32)__raw_readl(c)); __r; })
+
+#define writeb_cpu(v, c) ((void)__raw_writeb((v), (c)))
+#define writew_cpu(v, c) ((void)__raw_writew((__force u16)cpu_to_le16(v), (c)))
+#define writel_cpu(v, c) ((void)__raw_writel((__force u32)cpu_to_le32(v), (c)))
+
+#ifdef CONFIG_64BIT
+#define readq_cpu(c) ({ u64 __r = le64_to_cpu((__force __le64)__raw_readq(c)); __r; })
+#define writeq_cpu(v, c) ((void)__raw_writeq((__force u64)cpu_to_le64(v), (c)))
+#endif
+
+/*
+ * Relaxed I/O memory access primitives. These follow the Device memory
+ * ordering rules but do not guarantee any ordering relative to Normal memory
+ * accesses. These are defined to order the indicated access (either a read or
+ * write) with all other I/O memory accesses to the same peripheral. Since the
+ * platform specification defines that all I/O regions are strongly ordered on
+ * channel 0, no explicit fences are required to enforce this ordering.
+ */
+/* FIXME: These are now the same as asm-generic */
+#define __io_rbr() do {} while (0)
+#define __io_rar() do {} while (0)
+#define __io_rbw() do {} while (0)
+#define __io_raw() do {} while (0)
+
+#define readb_relaxed(c) ({ u8 __v; __io_rbr(); __v = readb_cpu(c); __io_rar(); __v; })
+#define readw_relaxed(c) ({ u16 __v; __io_rbr(); __v = readw_cpu(c); __io_rar(); __v; })
+#define readl_relaxed(c) ({ u32 __v; __io_rbr(); __v = readl_cpu(c); __io_rar(); __v; })
+
+#define writeb_relaxed(v, c) ({ __io_rbw(); writeb_cpu((v), (c)); __io_raw(); })
+#define writew_relaxed(v, c) ({ __io_rbw(); writew_cpu((v), (c)); __io_raw(); })
+#define writel_relaxed(v, c) ({ __io_rbw(); writel_cpu((v), (c)); __io_raw(); })
+
+#ifdef CONFIG_64BIT
+#define readq_relaxed(c) ({ u64 __v; __io_rbr(); __v = readq_cpu(c); __io_rar(); __v; })
+#define writeq_relaxed(v, c) ({ __io_rbw(); writeq_cpu((v), (c)); __io_raw(); })
+#endif
+
+/*
+ * I/O memory access primitives. Reads are ordered relative to any following
+ * Normal memory read and delay() loop. Writes are ordered relative to any
+ * prior Normal memory write. The memory barriers here are necessary as RISC-V
+ * doesn't define any ordering between the memory space and the I/O space.
+ */
+#define __io_br() do {} while (0)
+#define __io_ar(v) ({ __asm__ __volatile__ ("fence i,ir" : : : "memory"); })
+#define __io_bw() ({ __asm__ __volatile__ ("fence w,o" : : : "memory"); })
+#define __io_aw() mmiowb_set_pending()
+
+#define readb(c) ({ u8 __v; __io_br(); __v = readb_cpu(c); __io_ar(__v); __v; })
+#define readw(c) ({ u16 __v; __io_br(); __v = readw_cpu(c); __io_ar(__v); __v; })
+#define readl(c) ({ u32 __v; __io_br(); __v = readl_cpu(c); __io_ar(__v); __v; })
+
+#define writeb(v, c) ({ __io_bw(); writeb_cpu((v), (c)); __io_aw(); })
+#define writew(v, c) ({ __io_bw(); writew_cpu((v), (c)); __io_aw(); })
+#define writel(v, c) ({ __io_bw(); writel_cpu((v), (c)); __io_aw(); })
+
+#ifdef CONFIG_64BIT
+#define readq(c) ({ u64 __v; __io_br(); __v = readq_cpu(c); __io_ar(__v); __v; })
+#define writeq(v, c) ({ __io_bw(); writeq_cpu((v), (c)); __io_aw(); })
+#endif
+
+#endif /* _ASM_RISCV_MMIO_H */
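The split above gives drivers a choice: the _relaxed accessors are ordered only against other I/O accesses to the same peripheral, while plain readl()/writel() add "fence i,ir" / "fence w,o" to order against normal memory. A sketch of the usual call-site pattern; the stubs below only make the example compile standalone (they carry none of the real fences), and the device, register offsets, and values are hypothetical:

#include <stdint.h>
#include <stdio.h>

static uint32_t mmio[16];                  /* fake device registers */
#define writel_relaxed(v, a) (*(a) = (v))  /* real one: no fences   */
#define writel(v, a)         (*(a) = (v))  /* real one: fence w,o first */

#define REG_DATA     0
#define REG_DOORBELL 1

static void push_descriptor(uint32_t d0, uint32_t d1)
{
    /* Accesses to one peripheral stay ordered among themselves, so the
     * data words can use the cheap relaxed writes... */
    writel_relaxed(d0, &mmio[REG_DATA]);
    writel_relaxed(d1, &mmio[REG_DATA]);
    /* ...but the doorbell must not overtake prior normal-memory writes
     * (e.g. a DMA descriptor in RAM), so it uses writel(). */
    writel(1, &mmio[REG_DOORBELL]);
}

int main(void)
{
    push_descriptor(0xdead, 0xbeef);
    printf("doorbell=%u\n", mmio[REG_DOORBELL]);
    return 0;
}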
diff --git a/arch/riscv/include/asm/mmiowb.h b/arch/riscv/include/asm/mmiowb.h
new file mode 100644
index 0000000000..0b2333e71f
--- /dev/null
+++ b/arch/riscv/include/asm/mmiowb.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_MMIOWB_H
+#define _ASM_RISCV_MMIOWB_H
+
+/*
+ * "o,w" is sufficient to ensure that all writes to the device have completed
+ * before the write to the spinlock is allowed to commit.
+ */
+#define mmiowb() __asm__ __volatile__ ("fence o,w" : : : "memory");
+
+#include <linux/smp.h>
+#include <asm-generic/mmiowb.h>
+
+#endif /* _ASM_RISCV_MMIOWB_H */
diff --git a/arch/riscv/include/asm/mmu.h b/arch/riscv/include/asm/mmu.h
new file mode 100644
index 0000000000..355504b37f
--- /dev/null
+++ b/arch/riscv/include/asm/mmu.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+
+#ifndef _ASM_RISCV_MMU_H
+#define _ASM_RISCV_MMU_H
+
+#ifndef __ASSEMBLY__
+
+typedef struct {
+#ifndef CONFIG_MMU
+ unsigned long end_brk;
+#else
+ atomic_long_t id;
+#endif
+ void *vdso;
+#ifdef CONFIG_SMP
+ /* A local icache flush is needed before user execution can resume. */
+ cpumask_t icache_stale_mask;
+#endif
+#ifdef CONFIG_BINFMT_ELF_FDPIC
+ unsigned long exec_fdpic_loadmap;
+ unsigned long interp_fdpic_loadmap;
+#endif
+} mm_context_t;
+
+void __init create_pgd_mapping(pgd_t *pgdp, uintptr_t va, phys_addr_t pa,
+ phys_addr_t sz, pgprot_t prot);
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_MMU_H */
diff --git a/arch/riscv/include/asm/mmu_context.h b/arch/riscv/include/asm/mmu_context.h
new file mode 100644
index 0000000000..7030837adc
--- /dev/null
+++ b/arch/riscv/include/asm/mmu_context.h
@@ -0,0 +1,40 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_MMU_CONTEXT_H
+#define _ASM_RISCV_MMU_CONTEXT_H
+
+#include <linux/mm_types.h>
+#include <asm-generic/mm_hooks.h>
+
+#include <linux/mm.h>
+#include <linux/sched.h>
+
+void switch_mm(struct mm_struct *prev, struct mm_struct *next,
+ struct task_struct *task);
+
+#define activate_mm activate_mm
+static inline void activate_mm(struct mm_struct *prev,
+ struct mm_struct *next)
+{
+ switch_mm(prev, next, NULL);
+}
+
+#define init_new_context init_new_context
+static inline int init_new_context(struct task_struct *tsk,
+ struct mm_struct *mm)
+{
+#ifdef CONFIG_MMU
+ atomic_long_set(&mm->context.id, 0);
+#endif
+ return 0;
+}
+
+DECLARE_STATIC_KEY_FALSE(use_asid_allocator);
+
+#include <asm-generic/mmu_context.h>
+
+#endif /* _ASM_RISCV_MMU_CONTEXT_H */
diff --git a/arch/riscv/include/asm/mmzone.h b/arch/riscv/include/asm/mmzone.h
new file mode 100644
index 0000000000..fa17e01d9a
--- /dev/null
+++ b/arch/riscv/include/asm/mmzone.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_MMZONE_H
+#define __ASM_MMZONE_H
+
+#ifdef CONFIG_NUMA
+
+#include <asm/numa.h>
+
+extern struct pglist_data *node_data[];
+#define NODE_DATA(nid) (node_data[(nid)])
+
+#endif /* CONFIG_NUMA */
+#endif /* __ASM_MMZONE_H */
diff --git a/arch/riscv/include/asm/module.h b/arch/riscv/include/asm/module.h
new file mode 100644
index 0000000000..0f3baaa6a9
--- /dev/null
+++ b/arch/riscv/include/asm/module.h
@@ -0,0 +1,130 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_RISCV_MODULE_H
+#define _ASM_RISCV_MODULE_H
+
+#include <asm-generic/module.h>
+#include <linux/elf.h>
+
+struct module;
+unsigned long module_emit_got_entry(struct module *mod, unsigned long val);
+unsigned long module_emit_plt_entry(struct module *mod, unsigned long val);
+
+#ifdef CONFIG_MODULE_SECTIONS
+struct mod_section {
+ Elf_Shdr *shdr;
+ int num_entries;
+ int max_entries;
+};
+
+struct mod_arch_specific {
+ struct mod_section got;
+ struct mod_section plt;
+ struct mod_section got_plt;
+};
+
+struct got_entry {
+ unsigned long symbol_addr; /* the real variable address */
+};
+
+static inline struct got_entry emit_got_entry(unsigned long val)
+{
+ return (struct got_entry) {val};
+}
+
+static inline struct got_entry *get_got_entry(unsigned long val,
+ const struct mod_section *sec)
+{
+ struct got_entry *got = (struct got_entry *)(sec->shdr->sh_addr);
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got[i].symbol_addr == val)
+ return &got[i];
+ }
+ return NULL;
+}
+
+struct plt_entry {
+ /*
+ * Trampoline code to the real target address. The return address
+ * should be the original (pc + 4) from before entering the PLT entry.
+ */
+ u32 insn_auipc; /* auipc t0, 0x0 */
+ u32 insn_ld; /* ld t1, 0x10(t0) */
+ u32 insn_jr; /* jr t1 */
+};
+
+#define OPC_AUIPC 0x0017
+#define OPC_LD 0x3003
+#define OPC_JALR 0x0067
+#define REG_T0 0x5
+#define REG_T1 0x6
+
+static inline struct plt_entry emit_plt_entry(unsigned long val,
+ unsigned long plt,
+ unsigned long got_plt)
+{
+ /*
+ * U-Type encoding:
+ * +------------+----------+----------+
+ * | imm[31:12] | rd[11:7] | opc[6:0] |
+ * +------------+----------+----------+
+ *
+ * I-Type encoding:
+ * +------------+------------+--------+----------+----------+
+ * | imm[31:20] | rs1[19:15] | funct3 | rd[11:7] | opc[6:0] |
+ * +------------+------------+--------+----------+----------+
+ *
+ */
+ unsigned long offset = got_plt - plt;
+ u32 hi20 = (offset + 0x800) & 0xfffff000;
+ u32 lo12 = (offset - hi20);
+ return (struct plt_entry) {
+ OPC_AUIPC | (REG_T0 << 7) | hi20,
+ OPC_LD | (lo12 << 20) | (REG_T0 << 15) | (REG_T1 << 7),
+ OPC_JALR | (REG_T1 << 15)
+ };
+}
+
+static inline int get_got_plt_idx(unsigned long val, const struct mod_section *sec)
+{
+ struct got_entry *got_plt = (struct got_entry *)sec->shdr->sh_addr;
+ int i;
+ for (i = 0; i < sec->num_entries; i++) {
+ if (got_plt[i].symbol_addr == val)
+ return i;
+ }
+ return -1;
+}
+
+static inline struct plt_entry *get_plt_entry(unsigned long val,
+ const struct mod_section *sec_plt,
+ const struct mod_section *sec_got_plt)
+{
+ struct plt_entry *plt = (struct plt_entry *)sec_plt->shdr->sh_addr;
+ int got_plt_idx = get_got_plt_idx(val, sec_got_plt);
+ if (got_plt_idx >= 0)
+ return plt + got_plt_idx;
+ else
+ return NULL;
+}
+
+#endif /* CONFIG_MODULE_SECTIONS */
+
+static inline const Elf_Shdr *find_section(const Elf_Ehdr *hdr,
+ const Elf_Shdr *sechdrs,
+ const char *name)
+{
+ const Elf_Shdr *s, *se;
+ const char *secstrs = (void *)hdr + sechdrs[hdr->e_shstrndx].sh_offset;
+
+ for (s = sechdrs, se = sechdrs + hdr->e_shnum; s < se; s++) {
+ if (strcmp(name, secstrs + s->sh_name) == 0)
+ return s;
+ }
+
+ return NULL;
+}
+
+#endif /* _ASM_RISCV_MODULE_H */
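emit_plt_entry() above splits the GOT-PLT offset across AUIPC's 20-bit upper immediate and LD's sign-extended 12-bit immediate. Adding 0x800 before masking rounds hi20 to the nearest 4 KiB multiple, so the remainder always fits in [-2048, 2047] and hi20 + sext(lo12) reconstructs the offset exactly. A standalone check of that arithmetic in 32 bits, mirroring the u32 math in the header:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
    int32_t offsets[] = { 0x12345678, -0x1234, 0x7ff, 0x800 };

    for (int i = 0; i < 4; i++) {
        int32_t offset = offsets[i];
        /* same computation as emit_plt_entry() */
        uint32_t hi20 = ((uint32_t)offset + 0x800) & 0xfffff000;
        int32_t  lo12 = offset - (int32_t)hi20;  /* always in [-2048, 2047] */

        printf("offset=%d hi20=%#x lo12=%d ok=%d\n",
               offset, hi20, lo12, (int32_t)hi20 + lo12 == offset);
    }
    return 0;
}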
diff --git a/arch/riscv/include/asm/module.lds.h b/arch/riscv/include/asm/module.lds.h
new file mode 100644
index 0000000000..1075beae1a
--- /dev/null
+++ b/arch/riscv/include/asm/module.lds.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+#ifdef CONFIG_MODULE_SECTIONS
+SECTIONS {
+ .plt : { BYTE(0) }
+ .got : { BYTE(0) }
+ .got.plt : { BYTE(0) }
+}
+#endif
diff --git a/arch/riscv/include/asm/numa.h b/arch/riscv/include/asm/numa.h
new file mode 100644
index 0000000000..8c8cf4297c
--- /dev/null
+++ b/arch/riscv/include/asm/numa.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_NUMA_H
+#define __ASM_NUMA_H
+
+#include <asm/topology.h>
+#include <asm-generic/numa.h>
+
+#endif /* __ASM_NUMA_H */
diff --git a/arch/riscv/include/asm/page.h b/arch/riscv/include/asm/page.h
new file mode 100644
index 0000000000..57e887bfa3
--- /dev/null
+++ b/arch/riscv/include/asm/page.h
@@ -0,0 +1,203 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ * Copyright (C) 2017 XiaojingZhu <zhuxiaoj@ict.ac.cn>
+ */
+
+#ifndef _ASM_RISCV_PAGE_H
+#define _ASM_RISCV_PAGE_H
+
+#include <linux/pfn.h>
+#include <linux/const.h>
+
+#define PAGE_SHIFT (12)
+#define PAGE_SIZE (_AC(1, UL) << PAGE_SHIFT)
+#define PAGE_MASK (~(PAGE_SIZE - 1))
+
+#define HPAGE_SHIFT PMD_SHIFT
+#define HPAGE_SIZE (_AC(1, UL) << HPAGE_SHIFT)
+#define HPAGE_MASK (~(HPAGE_SIZE - 1))
+#define HUGETLB_PAGE_ORDER (HPAGE_SHIFT - PAGE_SHIFT)
+
+/*
+ * PAGE_OFFSET -- the first address of the first page of memory.
+ * When not using MMU this corresponds to the first free page in
+ * physical memory (aligned on a page boundary).
+ */
+#ifdef CONFIG_64BIT
+#ifdef CONFIG_MMU
+#define PAGE_OFFSET kernel_map.page_offset
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif
+/*
+ * By default, the CONFIG_PAGE_OFFSET value corresponds to the SV57 address
+ * space, so also define PAGE_OFFSET values for SV48 and SV39.
+ */
+#define PAGE_OFFSET_L4 _AC(0xffffaf8000000000, UL)
+#define PAGE_OFFSET_L3 _AC(0xffffffd800000000, UL)
+#else
+#define PAGE_OFFSET _AC(CONFIG_PAGE_OFFSET, UL)
+#endif /* CONFIG_64BIT */
+
+#ifndef __ASSEMBLY__
+
+#ifdef CONFIG_RISCV_ISA_ZICBOZ
+void clear_page(void *page);
+#else
+#define clear_page(pgaddr) memset((pgaddr), 0, PAGE_SIZE)
+#endif
+#define copy_page(to, from) memcpy((to), (from), PAGE_SIZE)
+
+#define clear_user_page(pgaddr, vaddr, page) clear_page(pgaddr)
+#define copy_user_page(vto, vfrom, vaddr, topg) \
+ memcpy((vto), (vfrom), PAGE_SIZE)
+
+/*
+ * Use struct definitions to apply C type checking
+ */
+
+/* Page Global Directory entry */
+typedef struct {
+ unsigned long pgd;
+} pgd_t;
+
+/* Page Table entry */
+typedef struct {
+ unsigned long pte;
+} pte_t;
+
+typedef struct {
+ unsigned long pgprot;
+} pgprot_t;
+
+typedef struct page *pgtable_t;
+
+#define pte_val(x) ((x).pte)
+#define pgd_val(x) ((x).pgd)
+#define pgprot_val(x) ((x).pgprot)
+
+#define __pte(x) ((pte_t) { (x) })
+#define __pgd(x) ((pgd_t) { (x) })
+#define __pgprot(x) ((pgprot_t) { (x) })
+
+#ifdef CONFIG_64BIT
+#define PTE_FMT "%016lx"
+#else
+#define PTE_FMT "%08lx"
+#endif
+
+#ifdef CONFIG_64BIT
+/*
+ * We override this value as its generic definition uses __pa too early in
+ * the boot process (before kernel_map.va_pa_offset is set).
+ */
+#define MIN_MEMBLOCK_ADDR 0
+#endif
+
+#ifdef CONFIG_MMU
+#define ARCH_PFN_OFFSET (PFN_DOWN((unsigned long)phys_ram_base))
+#else
+#define ARCH_PFN_OFFSET (PAGE_OFFSET >> PAGE_SHIFT)
+#endif /* CONFIG_MMU */
+
+struct kernel_mapping {
+ unsigned long page_offset;
+ unsigned long virt_addr;
+ unsigned long virt_offset;
+ uintptr_t phys_addr;
+ uintptr_t size;
+ /* Offset between linear mapping virtual address and kernel load address */
+ unsigned long va_pa_offset;
+ /* Offset between kernel mapping virtual address and kernel load address */
+ unsigned long va_kernel_pa_offset;
+ unsigned long va_kernel_xip_pa_offset;
+#ifdef CONFIG_XIP_KERNEL
+ uintptr_t xiprom;
+ uintptr_t xiprom_sz;
+#endif
+};
+
+extern struct kernel_mapping kernel_map;
+extern phys_addr_t phys_ram_base;
+
+#define is_kernel_mapping(x) \
+ ((x) >= kernel_map.virt_addr && (x) < (kernel_map.virt_addr + kernel_map.size))
+
+#define is_linear_mapping(x) \
+ ((x) >= PAGE_OFFSET && (!IS_ENABLED(CONFIG_64BIT) || (x) < PAGE_OFFSET + KERN_VIRT_SIZE))
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+#define linear_mapping_pa_to_va(x) ((void *)((unsigned long)(x) + kernel_map.va_pa_offset))
+#else
+void *linear_mapping_pa_to_va(unsigned long x);
+#endif
+#define kernel_mapping_pa_to_va(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < phys_ram_base) ? \
+ (void *)(_y + kernel_map.va_kernel_xip_pa_offset) : \
+ (void *)(_y + kernel_map.va_kernel_pa_offset + XIP_OFFSET); \
+ })
+#define __pa_to_va_nodebug(x) linear_mapping_pa_to_va(x)
+
+#ifndef CONFIG_DEBUG_VIRTUAL
+#define linear_mapping_va_to_pa(x) ((unsigned long)(x) - kernel_map.va_pa_offset)
+#else
+phys_addr_t linear_mapping_va_to_pa(unsigned long x);
+#endif
+#define kernel_mapping_va_to_pa(y) ({ \
+ unsigned long _y = (unsigned long)(y); \
+ (IS_ENABLED(CONFIG_XIP_KERNEL) && _y < kernel_map.virt_addr + XIP_OFFSET) ? \
+ (_y - kernel_map.va_kernel_xip_pa_offset) : \
+ (_y - kernel_map.va_kernel_pa_offset - XIP_OFFSET); \
+ })
+
+#define __va_to_pa_nodebug(x) ({ \
+ unsigned long _x = x; \
+ is_linear_mapping(_x) ? \
+ linear_mapping_va_to_pa(_x) : kernel_mapping_va_to_pa(_x); \
+ })
+
+#ifdef CONFIG_DEBUG_VIRTUAL
+extern phys_addr_t __virt_to_phys(unsigned long x);
+extern phys_addr_t __phys_addr_symbol(unsigned long x);
+#else
+#define __virt_to_phys(x) __va_to_pa_nodebug(x)
+#define __phys_addr_symbol(x) __va_to_pa_nodebug(x)
+#endif /* CONFIG_DEBUG_VIRTUAL */
+
+#define __pa_symbol(x) __phys_addr_symbol(RELOC_HIDE((unsigned long)(x), 0))
+#define __pa(x) __virt_to_phys((unsigned long)(x))
+#define __va(x) ((void *)__pa_to_va_nodebug((phys_addr_t)(x)))
+
+#define phys_to_pfn(phys) (PFN_DOWN(phys))
+#define pfn_to_phys(pfn) (PFN_PHYS(pfn))
+
+#define virt_to_pfn(vaddr) (phys_to_pfn(__pa(vaddr)))
+#define pfn_to_virt(pfn) (__va(pfn_to_phys(pfn)))
+
+#define virt_to_page(vaddr) (pfn_to_page(virt_to_pfn(vaddr)))
+#define page_to_virt(page) (pfn_to_virt(page_to_pfn(page)))
+
+#define page_to_phys(page) (pfn_to_phys(page_to_pfn(page)))
+#define phys_to_page(paddr) (pfn_to_page(phys_to_pfn(paddr)))
+
+#define sym_to_pfn(x) __phys_to_pfn(__pa_symbol(x))
+
+unsigned long kaslr_offset(void);
+
+#endif /* __ASSEMBLY__ */
+
+#define virt_addr_valid(vaddr) ({ \
+ unsigned long _addr = (unsigned long)vaddr; \
+ (unsigned long)(_addr) >= PAGE_OFFSET && pfn_valid(virt_to_pfn(_addr)); \
+})
+
+#define VM_DATA_DEFAULT_FLAGS VM_DATA_FLAGS_NON_EXEC
+
+#include <asm-generic/memory_model.h>
+#include <asm-generic/getorder.h>
+
+#endif /* _ASM_RISCV_PAGE_H */
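For linear-mapping addresses, the __pa()/__va() machinery above reduces to adding or subtracting kernel_map.va_pa_offset. A sketch of that translation with a made-up offset (PAGE_OFFSET_L3 minus a typical 0x80000000 RAM base); the kernel additionally routes kernel-image addresses through kernel_mapping_va_to_pa(), which is omitted here:

#include <stdint.h>
#include <stdio.h>

/* hypothetical SV39 layout: PAGE_OFFSET 0xffffffd800000000, RAM at 0x80000000 */
static const uint64_t va_pa_offset = 0xffffffd800000000ULL - 0x80000000ULL;

static uint64_t linear_va_to_pa(uint64_t va) { return va - va_pa_offset; }
static uint64_t linear_pa_to_va(uint64_t pa) { return pa + va_pa_offset; }

int main(void)
{
    uint64_t va = 0xffffffd800200000ULL;

    printf("va %#llx -> pa %#llx\n", (unsigned long long)va,
           (unsigned long long)linear_va_to_pa(va));
    printf("round trip ok: %d\n",
           linear_pa_to_va(linear_va_to_pa(va)) == va);
    return 0;
}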
diff --git a/arch/riscv/include/asm/patch.h b/arch/riscv/include/asm/patch.h
new file mode 100644
index 0000000000..e88b52d39e
--- /dev/null
+++ b/arch/riscv/include/asm/patch.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef _ASM_RISCV_PATCH_H
+#define _ASM_RISCV_PATCH_H
+
+int patch_text_nosync(void *addr, const void *insns, size_t len);
+int patch_text_set_nosync(void *addr, u8 c, size_t len);
+int patch_text(void *addr, u32 *insns, int ninsns);
+
+extern int riscv_patch_in_stop_machine;
+
+#endif /* _ASM_RISCV_PATCH_H */
diff --git a/arch/riscv/include/asm/pci.h b/arch/riscv/include/asm/pci.h
new file mode 100644
index 0000000000..cc2a184cfc
--- /dev/null
+++ b/arch/riscv/include/asm/pci.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2016 SiFive
+ */
+
+#ifndef _ASM_RISCV_PCI_H
+#define _ASM_RISCV_PCI_H
+
+#include <linux/types.h>
+#include <linux/slab.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+
+#define PCIBIOS_MIN_IO 4
+#define PCIBIOS_MIN_MEM 16
+
+#if defined(CONFIG_PCI) && defined(CONFIG_NUMA)
+static inline int pcibus_to_node(struct pci_bus *bus)
+{
+ return dev_to_node(&bus->dev);
+}
+#ifndef cpumask_of_pcibus
+#define cpumask_of_pcibus(bus) (pcibus_to_node(bus) == -1 ? \
+ cpu_all_mask : \
+ cpumask_of_node(pcibus_to_node(bus)))
+#endif
+#endif /* defined(CONFIG_PCI) && defined(CONFIG_NUMA) */
+
+/* Generic PCI */
+#include <asm-generic/pci.h>
+
+#endif /* _ASM_RISCV_PCI_H */
diff --git a/arch/riscv/include/asm/perf_event.h b/arch/riscv/include/asm/perf_event.h
new file mode 100644
index 0000000000..665bbc9b2f
--- /dev/null
+++ b/arch/riscv/include/asm/perf_event.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2018 SiFive
+ * Copyright (C) 2018 Andes Technology Corporation
+ *
+ */
+
+#ifndef _ASM_RISCV_PERF_EVENT_H
+#define _ASM_RISCV_PERF_EVENT_H
+
+#include <linux/perf_event.h>
+#define perf_arch_bpf_user_pt_regs(regs) (struct user_regs_struct *)regs
+
+#define perf_arch_fetch_caller_regs(regs, __ip) { \
+ (regs)->epc = (__ip); \
+ (regs)->s0 = (unsigned long) __builtin_frame_address(0); \
+ (regs)->sp = current_stack_pointer; \
+ (regs)->status = SR_PP; \
+}
+#endif /* _ASM_RISCV_PERF_EVENT_H */
diff --git a/arch/riscv/include/asm/pgalloc.h b/arch/riscv/include/asm/pgalloc.h
new file mode 100644
index 0000000000..d169a4f41a
--- /dev/null
+++ b/arch/riscv/include/asm/pgalloc.h
@@ -0,0 +1,163 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGALLOC_H
+#define _ASM_RISCV_PGALLOC_H
+
+#include <linux/mm.h>
+#include <asm/tlb.h>
+
+#ifdef CONFIG_MMU
+#define __HAVE_ARCH_PUD_ALLOC_ONE
+#define __HAVE_ARCH_PUD_FREE
+#include <asm-generic/pgalloc.h>
+
+static inline void pmd_populate_kernel(struct mm_struct *mm,
+ pmd_t *pmd, pte_t *pte)
+{
+ unsigned long pfn = virt_to_pfn(pte);
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+static inline void pmd_populate(struct mm_struct *mm,
+ pmd_t *pmd, pgtable_t pte)
+{
+ unsigned long pfn = virt_to_pfn(page_address(pte));
+
+ set_pmd(pmd, __pmd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
+{
+ unsigned long pfn = virt_to_pfn(pmd);
+
+ set_pud(pud, __pud((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+}
+
+static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
+{
+ if (pgtable_l4_enabled) {
+ unsigned long pfn = virt_to_pfn(pud);
+
+ set_p4d(p4d, __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void p4d_populate_safe(struct mm_struct *mm, p4d_t *p4d,
+ pud_t *pud)
+{
+ if (pgtable_l4_enabled) {
+ unsigned long pfn = virt_to_pfn(pud);
+
+ set_p4d_safe(p4d,
+ __p4d((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
+{
+ if (pgtable_l5_enabled) {
+ unsigned long pfn = virt_to_pfn(p4d);
+
+ set_pgd(pgd, __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+static inline void pgd_populate_safe(struct mm_struct *mm, pgd_t *pgd,
+ p4d_t *p4d)
+{
+ if (pgtable_l5_enabled) {
+ unsigned long pfn = virt_to_pfn(p4d);
+
+ set_pgd_safe(pgd,
+ __pgd((pfn << _PAGE_PFN_SHIFT) | _PAGE_TABLE));
+ }
+}
+
+#define pud_alloc_one pud_alloc_one
+static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ if (pgtable_l4_enabled)
+ return __pud_alloc_one(mm, addr);
+
+ return NULL;
+}
+
+#define pud_free pud_free
+static inline void pud_free(struct mm_struct *mm, pud_t *pud)
+{
+ if (pgtable_l4_enabled)
+ __pud_free(mm, pud);
+}
+
+#define __pud_free_tlb(tlb, pud, addr) pud_free((tlb)->mm, pud)
+
+#define p4d_alloc_one p4d_alloc_one
+static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
+{
+ if (pgtable_l5_enabled) {
+ gfp_t gfp = GFP_PGTABLE_USER;
+
+ if (mm == &init_mm)
+ gfp = GFP_PGTABLE_KERNEL;
+ return (p4d_t *)get_zeroed_page(gfp);
+ }
+
+ return NULL;
+}
+
+static inline void __p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
+ free_page((unsigned long)p4d);
+}
+
+#define p4d_free p4d_free
+static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
+{
+ if (pgtable_l5_enabled)
+ __p4d_free(mm, p4d);
+}
+
+#define __p4d_free_tlb(tlb, p4d, addr) p4d_free((tlb)->mm, p4d)
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+static inline void sync_kernel_mappings(pgd_t *pgd)
+{
+ memcpy(pgd + USER_PTRS_PER_PGD,
+ init_mm.pgd + USER_PTRS_PER_PGD,
+ (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
+}
+
+static inline pgd_t *pgd_alloc(struct mm_struct *mm)
+{
+ pgd_t *pgd;
+
+ pgd = (pgd_t *)__get_free_page(GFP_KERNEL);
+ if (likely(pgd != NULL)) {
+ memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
+ /* Copy kernel mappings */
+ sync_kernel_mappings(pgd);
+ }
+ return pgd;
+}
+
+#ifndef __PAGETABLE_PMD_FOLDED
+
+#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)
+
+#endif /* __PAGETABLE_PMD_FOLDED */
+
+#define __pte_free_tlb(tlb, pte, buf) \
+do { \
+ pagetable_pte_dtor(page_ptdesc(pte)); \
+ tlb_remove_page_ptdesc((tlb), page_ptdesc(pte));\
+} while (0)
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_PGALLOC_H */
diff --git a/arch/riscv/include/asm/pgtable-32.h b/arch/riscv/include/asm/pgtable-32.h
new file mode 100644
index 0000000000..59ba1fbaf7
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-32.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_32_H
+#define _ASM_RISCV_PGTABLE_32_H
+
+#include <asm-generic/pgtable-nopmd.h>
+#include <linux/bits.h>
+#include <linux/const.h>
+
+/* Size of region mapped by a page global directory */
+#define PGDIR_SHIFT 22
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+#define MAX_POSSIBLE_PHYSMEM_BITS 34
+
+/*
+ * rv32 PTE format:
+ * | XLEN-1 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(31, 10)
+
+#define _PAGE_NOCACHE 0
+#define _PAGE_IO 0
+#define _PAGE_MTMASK 0
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL))
+
+#endif /* _ASM_RISCV_PGTABLE_32_H */
diff --git a/arch/riscv/include/asm/pgtable-64.h b/arch/riscv/include/asm/pgtable-64.h
new file mode 100644
index 0000000000..7a5097202e
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-64.h
@@ -0,0 +1,411 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_64_H
+#define _ASM_RISCV_PGTABLE_64_H
+
+#include <linux/bits.h>
+#include <linux/const.h>
+#include <asm/errata_list.h>
+
+extern bool pgtable_l4_enabled;
+extern bool pgtable_l5_enabled;
+
+#define PGDIR_SHIFT_L3 30
+#define PGDIR_SHIFT_L4 39
+#define PGDIR_SHIFT_L5 48
+#define PGDIR_SIZE_L3 (_AC(1, UL) << PGDIR_SHIFT_L3)
+
+#define PGDIR_SHIFT (pgtable_l5_enabled ? PGDIR_SHIFT_L5 : \
+ (pgtable_l4_enabled ? PGDIR_SHIFT_L4 : PGDIR_SHIFT_L3))
+/* Size of region mapped by a page global directory */
+#define PGDIR_SIZE (_AC(1, UL) << PGDIR_SHIFT)
+#define PGDIR_MASK (~(PGDIR_SIZE - 1))
+
+/* p4d is folded into pgd in case of 4-level page table */
+#define P4D_SHIFT_L3 30
+#define P4D_SHIFT_L4 39
+#define P4D_SHIFT_L5 39
+#define P4D_SHIFT (pgtable_l5_enabled ? P4D_SHIFT_L5 : \
+ (pgtable_l4_enabled ? P4D_SHIFT_L4 : P4D_SHIFT_L3))
+#define P4D_SIZE (_AC(1, UL) << P4D_SHIFT)
+#define P4D_MASK (~(P4D_SIZE - 1))
+
+/* pud is folded into pgd in case of 3-level page table */
+#define PUD_SHIFT 30
+#define PUD_SIZE (_AC(1, UL) << PUD_SHIFT)
+#define PUD_MASK (~(PUD_SIZE - 1))
+
+#define PMD_SHIFT 21
+/* Size of region mapped by a page middle directory */
+#define PMD_SIZE (_AC(1, UL) << PMD_SHIFT)
+#define PMD_MASK (~(PMD_SIZE - 1))
+
+/* Page 4th Directory entry */
+typedef struct {
+ unsigned long p4d;
+} p4d_t;
+
+#define p4d_val(x) ((x).p4d)
+#define __p4d(x) ((p4d_t) { (x) })
+#define PTRS_PER_P4D (PAGE_SIZE / sizeof(p4d_t))
+
+/* Page Upper Directory entry */
+typedef struct {
+ unsigned long pud;
+} pud_t;
+
+#define pud_val(x) ((x).pud)
+#define __pud(x) ((pud_t) { (x) })
+#define PTRS_PER_PUD (PAGE_SIZE / sizeof(pud_t))
+
+/* Page Middle Directory entry */
+typedef struct {
+ unsigned long pmd;
+} pmd_t;
+
+#define pmd_val(x) ((x).pmd)
+#define __pmd(x) ((pmd_t) { (x) })
+
+#define PTRS_PER_PMD (PAGE_SIZE / sizeof(pmd_t))
+
+/*
+ * rv64 PTE format:
+ * | 63 | 62 61 | 60 54 | 53 10 | 9 8 | 7 | 6 | 5 | 4 | 3 | 2 | 1 | 0
+ * N MT RSV PFN reserved for SW D A G U X W R V
+ */
+#define _PAGE_PFN_MASK GENMASK(53, 10)
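+/*
+ * Editorial example: with the layout above, the PFN of a PTE is recovered
+ * as (pte_val(pte) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT, which is what
+ * __page_val_to_pfn() in <asm/pgtable.h> does.
+ */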
+
+/*
+ * [63] Svnapot definitions:
+ * 0 Svnapot disabled
+ * 1 Svnapot enabled
+ */
+#define _PAGE_NAPOT_SHIFT 63
+#define _PAGE_NAPOT BIT(_PAGE_NAPOT_SHIFT)
+/*
+ * Only 64KB (order-4) NAPOT PTEs are supported.
+ */
+#define NAPOT_CONT_ORDER_BASE 4
+enum napot_cont_order {
+ NAPOT_CONT64KB_ORDER = NAPOT_CONT_ORDER_BASE,
+ NAPOT_ORDER_MAX,
+};
+
+#define for_each_napot_order(order) \
+ for (order = NAPOT_CONT_ORDER_BASE; order < NAPOT_ORDER_MAX; order++)
+#define for_each_napot_order_rev(order) \
+ for (order = NAPOT_ORDER_MAX - 1; \
+ order >= NAPOT_CONT_ORDER_BASE; order--)
+#define napot_cont_order(val) (__builtin_ctzl((val.pte >> _PAGE_PFN_SHIFT) << 1))
+
+#define napot_cont_shift(order) ((order) + PAGE_SHIFT)
+#define napot_cont_size(order) BIT(napot_cont_shift(order))
+#define napot_cont_mask(order) (~(napot_cont_size(order) - 1UL))
+#define napot_pte_num(order) BIT(order)
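+/*
+ * Editorial worked example: for NAPOT_CONT64KB_ORDER (order 4),
+ * napot_cont_shift(4) == 16, napot_cont_size(4) == SZ_64K,
+ * napot_pte_num(4) == 16 and napot_cont_mask(4) == ~0xffffUL.
+ * napot_cont_order() recovers the order from a PTE by locating the
+ * lowest set bit in the NAPOT-encoded PFN field.
+ */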
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#define HUGE_MAX_HSTATE (2 + (NAPOT_ORDER_MAX - NAPOT_CONT_ORDER_BASE))
+#else
+#define HUGE_MAX_HSTATE 2
+#endif
+
+/*
+ * [62:61] Svpbmt Memory Type definitions:
+ *
+ * 00 - PMA Normal Cacheable, No change to implied PMA memory type
+ * 01 - NC Non-cacheable, idempotent, weakly-ordered Main Memory
+ * 10 - IO Non-cacheable, non-idempotent, strongly-ordered I/O memory
+ * 11 - Rsvd Reserved for future standard use
+ */
+#define _PAGE_NOCACHE_SVPBMT (1UL << 61)
+#define _PAGE_IO_SVPBMT (1UL << 62)
+#define _PAGE_MTMASK_SVPBMT (_PAGE_NOCACHE_SVPBMT | _PAGE_IO_SVPBMT)
+
+/*
+ * [63:59] T-Head Memory Type definitions:
+ *
+ * 00000 - NC Weakly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ * 01110 - PMA Weakly-ordered, Cacheable, Bufferable, Shareable, Non-trustable
+ * 10000 - IO Strongly-ordered, Non-cacheable, Non-bufferable, Non-shareable, Non-trustable
+ */
+#define _PAGE_PMA_THEAD ((1UL << 62) | (1UL << 61) | (1UL << 60))
+#define _PAGE_NOCACHE_THEAD 0UL
+#define _PAGE_IO_THEAD (1UL << 63)
+#define _PAGE_MTMASK_THEAD (_PAGE_PMA_THEAD | _PAGE_IO_THEAD | (1UL << 59))
+
+static inline u64 riscv_page_mtmask(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_MTMASK);
+ return val;
+}
+
+static inline u64 riscv_page_nocache(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_NOCACHE);
+ return val;
+}
+
+static inline u64 riscv_page_io(void)
+{
+ u64 val;
+
+ ALT_SVPBMT(val, _PAGE_IO);
+ return val;
+}
+
+#define _PAGE_NOCACHE riscv_page_nocache()
+#define _PAGE_IO riscv_page_io()
+#define _PAGE_MTMASK riscv_page_mtmask()
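+/*
+ * Editorial sketch (not upstream code): changing the memory type of a
+ * mapping means masking out the old type and OR-ing in the new one, e.g.
+ *
+ *	prot = (prot & ~_PAGE_MTMASK) | _PAGE_NOCACHE;
+ *
+ * which is the pattern pgprot_writecombine() in <asm/pgtable.h> uses.
+ */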
+
+/* Set of bits to preserve across pte_modify() */
+#define _PAGE_CHG_MASK (~(unsigned long)(_PAGE_PRESENT | _PAGE_READ | \
+ _PAGE_WRITE | _PAGE_EXEC | \
+ _PAGE_USER | _PAGE_GLOBAL | \
+ _PAGE_MTMASK))
+
+static inline int pud_present(pud_t pud)
+{
+ return (pud_val(pud) & _PAGE_PRESENT);
+}
+
+static inline int pud_none(pud_t pud)
+{
+ return (pud_val(pud) == 0);
+}
+
+static inline int pud_bad(pud_t pud)
+{
+ return !pud_present(pud);
+}
+
+#define pud_leaf pud_leaf
+static inline int pud_leaf(pud_t pud)
+{
+ return pud_present(pud) && (pud_val(pud) & _PAGE_LEAF);
+}
+
+static inline int pud_user(pud_t pud)
+{
+ return pud_val(pud) & _PAGE_USER;
+}
+
+static inline void set_pud(pud_t *pudp, pud_t pud)
+{
+ *pudp = pud;
+}
+
+static inline void pud_clear(pud_t *pudp)
+{
+ set_pud(pudp, __pud(0));
+}
+
+static inline pud_t pfn_pud(unsigned long pfn, pgprot_t prot)
+{
+ return __pud((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _pud_pfn(pud_t pud)
+{
+ return __page_val_to_pfn(pud_val(pud));
+}
+
+static inline pmd_t *pud_pgtable(pud_t pud)
+{
+ return (pmd_t *)pfn_to_virt(__page_val_to_pfn(pud_val(pud)));
+}
+
+static inline struct page *pud_page(pud_t pud)
+{
+ return pfn_to_page(__page_val_to_pfn(pud_val(pud)));
+}
+
+#define mm_p4d_folded mm_p4d_folded
+static inline bool mm_p4d_folded(struct mm_struct *mm)
+{
+ if (pgtable_l5_enabled)
+ return false;
+
+ return true;
+}
+
+#define mm_pud_folded mm_pud_folded
+static inline bool mm_pud_folded(struct mm_struct *mm)
+{
+ if (pgtable_l4_enabled)
+ return false;
+
+ return true;
+}
+
+#define pmd_index(addr) (((addr) >> PMD_SHIFT) & (PTRS_PER_PMD - 1))
+
+static inline pmd_t pfn_pmd(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pmd((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+static inline unsigned long _pmd_pfn(pmd_t pmd)
+{
+ return __page_val_to_pfn(pmd_val(pmd));
+}
+
+#define mk_pmd(page, prot) pfn_pmd(page_to_pfn(page), prot)
+
+#define pmd_ERROR(e) \
+ pr_err("%s:%d: bad pmd %016lx.\n", __FILE__, __LINE__, pmd_val(e))
+
+#define pud_ERROR(e) \
+ pr_err("%s:%d: bad pud %016lx.\n", __FILE__, __LINE__, pud_val(e))
+
+#define p4d_ERROR(e) \
+ pr_err("%s:%d: bad p4d %016lx.\n", __FILE__, __LINE__, p4d_val(e))
+
+static inline void set_p4d(p4d_t *p4dp, p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ *p4dp = p4d;
+ else
+ set_pud((pud_t *)p4dp, (pud_t){ p4d_val(p4d) });
+}
+
+static inline int p4d_none(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (p4d_val(p4d) == 0);
+
+ return 0;
+}
+
+static inline int p4d_present(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (p4d_val(p4d) & _PAGE_PRESENT);
+
+ return 1;
+}
+
+static inline int p4d_bad(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return !p4d_present(p4d);
+
+ return 0;
+}
+
+static inline void p4d_clear(p4d_t *p4d)
+{
+ if (pgtable_l4_enabled)
+ set_p4d(p4d, __p4d(0));
+}
+
+static inline p4d_t pfn_p4d(unsigned long pfn, pgprot_t prot)
+{
+ return __p4d((pfn << _PAGE_PFN_SHIFT) | pgprot_val(prot));
+}
+
+static inline unsigned long _p4d_pfn(p4d_t p4d)
+{
+ return __page_val_to_pfn(p4d_val(p4d));
+}
+
+static inline pud_t *p4d_pgtable(p4d_t p4d)
+{
+ if (pgtable_l4_enabled)
+ return (pud_t *)pfn_to_virt(__page_val_to_pfn(p4d_val(p4d)));
+
+ return (pud_t *)pud_pgtable((pud_t) { p4d_val(p4d) });
+}
+#define p4d_page_vaddr(p4d) ((unsigned long)p4d_pgtable(p4d))
+
+static inline struct page *p4d_page(p4d_t p4d)
+{
+ return pfn_to_page(__page_val_to_pfn(p4d_val(p4d)));
+}
+
+#define pud_index(addr) (((addr) >> PUD_SHIFT) & (PTRS_PER_PUD - 1))
+
+#define pud_offset pud_offset
+static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address)
+{
+ if (pgtable_l4_enabled)
+ return p4d_pgtable(*p4d) + pud_index(address);
+
+ return (pud_t *)p4d;
+}
+
+static inline void set_pgd(pgd_t *pgdp, pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ *pgdp = pgd;
+ else
+ set_p4d((p4d_t *)pgdp, (p4d_t){ pgd_val(pgd) });
+}
+
+static inline int pgd_none(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return (pgd_val(pgd) == 0);
+
+ return 0;
+}
+
+static inline int pgd_present(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return (pgd_val(pgd) & _PAGE_PRESENT);
+
+ return 1;
+}
+
+static inline int pgd_bad(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return !pgd_present(pgd);
+
+ return 0;
+}
+
+static inline void pgd_clear(pgd_t *pgd)
+{
+ if (pgtable_l5_enabled)
+ set_pgd(pgd, __pgd(0));
+}
+
+static inline p4d_t *pgd_pgtable(pgd_t pgd)
+{
+ if (pgtable_l5_enabled)
+ return (p4d_t *)pfn_to_virt(__page_val_to_pfn(pgd_val(pgd)));
+
+ return (p4d_t *)p4d_pgtable((p4d_t) { pgd_val(pgd) });
+}
+#define pgd_page_vaddr(pgd) ((unsigned long)pgd_pgtable(pgd))
+
+static inline struct page *pgd_page(pgd_t pgd)
+{
+ return pfn_to_page(__page_val_to_pfn(pgd_val(pgd)));
+}
+#define pgd_page(pgd) pgd_page(pgd)
+
+#define p4d_index(addr) (((addr) >> P4D_SHIFT) & (PTRS_PER_P4D - 1))
+
+#define p4d_offset p4d_offset
+static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address)
+{
+ if (pgtable_l5_enabled)
+ return pgd_pgtable(*pgd) + p4d_index(address);
+
+ return (p4d_t *)pgd;
+}
+
+#endif /* _ASM_RISCV_PGTABLE_64_H */
diff --git a/arch/riscv/include/asm/pgtable-bits.h b/arch/riscv/include/asm/pgtable-bits.h
new file mode 100644
index 0000000000..f896708e83
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable-bits.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_BITS_H
+#define _ASM_RISCV_PGTABLE_BITS_H
+
+#define _PAGE_ACCESSED_OFFSET 6
+
+#define _PAGE_PRESENT (1 << 0)
+#define _PAGE_READ (1 << 1) /* Readable */
+#define _PAGE_WRITE (1 << 2) /* Writable */
+#define _PAGE_EXEC (1 << 3) /* Executable */
+#define _PAGE_USER (1 << 4) /* User */
+#define _PAGE_GLOBAL (1 << 5) /* Global */
+#define _PAGE_ACCESSED (1 << 6) /* Set by hardware on any access */
+#define _PAGE_DIRTY (1 << 7) /* Set by hardware on any write */
+#define _PAGE_SOFT (1 << 8) /* Reserved for software */
+
+#define _PAGE_SPECIAL _PAGE_SOFT
+#define _PAGE_TABLE _PAGE_PRESENT
+
+/*
+ * _PAGE_PROT_NONE is set on not-present pages (and ignored by the hardware) to
+ * distinguish them from swapped out pages
+ */
+#define _PAGE_PROT_NONE _PAGE_GLOBAL
+
+/* Used for swap PTEs only. */
+#define _PAGE_SWP_EXCLUSIVE _PAGE_ACCESSED
+
+#define _PAGE_PFN_SHIFT 10
+
+/*
+ * When all of R/W/X are zero, the PTE is a pointer to the next level
+ * of the page table; otherwise, it is a leaf PTE.
+ */
+#define _PAGE_LEAF (_PAGE_READ | _PAGE_WRITE | _PAGE_EXEC)
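+/*
+ * Editorial example: a PTE with only _PAGE_PRESENT set (== _PAGE_TABLE)
+ * points to the next-level table, while _PAGE_PRESENT plus any of R/W/X
+ * is a leaf mapping.
+ */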
+
+#endif /* _ASM_RISCV_PGTABLE_BITS_H */
diff --git a/arch/riscv/include/asm/pgtable.h b/arch/riscv/include/asm/pgtable.h
new file mode 100644
index 0000000000..511cb385be
--- /dev/null
+++ b/arch/riscv/include/asm/pgtable.h
@@ -0,0 +1,931 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PGTABLE_H
+#define _ASM_RISCV_PGTABLE_H
+
+#include <linux/mmzone.h>
+#include <linux/sizes.h>
+
+#include <asm/pgtable-bits.h>
+
+#ifndef CONFIG_MMU
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#define KERN_VIRT_SIZE (UL(-1))
+#else
+
+#define ADDRESS_SPACE_END (UL(-1))
+
+#ifdef CONFIG_64BIT
+/* Leave 2GB for kernel and BPF at the end of the address space */
+#define KERNEL_LINK_ADDR (ADDRESS_SPACE_END - SZ_2G + 1)
+#else
+#define KERNEL_LINK_ADDR PAGE_OFFSET
+#endif
+
+/* Number of entries in the page global directory */
+#define PTRS_PER_PGD (PAGE_SIZE / sizeof(pgd_t))
+/* Number of entries in the page table */
+#define PTRS_PER_PTE (PAGE_SIZE / sizeof(pte_t))
+
+/*
+ * Half of the kernel address space (1/4 of the entries of the page global
+ * directory) is for the direct mapping.
+ */
+#define KERN_VIRT_SIZE ((PTRS_PER_PGD / 2 * PGDIR_SIZE) / 2)
+
+#define VMALLOC_SIZE (KERN_VIRT_SIZE >> 1)
+#define VMALLOC_END PAGE_OFFSET
+#define VMALLOC_START (PAGE_OFFSET - VMALLOC_SIZE)
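+/*
+ * Editorial worked example, assuming Sv39 with 4K pages: PTRS_PER_PGD is
+ * 4096 / 8 = 512 and PGDIR_SIZE is 1GB, so KERN_VIRT_SIZE is
+ * (256 * 1GB) / 2 = 128GB, of which VMALLOC_SIZE takes the 64GB directly
+ * below PAGE_OFFSET.
+ */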
+
+#define BPF_JIT_REGION_SIZE (SZ_128M)
+#ifdef CONFIG_64BIT
+#define BPF_JIT_REGION_START (BPF_JIT_REGION_END - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (MODULES_END)
+#else
+#define BPF_JIT_REGION_START (PAGE_OFFSET - BPF_JIT_REGION_SIZE)
+#define BPF_JIT_REGION_END (VMALLOC_END)
+#endif
+
+/* Modules always live before the kernel */
+#ifdef CONFIG_64BIT
+/* This is used to define the end of the KASAN shadow region */
+#define MODULES_LOWEST_VADDR (KERNEL_LINK_ADDR - SZ_2G)
+#define MODULES_VADDR (PFN_ALIGN((unsigned long)&_end) - SZ_2G)
+#define MODULES_END (PFN_ALIGN((unsigned long)&_start))
+#endif
+
+/*
+ * Roughly size the vmemmap space to be large enough to fit enough
+ * struct pages to map half the virtual address space. Then
+ * position vmemmap directly below the VMALLOC region.
+ */
+#define VA_BITS_SV32 32
+#ifdef CONFIG_64BIT
+#define VA_BITS_SV39 39
+#define VA_BITS_SV48 48
+#define VA_BITS_SV57 57
+
+#define VA_BITS (pgtable_l5_enabled ? \
+ VA_BITS_SV57 : (pgtable_l4_enabled ? VA_BITS_SV48 : VA_BITS_SV39))
+#else
+#define VA_BITS VA_BITS_SV32
+#endif
+
+#define VMEMMAP_SHIFT \
+ (VA_BITS - PAGE_SHIFT - 1 + STRUCT_PAGE_MAX_SHIFT)
+#define VMEMMAP_SIZE BIT(VMEMMAP_SHIFT)
+#define VMEMMAP_END VMALLOC_START
+#define VMEMMAP_START (VMALLOC_START - VMEMMAP_SIZE)
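+/*
+ * Editorial worked example, assuming Sv39 and a 64-byte struct page
+ * (STRUCT_PAGE_MAX_SHIFT == 6): VMEMMAP_SHIFT = 39 - 12 - 1 + 6 = 32,
+ * so the vmemmap region spans 4GB immediately below VMALLOC_START.
+ */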
+
+/*
+ * Define vmemmap for pfn_to_page & page_to_pfn calls. Needed if kernel
+ * is configured with CONFIG_SPARSEMEM_VMEMMAP enabled.
+ */
+#define vmemmap ((struct page *)VMEMMAP_START)
+
+#define PCI_IO_SIZE SZ_16M
+#define PCI_IO_END VMEMMAP_START
+#define PCI_IO_START (PCI_IO_END - PCI_IO_SIZE)
+
+#define FIXADDR_TOP PCI_IO_START
+#ifdef CONFIG_64BIT
+#define MAX_FDT_SIZE PMD_SIZE
+#define FIX_FDT_SIZE (MAX_FDT_SIZE + SZ_2M)
+#define FIXADDR_SIZE (PMD_SIZE + FIX_FDT_SIZE)
+#else
+#define MAX_FDT_SIZE PGDIR_SIZE
+#define FIX_FDT_SIZE MAX_FDT_SIZE
+#define FIXADDR_SIZE (PGDIR_SIZE + FIX_FDT_SIZE)
+#endif
+#define FIXADDR_START (FIXADDR_TOP - FIXADDR_SIZE)
+
+#endif
+
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_OFFSET SZ_32M
+#define XIP_OFFSET_MASK (SZ_32M - 1)
+#else
+#define XIP_OFFSET 0
+#endif
+
+#ifndef __ASSEMBLY__
+
+#include <asm/page.h>
+#include <asm/tlbflush.h>
+#include <linux/mm_types.h>
+#include <asm/compat.h>
+
+#define __page_val_to_pfn(_val) (((_val) & _PAGE_PFN_MASK) >> _PAGE_PFN_SHIFT)
+
+#ifdef CONFIG_64BIT
+#include <asm/pgtable-64.h>
+
+#define VA_USER_SV39 (UL(1) << (VA_BITS_SV39 - 1))
+#define VA_USER_SV48 (UL(1) << (VA_BITS_SV48 - 1))
+#define VA_USER_SV57 (UL(1) << (VA_BITS_SV57 - 1))
+
+#ifdef CONFIG_COMPAT
+#define MMAP_VA_BITS_64 ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS_64 (VA_BITS_SV39)
+#define MMAP_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_VA_BITS_64)
+#define MMAP_MIN_VA_BITS (is_compat_task() ? VA_BITS_SV32 : MMAP_MIN_VA_BITS_64)
+#else
+#define MMAP_VA_BITS ((VA_BITS >= VA_BITS_SV48) ? VA_BITS_SV48 : VA_BITS)
+#define MMAP_MIN_VA_BITS (VA_BITS_SV39)
+#endif /* CONFIG_COMPAT */
+
+#else
+#include <asm/pgtable-32.h>
+#endif /* CONFIG_64BIT */
+
+#include <linux/page_table_check.h>
+
+#ifdef CONFIG_XIP_KERNEL
+#define XIP_FIXUP(addr) ({ \
+ uintptr_t __a = (uintptr_t)(addr); \
+ (__a >= CONFIG_XIP_PHYS_ADDR && \
+ __a < CONFIG_XIP_PHYS_ADDR + XIP_OFFSET * 2) ? \
+ __a - CONFIG_XIP_PHYS_ADDR + CONFIG_PHYS_RAM_BASE - XIP_OFFSET :\
+ __a; \
+ })
+#else
+#define XIP_FIXUP(addr) (addr)
+#endif /* CONFIG_XIP_KERNEL */
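+/*
+ * Editorial note: XIP_FIXUP() remaps a pointer that falls inside the
+ * 2 * XIP_OFFSET window starting at CONFIG_XIP_PHYS_ADDR into the RAM
+ * copy relative to CONFIG_PHYS_RAM_BASE - XIP_OFFSET; pointers outside
+ * that window are returned unchanged.
+ */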
+
+struct pt_alloc_ops {
+ pte_t *(*get_pte_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pte)(uintptr_t va);
+#ifndef __PAGETABLE_PMD_FOLDED
+ pmd_t *(*get_pmd_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pmd)(uintptr_t va);
+ pud_t *(*get_pud_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_pud)(uintptr_t va);
+ p4d_t *(*get_p4d_virt)(phys_addr_t pa);
+ phys_addr_t (*alloc_p4d)(uintptr_t va);
+#endif
+};
+
+extern struct pt_alloc_ops pt_ops __initdata;
+
+#ifdef CONFIG_MMU
+/* Number of PGD entries that a user-mode program can use */
+#define USER_PTRS_PER_PGD (TASK_SIZE / PGDIR_SIZE)
+
+/* Page protection bits */
+#define _PAGE_BASE (_PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_USER)
+
+#define PAGE_NONE __pgprot(_PAGE_PROT_NONE | _PAGE_READ)
+#define PAGE_READ __pgprot(_PAGE_BASE | _PAGE_READ)
+#define PAGE_WRITE __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_WRITE)
+#define PAGE_EXEC __pgprot(_PAGE_BASE | _PAGE_EXEC)
+#define PAGE_READ_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | _PAGE_EXEC)
+#define PAGE_WRITE_EXEC __pgprot(_PAGE_BASE | _PAGE_READ | \
+ _PAGE_EXEC | _PAGE_WRITE)
+
+#define PAGE_COPY PAGE_READ
+#define PAGE_COPY_EXEC PAGE_READ_EXEC
+#define PAGE_SHARED PAGE_WRITE
+#define PAGE_SHARED_EXEC PAGE_WRITE_EXEC
+
+#define _PAGE_KERNEL (_PAGE_READ \
+ | _PAGE_WRITE \
+ | _PAGE_PRESENT \
+ | _PAGE_ACCESSED \
+ | _PAGE_DIRTY \
+ | _PAGE_GLOBAL)
+
+#define PAGE_KERNEL __pgprot(_PAGE_KERNEL)
+#define PAGE_KERNEL_READ __pgprot(_PAGE_KERNEL & ~_PAGE_WRITE)
+#define PAGE_KERNEL_EXEC __pgprot(_PAGE_KERNEL | _PAGE_EXEC)
+#define PAGE_KERNEL_READ_EXEC __pgprot((_PAGE_KERNEL & ~_PAGE_WRITE) \
+ | _PAGE_EXEC)
+
+#define PAGE_TABLE __pgprot(_PAGE_TABLE)
+
+#define _PAGE_IOREMAP ((_PAGE_KERNEL & ~_PAGE_MTMASK) | _PAGE_IO)
+#define PAGE_KERNEL_IO __pgprot(_PAGE_IOREMAP)
+
+extern pgd_t swapper_pg_dir[];
+extern pgd_t trampoline_pg_dir[];
+extern pgd_t early_pg_dir[];
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_present(pmd_t pmd)
+{
+ /*
+ * Checking for _PAGE_LEAF is needed too because:
+	 * when splitting a THP, split_huge_page() will temporarily clear
+	 * the present bit; in this situation, pmd_present() and
+	 * pmd_trans_huge() still need to return true.
+ */
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE | _PAGE_LEAF));
+}
+#else
+static inline int pmd_present(pmd_t pmd)
+{
+ return (pmd_val(pmd) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+#endif
+
+static inline int pmd_none(pmd_t pmd)
+{
+ return (pmd_val(pmd) == 0);
+}
+
+static inline int pmd_bad(pmd_t pmd)
+{
+ return !pmd_present(pmd) || (pmd_val(pmd) & _PAGE_LEAF);
+}
+
+#define pmd_leaf pmd_leaf
+static inline int pmd_leaf(pmd_t pmd)
+{
+ return pmd_present(pmd) && (pmd_val(pmd) & _PAGE_LEAF);
+}
+
+static inline void set_pmd(pmd_t *pmdp, pmd_t pmd)
+{
+ *pmdp = pmd;
+}
+
+static inline void pmd_clear(pmd_t *pmdp)
+{
+ set_pmd(pmdp, __pmd(0));
+}
+
+static inline pgd_t pfn_pgd(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pgd((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+static inline unsigned long _pgd_pfn(pgd_t pgd)
+{
+ return __page_val_to_pfn(pgd_val(pgd));
+}
+
+static inline struct page *pmd_page(pmd_t pmd)
+{
+ return pfn_to_page(__page_val_to_pfn(pmd_val(pmd)));
+}
+
+static inline unsigned long pmd_page_vaddr(pmd_t pmd)
+{
+ return (unsigned long)pfn_to_virt(__page_val_to_pfn(pmd_val(pmd)));
+}
+
+static inline pte_t pmd_pte(pmd_t pmd)
+{
+ return __pte(pmd_val(pmd));
+}
+
+static inline pte_t pud_pte(pud_t pud)
+{
+ return __pte(pud_val(pud));
+}
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+
+static __always_inline bool has_svnapot(void)
+{
+ return riscv_has_extension_likely(RISCV_ISA_EXT_SVNAPOT);
+}
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_NAPOT;
+}
+
+static inline pte_t pte_mknapot(pte_t pte, unsigned int order)
+{
+ int pos = order - 1 + _PAGE_PFN_SHIFT;
+ unsigned long napot_bit = BIT(pos);
+ unsigned long napot_mask = ~GENMASK(pos, _PAGE_PFN_SHIFT);
+
+ return __pte((pte_val(pte) & napot_mask) | napot_bit | _PAGE_NAPOT);
+}
+
+#else
+
+static __always_inline bool has_svnapot(void) { return false; }
+
+static inline unsigned long pte_napot(pte_t pte)
+{
+ return 0;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+
+/* Yields the page frame number (PFN) of a page table entry */
+static inline unsigned long pte_pfn(pte_t pte)
+{
+ unsigned long res = __page_val_to_pfn(pte_val(pte));
+
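+	/*
+	 * A NAPOT PTE encodes its order as the lowest set PFN bit;
+	 * clearing that bit recovers the base PFN.
+	 */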
+ if (has_svnapot() && pte_napot(pte))
+ res = res & (res - 1UL);
+
+ return res;
+}
+
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
+/* Constructs a page table entry */
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ unsigned long prot_val = pgprot_val(prot);
+
+ ALT_THEAD_PMA(prot_val);
+
+ return __pte((pfn << _PAGE_PFN_SHIFT) | prot_val);
+}
+
+#define mk_pte(page, prot) pfn_pte(page_to_pfn(page), prot)
+
+static inline int pte_present(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE));
+}
+
+static inline int pte_none(pte_t pte)
+{
+ return (pte_val(pte) == 0);
+}
+
+static inline int pte_write(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_WRITE;
+}
+
+static inline int pte_exec(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_EXEC;
+}
+
+static inline int pte_user(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_USER;
+}
+
+static inline int pte_huge(pte_t pte)
+{
+ return pte_present(pte) && (pte_val(pte) & _PAGE_LEAF);
+}
+
+static inline int pte_dirty(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_DIRTY;
+}
+
+static inline int pte_young(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_ACCESSED;
+}
+
+static inline int pte_special(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SPECIAL;
+}
+
+/* static inline pte_t pte_rdprotect(pte_t pte) */
+
+static inline pte_t pte_wrprotect(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_WRITE));
+}
+
+/* static inline pte_t pte_mkread(pte_t pte) */
+
+static inline pte_t pte_mkwrite_novma(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_WRITE);
+}
+
+/* static inline pte_t pte_mkexec(pte_t pte) */
+
+static inline pte_t pte_mkdirty(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_DIRTY);
+}
+
+static inline pte_t pte_mkclean(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_DIRTY));
+}
+
+static inline pte_t pte_mkyoung(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_ACCESSED);
+}
+
+static inline pte_t pte_mkold(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~(_PAGE_ACCESSED));
+}
+
+static inline pte_t pte_mkspecial(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SPECIAL);
+}
+
+static inline pte_t pte_mkhuge(pte_t pte)
+{
+ return pte;
+}
+
+#ifdef CONFIG_NUMA_BALANCING
+/*
+ * See the comment in include/asm-generic/pgtable.h
+ */
+static inline int pte_protnone(pte_t pte)
+{
+ return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_PROT_NONE)) == _PAGE_PROT_NONE;
+}
+
+static inline int pmd_protnone(pmd_t pmd)
+{
+ return pte_protnone(pmd_pte(pmd));
+}
+#endif
+
+/* Modify page protection bits */
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ unsigned long newprot_val = pgprot_val(newprot);
+
+ ALT_THEAD_PMA(newprot_val);
+
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) | newprot_val);
+}
+
+#define pgd_ERROR(e) \
+ pr_err("%s:%d: bad pgd " PTE_FMT ".\n", __FILE__, __LINE__, pgd_val(e))
+
+
+/* Commit new configuration to MMU hardware */
+static inline void update_mmu_cache_range(struct vm_fault *vmf,
+ struct vm_area_struct *vma, unsigned long address,
+ pte_t *ptep, unsigned int nr)
+{
+ /*
+ * The kernel assumes that TLBs don't cache invalid entries, but
+ * in RISC-V, SFENCE.VMA specifies an ordering constraint, not a
+ * cache flush; it is necessary even after writing invalid entries.
+ * Relying on flush_tlb_fix_spurious_fault would suffice, but
+ * the extra traps reduce performance. So, eagerly SFENCE.VMA.
+ */
+ while (nr--)
+ local_flush_tlb_page(address + nr * PAGE_SIZE);
+}
+#define update_mmu_cache(vma, addr, ptep) \
+ update_mmu_cache_range(NULL, vma, addr, ptep, 1)
+
+#define __HAVE_ARCH_UPDATE_MMU_TLB
+#define update_mmu_tlb update_mmu_cache
+
+static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ pte_t *ptep = (pte_t *)pmdp;
+
+ update_mmu_cache(vma, address, ptep);
+}
+
+#define __HAVE_ARCH_PTE_SAME
+static inline int pte_same(pte_t pte_a, pte_t pte_b)
+{
+ return pte_val(pte_a) == pte_val(pte_b);
+}
+
+/*
+ * Certain architectures need to do special things when PTEs within
+ * a page table are directly modified. Thus, the following hook is
+ * made available.
+ */
+static inline void set_pte(pte_t *ptep, pte_t pteval)
+{
+ *ptep = pteval;
+}
+
+void flush_icache_pte(pte_t pte);
+
+static inline void __set_pte_at(pte_t *ptep, pte_t pteval)
+{
+ if (pte_present(pteval) && pte_exec(pteval))
+ flush_icache_pte(pteval);
+
+ set_pte(ptep, pteval);
+}
+
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval, unsigned int nr)
+{
+ page_table_check_ptes_set(mm, ptep, pteval, nr);
+
+ for (;;) {
+ __set_pte_at(ptep, pteval);
+ if (--nr == 0)
+ break;
+ ptep++;
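+		/* step the encoded PFN forward one page for the next PTE */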
+ pte_val(pteval) += 1 << _PAGE_PFN_SHIFT;
+ }
+}
+#define set_ptes set_ptes
+
+static inline void pte_clear(struct mm_struct *mm,
+ unsigned long addr, pte_t *ptep)
+{
+ __set_pte_at(ptep, __pte(0));
+}
+
+#define __HAVE_ARCH_PTEP_SET_ACCESS_FLAGS
+static inline int ptep_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep,
+ pte_t entry, int dirty)
+{
+ if (!pte_same(*ptep, entry))
+ __set_pte_at(ptep, entry);
+ /*
+ * update_mmu_cache will unconditionally execute, handling both
+ * the case that the PTE changed and the spurious fault case.
+ */
+ return true;
+}
+
+#define __HAVE_ARCH_PTEP_GET_AND_CLEAR
+static inline pte_t ptep_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ pte_t pte = __pte(atomic_long_xchg((atomic_long_t *)ptep, 0));
+
+ page_table_check_pte_clear(mm, pte);
+
+ return pte;
+}
+
+#define __HAVE_ARCH_PTEP_TEST_AND_CLEAR_YOUNG
+static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address,
+ pte_t *ptep)
+{
+ if (!pte_young(*ptep))
+ return 0;
+ return test_and_clear_bit(_PAGE_ACCESSED_OFFSET, &pte_val(*ptep));
+}
+
+#define __HAVE_ARCH_PTEP_SET_WRPROTECT
+static inline void ptep_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pte_t *ptep)
+{
+ atomic_long_and(~(unsigned long)_PAGE_WRITE, (atomic_long_t *)ptep);
+}
+
+#define __HAVE_ARCH_PTEP_CLEAR_YOUNG_FLUSH
+static inline int ptep_clear_flush_young(struct vm_area_struct *vma,
+ unsigned long address, pte_t *ptep)
+{
+ /*
+ * This comment is borrowed from x86, but applies equally to RISC-V:
+ *
+ * Clearing the accessed bit without a TLB flush
+ * doesn't cause data corruption. [ It could cause incorrect
+ * page aging and the (mistaken) reclaim of hot pages, but the
+ * chance of that should be relatively low. ]
+ *
+ * So as a performance optimization don't flush the TLB when
+ * clearing the accessed bit, it will eventually be flushed by
+ * a context switch or a VM operation anyway. [ In the rare
+ * event of it not getting flushed for a long time the delay
+ * shouldn't really matter because there's no real memory
+ * pressure for swapout to react to. ]
+ */
+ return ptep_test_and_clear_young(vma, address, ptep);
+}
+
+#define pgprot_noncached pgprot_noncached
+static inline pgprot_t pgprot_noncached(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_IO;
+
+ return __pgprot(prot);
+}
+
+#define pgprot_writecombine pgprot_writecombine
+static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
+{
+ unsigned long prot = pgprot_val(_prot);
+
+ prot &= ~_PAGE_MTMASK;
+ prot |= _PAGE_NOCACHE;
+
+ return __pgprot(prot);
+}
+
+/*
+ * THP functions
+ */
+static inline pmd_t pte_pmd(pte_t pte)
+{
+ return __pmd(pte_val(pte));
+}
+
+static inline pmd_t pmd_mkhuge(pmd_t pmd)
+{
+ return pmd;
+}
+
+static inline pmd_t pmd_mkinvalid(pmd_t pmd)
+{
+ return __pmd(pmd_val(pmd) & ~(_PAGE_PRESENT|_PAGE_PROT_NONE));
+}
+
+#define __pmd_to_phys(pmd) (__page_val_to_pfn(pmd_val(pmd)) << PAGE_SHIFT)
+
+static inline unsigned long pmd_pfn(pmd_t pmd)
+{
+ return ((__pmd_to_phys(pmd) & PMD_MASK) >> PAGE_SHIFT);
+}
+
+#define __pud_to_phys(pud) (__page_val_to_pfn(pud_val(pud)) << PAGE_SHIFT)
+
+static inline unsigned long pud_pfn(pud_t pud)
+{
+ return ((__pud_to_phys(pud) & PUD_MASK) >> PAGE_SHIFT);
+}
+
+static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
+{
+ return pte_pmd(pte_modify(pmd_pte(pmd), newprot));
+}
+
+#define pmd_write pmd_write
+static inline int pmd_write(pmd_t pmd)
+{
+ return pte_write(pmd_pte(pmd));
+}
+
+static inline int pmd_dirty(pmd_t pmd)
+{
+ return pte_dirty(pmd_pte(pmd));
+}
+
+#define pmd_young pmd_young
+static inline int pmd_young(pmd_t pmd)
+{
+ return pte_young(pmd_pte(pmd));
+}
+
+static inline int pmd_user(pmd_t pmd)
+{
+ return pte_user(pmd_pte(pmd));
+}
+
+static inline pmd_t pmd_mkold(pmd_t pmd)
+{
+ return pte_pmd(pte_mkold(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkyoung(pmd_t pmd)
+{
+ return pte_pmd(pte_mkyoung(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkwrite_novma(pmd_t pmd)
+{
+ return pte_pmd(pte_mkwrite_novma(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_wrprotect(pmd_t pmd)
+{
+ return pte_pmd(pte_wrprotect(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkclean(pmd_t pmd)
+{
+ return pte_pmd(pte_mkclean(pmd_pte(pmd)));
+}
+
+static inline pmd_t pmd_mkdirty(pmd_t pmd)
+{
+ return pte_pmd(pte_mkdirty(pmd_pte(pmd)));
+}
+
+static inline void set_pmd_at(struct mm_struct *mm, unsigned long addr,
+ pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(mm, pmdp, pmd);
+ return __set_pte_at((pte_t *)pmdp, pmd_pte(pmd));
+}
+
+static inline void set_pud_at(struct mm_struct *mm, unsigned long addr,
+ pud_t *pudp, pud_t pud)
+{
+ page_table_check_pud_set(mm, pudp, pud);
+ return __set_pte_at((pte_t *)pudp, pud_pte(pud));
+}
+
+#ifdef CONFIG_PAGE_TABLE_CHECK
+static inline bool pte_user_accessible_page(pte_t pte)
+{
+ return pte_present(pte) && pte_user(pte);
+}
+
+static inline bool pmd_user_accessible_page(pmd_t pmd)
+{
+ return pmd_leaf(pmd) && pmd_user(pmd);
+}
+
+static inline bool pud_user_accessible_page(pud_t pud)
+{
+ return pud_leaf(pud) && pud_user(pud);
+}
+#endif
+
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+static inline int pmd_trans_huge(pmd_t pmd)
+{
+ return pmd_leaf(pmd);
+}
+
+#define __HAVE_ARCH_PMDP_SET_ACCESS_FLAGS
+static inline int pmdp_set_access_flags(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp,
+ pmd_t entry, int dirty)
+{
+ return ptep_set_access_flags(vma, address, (pte_t *)pmdp, pmd_pte(entry), dirty);
+}
+
+#define __HAVE_ARCH_PMDP_TEST_AND_CLEAR_YOUNG
+static inline int pmdp_test_and_clear_young(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp)
+{
+ return ptep_test_and_clear_young(vma, address, (pte_t *)pmdp);
+}
+
+#define __HAVE_ARCH_PMDP_HUGE_GET_AND_CLEAR
+static inline pmd_t pmdp_huge_get_and_clear(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ pmd_t pmd = __pmd(atomic_long_xchg((atomic_long_t *)pmdp, 0));
+
+ page_table_check_pmd_clear(mm, pmd);
+
+ return pmd;
+}
+
+#define __HAVE_ARCH_PMDP_SET_WRPROTECT
+static inline void pmdp_set_wrprotect(struct mm_struct *mm,
+ unsigned long address, pmd_t *pmdp)
+{
+ ptep_set_wrprotect(mm, address, (pte_t *)pmdp);
+}
+
+#define pmdp_establish pmdp_establish
+static inline pmd_t pmdp_establish(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp, pmd_t pmd)
+{
+ page_table_check_pmd_set(vma->vm_mm, pmdp, pmd);
+ return __pmd(atomic_long_xchg((atomic_long_t *)pmdp, pmd_val(pmd)));
+}
+
+#define pmdp_collapse_flush pmdp_collapse_flush
+extern pmd_t pmdp_collapse_flush(struct vm_area_struct *vma,
+ unsigned long address, pmd_t *pmdp);
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+/*
+ * Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
+ * are !pte_none() && !pte_present().
+ *
+ * Format of swap PTE:
+ * bit 0: _PAGE_PRESENT (zero)
+ * bit 1 to 3: _PAGE_LEAF (zero)
+ * bit 5: _PAGE_PROT_NONE (zero)
+ * bit 6: exclusive marker
+ * bits 7 to 11: swap type
+ * bits 12 to XLEN-1: swap offset
+ */
+#define __SWP_TYPE_SHIFT 7
+#define __SWP_TYPE_BITS 5
+#define __SWP_TYPE_MASK ((1UL << __SWP_TYPE_BITS) - 1)
+#define __SWP_OFFSET_SHIFT (__SWP_TYPE_BITS + __SWP_TYPE_SHIFT)
+
+#define MAX_SWAPFILES_CHECK() \
+ BUILD_BUG_ON(MAX_SWAPFILES_SHIFT > __SWP_TYPE_BITS)
+
+#define __swp_type(x) (((x).val >> __SWP_TYPE_SHIFT) & __SWP_TYPE_MASK)
+#define __swp_offset(x) ((x).val >> __SWP_OFFSET_SHIFT)
+#define __swp_entry(type, offset) ((swp_entry_t) \
+ { (((type) & __SWP_TYPE_MASK) << __SWP_TYPE_SHIFT) | \
+ ((offset) << __SWP_OFFSET_SHIFT) })
+
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { pte_val(pte) })
+#define __swp_entry_to_pte(x) ((pte_t) { (x).val })
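+/*
+ * Editorial worked example: __swp_entry(2, 0x1234) yields
+ * (2 << 7) | (0x1234 << 12); __swp_type() and __swp_offset() then
+ * recover 2 and 0x1234 from that value.
+ */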
+
+static inline int pte_swp_exclusive(pte_t pte)
+{
+ return pte_val(pte) & _PAGE_SWP_EXCLUSIVE;
+}
+
+static inline pte_t pte_swp_mkexclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) | _PAGE_SWP_EXCLUSIVE);
+}
+
+static inline pte_t pte_swp_clear_exclusive(pte_t pte)
+{
+ return __pte(pte_val(pte) & ~_PAGE_SWP_EXCLUSIVE);
+}
+
+#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
+#define __pmd_to_swp_entry(pmd) ((swp_entry_t) { pmd_val(pmd) })
+#define __swp_entry_to_pmd(swp) __pmd((swp).val)
+#endif /* CONFIG_ARCH_ENABLE_THP_MIGRATION */
+
+/*
+ * In the RV64 Linux scheme, we give the user half of the virtual-address space
+ * and give the kernel the other (upper) half.
+ */
+#ifdef CONFIG_64BIT
+#define KERN_VIRT_START (-(BIT(VA_BITS)) + TASK_SIZE)
+#else
+#define KERN_VIRT_START FIXADDR_START
+#endif
+
+/*
+ * Task size is:
+ * - 0x9fc00000 (~2.5GB) for RV32.
+ * - 0x4000000000 ( 256GB) for RV64 using SV39 mmu
+ * - 0x800000000000 ( 128TB) for RV64 using SV48 mmu
+ * - 0x100000000000000 ( 64PB) for RV64 using SV57 mmu
+ *
+ * Note that PGDIR_SIZE must evenly divide TASK_SIZE since "RISC-V
+ * Instruction Set Manual Volume II: Privileged Architecture" states that
+ * "load and store effective addresses, which are 64bits, must have bits
+ * 63–48 all equal to bit 47, or else a page-fault exception will occur."
+ * Similarly for SV57, bits 63–57 must be equal to bit 56.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_SIZE_64 (PGDIR_SIZE * PTRS_PER_PGD / 2)
+#define TASK_SIZE_MIN (PGDIR_SIZE_L3 * PTRS_PER_PGD / 2)
+
+#ifdef CONFIG_COMPAT
+#define TASK_SIZE_32 (_AC(0x80000000, UL))
+#define TASK_SIZE (test_thread_flag(TIF_32BIT) ? \
+ TASK_SIZE_32 : TASK_SIZE_64)
+#else
+#define TASK_SIZE TASK_SIZE_64
+#endif
+
+#else
+#define TASK_SIZE FIXADDR_START
+#define TASK_SIZE_MIN TASK_SIZE
+#endif
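+/*
+ * Editorial check of the numbers above: under Sv39, PGDIR_SIZE is 1GB
+ * and PTRS_PER_PGD is 512, so TASK_SIZE_64 = 1GB * 512 / 2 =
+ * 0x4000000000 (256GB), matching the comment.
+ */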
+
+#else /* CONFIG_MMU */
+
+#define PAGE_SHARED __pgprot(0)
+#define PAGE_KERNEL __pgprot(0)
+#define swapper_pg_dir NULL
+#define TASK_SIZE 0xffffffffUL
+#define VMALLOC_START 0
+#define VMALLOC_END TASK_SIZE
+
+#endif /* !CONFIG_MMU */
+
+extern char _start[];
+extern void *_dtb_early_va;
+extern uintptr_t _dtb_early_pa;
+#if defined(CONFIG_XIP_KERNEL) && defined(CONFIG_MMU)
+#define dtb_early_va (*(void **)XIP_FIXUP(&_dtb_early_va))
+#define dtb_early_pa (*(uintptr_t *)XIP_FIXUP(&_dtb_early_pa))
+#else
+#define dtb_early_va _dtb_early_va
+#define dtb_early_pa _dtb_early_pa
+#endif /* CONFIG_XIP_KERNEL */
+extern u64 satp_mode;
+extern bool pgtable_l4_enabled;
+
+void paging_init(void);
+void misc_mem_init(void);
+
+/*
+ * ZERO_PAGE is a global shared page that is always zero,
+ * used for zero-mapped memory areas, etc.
+ */
+extern unsigned long empty_zero_page[PAGE_SIZE / sizeof(unsigned long)];
+#define ZERO_PAGE(vaddr) (virt_to_page(empty_zero_page))
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PGTABLE_H */
diff --git a/arch/riscv/include/asm/probes.h b/arch/riscv/include/asm/probes.h
new file mode 100644
index 0000000000..a787e6d537
--- /dev/null
+++ b/arch/riscv/include/asm/probes.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_PROBES_H
+#define _ASM_RISCV_PROBES_H
+
+typedef u32 probe_opcode_t;
+typedef bool (probes_handler_t) (u32 opcode, unsigned long addr, struct pt_regs *);
+
+/* architecture-specific copy of the original instruction */
+struct arch_probe_insn {
+ probe_opcode_t *insn;
+ probes_handler_t *handler;
+ /* restore address after simulation */
+ unsigned long restore;
+};
+
+#ifdef CONFIG_KPROBES
+typedef u32 kprobe_opcode_t;
+struct arch_specific_insn {
+ struct arch_probe_insn api;
+};
+#endif
+
+#endif /* _ASM_RISCV_PROBES_H */
diff --git a/arch/riscv/include/asm/processor.h b/arch/riscv/include/asm/processor.h
new file mode 100644
index 0000000000..4f6af8c6cf
--- /dev/null
+++ b/arch/riscv/include/asm/processor.h
@@ -0,0 +1,139 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PROCESSOR_H
+#define _ASM_RISCV_PROCESSOR_H
+
+#include <linux/const.h>
+#include <linux/cache.h>
+
+#include <vdso/processor.h>
+
+#include <asm/ptrace.h>
+
+#ifdef CONFIG_64BIT
+#define DEFAULT_MAP_WINDOW (UL(1) << (MMAP_VA_BITS - 1))
+#define STACK_TOP_MAX TASK_SIZE
+
+#define arch_get_mmap_end(addr, len, flags) \
+({ \
+ unsigned long mmap_end; \
+ typeof(addr) _addr = (addr); \
+ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ mmap_end = STACK_TOP_MAX; \
+ else if ((_addr) >= VA_USER_SV57) \
+ mmap_end = STACK_TOP_MAX; \
+ else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+ mmap_end = VA_USER_SV48; \
+ else \
+ mmap_end = VA_USER_SV39; \
+ mmap_end; \
+})
+
+#define arch_get_mmap_base(addr, base) \
+({ \
+ unsigned long mmap_base; \
+ typeof(addr) _addr = (addr); \
+ typeof(base) _base = (base); \
+ unsigned long rnd_gap = DEFAULT_MAP_WINDOW - (_base); \
+ if ((_addr) == 0 || (IS_ENABLED(CONFIG_COMPAT) && is_compat_task())) \
+ mmap_base = (_base); \
+ else if (((_addr) >= VA_USER_SV57) && (VA_BITS >= VA_BITS_SV57)) \
+ mmap_base = VA_USER_SV57 - rnd_gap; \
+ else if ((((_addr) >= VA_USER_SV48)) && (VA_BITS >= VA_BITS_SV48)) \
+ mmap_base = VA_USER_SV48 - rnd_gap; \
+ else \
+ mmap_base = VA_USER_SV39 - rnd_gap; \
+ mmap_base; \
+})
+
+#else
+#define DEFAULT_MAP_WINDOW TASK_SIZE
+#define STACK_TOP_MAX TASK_SIZE
+#endif
+#define STACK_ALIGN 16
+
+#define STACK_TOP DEFAULT_MAP_WINDOW
+
+/*
+ * This decides where the kernel will search for a free chunk of vm
+ * space during mmap's.
+ */
+#ifdef CONFIG_64BIT
+#define TASK_UNMAPPED_BASE PAGE_ALIGN((UL(1) << MMAP_MIN_VA_BITS) / 3)
+#else
+#define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3)
+#endif
+
+#ifndef __ASSEMBLY__
+
+struct task_struct;
+struct pt_regs;
+
+/* CPU-specific state of a task */
+struct thread_struct {
+ /* Callee-saved registers */
+ unsigned long ra;
+ unsigned long sp; /* Kernel mode stack */
+ unsigned long s[12]; /* s[0]: frame pointer */
+ struct __riscv_d_ext_state fstate;
+ unsigned long bad_cause;
+ unsigned long vstate_ctrl;
+ struct __riscv_v_ext_state vstate;
+};
+
+/* Whitelist the fstate from the task_struct for hardened usercopy */
+static inline void arch_thread_struct_whitelist(unsigned long *offset,
+ unsigned long *size)
+{
+ *offset = offsetof(struct thread_struct, fstate);
+ *size = sizeof_field(struct thread_struct, fstate);
+}
+
+#define INIT_THREAD { \
+ .sp = sizeof(init_stack) + (long)&init_stack, \
+}
+
+#define task_pt_regs(tsk) \
+ ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \
+ - ALIGN(sizeof(struct pt_regs), STACK_ALIGN)))
+
+#define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc)
+#define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp)
+
+
+/* Do necessary setup to start up a newly executed thread. */
+extern void start_thread(struct pt_regs *regs,
+ unsigned long pc, unsigned long sp);
+
+extern unsigned long __get_wchan(struct task_struct *p);
+
+
+static inline void wait_for_interrupt(void)
+{
+ __asm__ __volatile__ ("wfi");
+}
+
+struct device_node;
+int riscv_of_processor_hartid(struct device_node *node, unsigned long *hartid);
+int riscv_early_of_processor_hartid(struct device_node *node, unsigned long *hartid);
+int riscv_of_parent_hartid(struct device_node *node, unsigned long *hartid);
+
+extern void riscv_fill_hwcap(void);
+extern int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+extern unsigned long signal_minsigstksz __ro_after_init;
+
+#ifdef CONFIG_RISCV_ISA_V
+/* Userspace interface for PR_RISCV_V_{SET,GET}_VS prctl()s: */
+#define RISCV_V_SET_CONTROL(arg) riscv_v_vstate_ctrl_set_current(arg)
+#define RISCV_V_GET_CONTROL() riscv_v_vstate_ctrl_get_current()
+extern long riscv_v_vstate_ctrl_set_current(unsigned long arg);
+extern long riscv_v_vstate_ctrl_get_current(void);
+#endif /* CONFIG_RISCV_ISA_V */
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/ptdump.h b/arch/riscv/include/asm/ptdump.h
new file mode 100644
index 0000000000..3c9ea6dd5a
--- /dev/null
+++ b/arch/riscv/include/asm/ptdump.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#ifndef _ASM_RISCV_PTDUMP_H
+#define _ASM_RISCV_PTDUMP_H
+
+void ptdump_check_wx(void);
+
+#ifdef CONFIG_DEBUG_WX
+static inline void debug_checkwx(void)
+{
+ ptdump_check_wx();
+}
+#else
+static inline void debug_checkwx(void)
+{
+}
+#endif
+
+#endif /* _ASM_RISCV_PTDUMP_H */
diff --git a/arch/riscv/include/asm/ptrace.h b/arch/riscv/include/asm/ptrace.h
new file mode 100644
index 0000000000..b5b0adcc85
--- /dev/null
+++ b/arch/riscv/include/asm/ptrace.h
@@ -0,0 +1,183 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_PTRACE_H
+#define _ASM_RISCV_PTRACE_H
+
+#include <uapi/asm/ptrace.h>
+#include <asm/csr.h>
+#include <linux/compiler.h>
+
+#ifndef __ASSEMBLY__
+
+struct pt_regs {
+ unsigned long epc;
+ unsigned long ra;
+ unsigned long sp;
+ unsigned long gp;
+ unsigned long tp;
+ unsigned long t0;
+ unsigned long t1;
+ unsigned long t2;
+ unsigned long s0;
+ unsigned long s1;
+ unsigned long a0;
+ unsigned long a1;
+ unsigned long a2;
+ unsigned long a3;
+ unsigned long a4;
+ unsigned long a5;
+ unsigned long a6;
+ unsigned long a7;
+ unsigned long s2;
+ unsigned long s3;
+ unsigned long s4;
+ unsigned long s5;
+ unsigned long s6;
+ unsigned long s7;
+ unsigned long s8;
+ unsigned long s9;
+ unsigned long s10;
+ unsigned long s11;
+ unsigned long t3;
+ unsigned long t4;
+ unsigned long t5;
+ unsigned long t6;
+ /* Supervisor/Machine CSRs */
+ unsigned long status;
+ unsigned long badaddr;
+ unsigned long cause;
+ /* a0 value before the syscall */
+ unsigned long orig_a0;
+};
+
+#define PTRACE_SYSEMU 0x1f
+#define PTRACE_SYSEMU_SINGLESTEP 0x20
+
+#ifdef CONFIG_64BIT
+#define REG_FMT "%016lx"
+#else
+#define REG_FMT "%08lx"
+#endif
+
+#define user_mode(regs) (((regs)->status & SR_PP) == 0)
+
+#define MAX_REG_OFFSET offsetof(struct pt_regs, orig_a0)
+
+/* Helpers for working with the instruction pointer */
+static inline unsigned long instruction_pointer(struct pt_regs *regs)
+{
+ return regs->epc;
+}
+static inline void instruction_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->epc = val;
+}
+
+#define profile_pc(regs) instruction_pointer(regs)
+
+/* Helpers for working with the user stack pointer */
+static inline unsigned long user_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+static inline void user_stack_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->sp = val;
+}
+
+/* Valid only for Kernel mode traps. */
+static inline unsigned long kernel_stack_pointer(struct pt_regs *regs)
+{
+ return regs->sp;
+}
+
+/* Helpers for working with the frame pointer */
+static inline unsigned long frame_pointer(struct pt_regs *regs)
+{
+ return regs->s0;
+}
+static inline void frame_pointer_set(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->s0 = val;
+}
+
+static inline unsigned long regs_return_value(struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void regs_set_return_value(struct pt_regs *regs,
+ unsigned long val)
+{
+ regs->a0 = val;
+}
+
+extern int regs_query_register_offset(const char *name);
+extern unsigned long regs_get_kernel_stack_nth(struct pt_regs *regs,
+ unsigned int n);
+
+void prepare_ftrace_return(unsigned long *parent, unsigned long self_addr,
+ unsigned long frame_pointer);
+
+/**
+ * regs_get_register() - get register value from its offset
+ * @regs: pt_regs from which the register value is retrieved
+ * @offset: offset of the register.
+ *
+ * regs_get_register() returns the value of the register located at
+ * @offset bytes into @regs, where @offset is the register's offset in
+ * struct pt_regs. If @offset is greater than MAX_REG_OFFSET, this returns 0.
+ */
+static inline unsigned long regs_get_register(struct pt_regs *regs,
+ unsigned int offset)
+{
+ if (unlikely(offset > MAX_REG_OFFSET))
+ return 0;
+
+ return *(unsigned long *)((unsigned long)regs + offset);
+}
+
+/**
+ * regs_get_kernel_argument() - get Nth function argument in kernel
+ * @regs: pt_regs of that context
+ * @n: function argument number (start from 0)
+ *
+ * regs_get_kernel_argument() returns the @n-th argument of the function call.
+ *
+ * Note that the argument is returned correctly only for functions taking
+ * no more than eight register arguments.
+ */
+static inline unsigned long regs_get_kernel_argument(struct pt_regs *regs,
+ unsigned int n)
+{
+ static const int nr_reg_arguments = 8;
+ static const unsigned int argument_offs[] = {
+ offsetof(struct pt_regs, a0),
+ offsetof(struct pt_regs, a1),
+ offsetof(struct pt_regs, a2),
+ offsetof(struct pt_regs, a3),
+ offsetof(struct pt_regs, a4),
+ offsetof(struct pt_regs, a5),
+ offsetof(struct pt_regs, a6),
+ offsetof(struct pt_regs, a7),
+ };
+
+ if (n < nr_reg_arguments)
+ return regs_get_register(regs, argument_offs[n]);
+ return 0;
+}
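+/*
+ * Editorial usage sketch (hypothetical handler): a kprobes pre-handler
+ * could read the first argument of the probed function with
+ *
+ *	unsigned long arg0 = regs_get_kernel_argument(regs, 0);
+ *
+ * which resolves to regs->a0 per the RISC-V calling convention.
+ */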
+
+static inline int regs_irqs_disabled(struct pt_regs *regs)
+{
+ return !(regs->status & SR_PIE);
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* _ASM_RISCV_PTRACE_H */
diff --git a/arch/riscv/include/asm/sbi.h b/arch/riscv/include/asm/sbi.h
new file mode 100644
index 0000000000..5b4a1bf5f4
--- /dev/null
+++ b/arch/riscv/include/asm/sbi.h
@@ -0,0 +1,343 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2015 Regents of the University of California
+ * Copyright (c) 2020 Western Digital Corporation or its affiliates.
+ */
+
+#ifndef _ASM_RISCV_SBI_H
+#define _ASM_RISCV_SBI_H
+
+#include <linux/types.h>
+#include <linux/cpumask.h>
+
+#ifdef CONFIG_RISCV_SBI
+enum sbi_ext_id {
+#ifdef CONFIG_RISCV_SBI_V01
+ SBI_EXT_0_1_SET_TIMER = 0x0,
+ SBI_EXT_0_1_CONSOLE_PUTCHAR = 0x1,
+ SBI_EXT_0_1_CONSOLE_GETCHAR = 0x2,
+ SBI_EXT_0_1_CLEAR_IPI = 0x3,
+ SBI_EXT_0_1_SEND_IPI = 0x4,
+ SBI_EXT_0_1_REMOTE_FENCE_I = 0x5,
+ SBI_EXT_0_1_REMOTE_SFENCE_VMA = 0x6,
+ SBI_EXT_0_1_REMOTE_SFENCE_VMA_ASID = 0x7,
+ SBI_EXT_0_1_SHUTDOWN = 0x8,
+#endif
+ SBI_EXT_BASE = 0x10,
+ SBI_EXT_TIME = 0x54494D45,
+ SBI_EXT_IPI = 0x735049,
+ SBI_EXT_RFENCE = 0x52464E43,
+ SBI_EXT_HSM = 0x48534D,
+ SBI_EXT_SRST = 0x53525354,
+ SBI_EXT_PMU = 0x504D55,
+
+	/* Experimental extensions must lie within this range */
+ SBI_EXT_EXPERIMENTAL_START = 0x08000000,
+ SBI_EXT_EXPERIMENTAL_END = 0x08FFFFFF,
+
+ /* Vendor extensions must lie within this range */
+ SBI_EXT_VENDOR_START = 0x09000000,
+ SBI_EXT_VENDOR_END = 0x09FFFFFF,
+};
+
+enum sbi_ext_base_fid {
+ SBI_EXT_BASE_GET_SPEC_VERSION = 0,
+ SBI_EXT_BASE_GET_IMP_ID,
+ SBI_EXT_BASE_GET_IMP_VERSION,
+ SBI_EXT_BASE_PROBE_EXT,
+ SBI_EXT_BASE_GET_MVENDORID,
+ SBI_EXT_BASE_GET_MARCHID,
+ SBI_EXT_BASE_GET_MIMPID,
+};
+
+enum sbi_ext_time_fid {
+ SBI_EXT_TIME_SET_TIMER = 0,
+};
+
+enum sbi_ext_ipi_fid {
+ SBI_EXT_IPI_SEND_IPI = 0,
+};
+
+enum sbi_ext_rfence_fid {
+ SBI_EXT_RFENCE_REMOTE_FENCE_I = 0,
+ SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA_VMID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_GVMA,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA_ASID,
+ SBI_EXT_RFENCE_REMOTE_HFENCE_VVMA,
+};
+
+enum sbi_ext_hsm_fid {
+ SBI_EXT_HSM_HART_START = 0,
+ SBI_EXT_HSM_HART_STOP,
+ SBI_EXT_HSM_HART_STATUS,
+ SBI_EXT_HSM_HART_SUSPEND,
+};
+
+enum sbi_hsm_hart_state {
+ SBI_HSM_STATE_STARTED = 0,
+ SBI_HSM_STATE_STOPPED,
+ SBI_HSM_STATE_START_PENDING,
+ SBI_HSM_STATE_STOP_PENDING,
+ SBI_HSM_STATE_SUSPENDED,
+ SBI_HSM_STATE_SUSPEND_PENDING,
+ SBI_HSM_STATE_RESUME_PENDING,
+};
+
+#define SBI_HSM_SUSP_BASE_MASK 0x7fffffff
+#define SBI_HSM_SUSP_NON_RET_BIT 0x80000000
+#define SBI_HSM_SUSP_PLAT_BASE 0x10000000
+
+#define SBI_HSM_SUSPEND_RET_DEFAULT 0x00000000
+#define SBI_HSM_SUSPEND_RET_PLATFORM SBI_HSM_SUSP_PLAT_BASE
+#define SBI_HSM_SUSPEND_RET_LAST SBI_HSM_SUSP_BASE_MASK
+#define SBI_HSM_SUSPEND_NON_RET_DEFAULT SBI_HSM_SUSP_NON_RET_BIT
+#define SBI_HSM_SUSPEND_NON_RET_PLATFORM (SBI_HSM_SUSP_NON_RET_BIT | \
+ SBI_HSM_SUSP_PLAT_BASE)
+#define SBI_HSM_SUSPEND_NON_RET_LAST (SBI_HSM_SUSP_NON_RET_BIT | \
+ SBI_HSM_SUSP_BASE_MASK)
+
+enum sbi_ext_srst_fid {
+ SBI_EXT_SRST_RESET = 0,
+};
+
+enum sbi_srst_reset_type {
+ SBI_SRST_RESET_TYPE_SHUTDOWN = 0,
+ SBI_SRST_RESET_TYPE_COLD_REBOOT,
+ SBI_SRST_RESET_TYPE_WARM_REBOOT,
+};
+
+enum sbi_srst_reset_reason {
+ SBI_SRST_RESET_REASON_NONE = 0,
+ SBI_SRST_RESET_REASON_SYS_FAILURE,
+};
+
+enum sbi_ext_pmu_fid {
+ SBI_EXT_PMU_NUM_COUNTERS = 0,
+ SBI_EXT_PMU_COUNTER_GET_INFO,
+ SBI_EXT_PMU_COUNTER_CFG_MATCH,
+ SBI_EXT_PMU_COUNTER_START,
+ SBI_EXT_PMU_COUNTER_STOP,
+ SBI_EXT_PMU_COUNTER_FW_READ,
+};
+
+union sbi_pmu_ctr_info {
+ unsigned long value;
+ struct {
+ unsigned long csr:12;
+ unsigned long width:6;
+#if __riscv_xlen == 32
+ unsigned long reserved:13;
+#else
+ unsigned long reserved:45;
+#endif
+ unsigned long type:1;
+ };
+};
+
+#define RISCV_PMU_RAW_EVENT_MASK GENMASK_ULL(47, 0)
+#define RISCV_PMU_RAW_EVENT_IDX 0x20000
+
+/** Generic hardware event codes specified in the SBI PMU extension */
+enum sbi_pmu_hw_generic_events_t {
+ SBI_PMU_HW_NO_EVENT = 0,
+ SBI_PMU_HW_CPU_CYCLES = 1,
+ SBI_PMU_HW_INSTRUCTIONS = 2,
+ SBI_PMU_HW_CACHE_REFERENCES = 3,
+ SBI_PMU_HW_CACHE_MISSES = 4,
+ SBI_PMU_HW_BRANCH_INSTRUCTIONS = 5,
+ SBI_PMU_HW_BRANCH_MISSES = 6,
+ SBI_PMU_HW_BUS_CYCLES = 7,
+ SBI_PMU_HW_STALLED_CYCLES_FRONTEND = 8,
+ SBI_PMU_HW_STALLED_CYCLES_BACKEND = 9,
+ SBI_PMU_HW_REF_CPU_CYCLES = 10,
+
+ SBI_PMU_HW_GENERAL_MAX,
+};
+
+/*
+ * Special "firmware" events provided by the firmware, even if the hardware
+ * does not support performance events. These events are encoded as a raw
+ * event type in the Linux kernel perf framework.
+ */
+enum sbi_pmu_fw_generic_events_t {
+ SBI_PMU_FW_MISALIGNED_LOAD = 0,
+ SBI_PMU_FW_MISALIGNED_STORE = 1,
+ SBI_PMU_FW_ACCESS_LOAD = 2,
+ SBI_PMU_FW_ACCESS_STORE = 3,
+ SBI_PMU_FW_ILLEGAL_INSN = 4,
+ SBI_PMU_FW_SET_TIMER = 5,
+ SBI_PMU_FW_IPI_SENT = 6,
+ SBI_PMU_FW_IPI_RCVD = 7,
+ SBI_PMU_FW_FENCE_I_SENT = 8,
+ SBI_PMU_FW_FENCE_I_RCVD = 9,
+ SBI_PMU_FW_SFENCE_VMA_SENT = 10,
+ SBI_PMU_FW_SFENCE_VMA_RCVD = 11,
+ SBI_PMU_FW_SFENCE_VMA_ASID_SENT = 12,
+ SBI_PMU_FW_SFENCE_VMA_ASID_RCVD = 13,
+
+ SBI_PMU_FW_HFENCE_GVMA_SENT = 14,
+ SBI_PMU_FW_HFENCE_GVMA_RCVD = 15,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_SENT = 16,
+ SBI_PMU_FW_HFENCE_GVMA_VMID_RCVD = 17,
+
+ SBI_PMU_FW_HFENCE_VVMA_SENT = 18,
+ SBI_PMU_FW_HFENCE_VVMA_RCVD = 19,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_SENT = 20,
+ SBI_PMU_FW_HFENCE_VVMA_ASID_RCVD = 21,
+ SBI_PMU_FW_MAX,
+};
+
+/* SBI PMU event types */
+enum sbi_pmu_event_type {
+ SBI_PMU_EVENT_TYPE_HW = 0x0,
+ SBI_PMU_EVENT_TYPE_CACHE = 0x1,
+ SBI_PMU_EVENT_TYPE_RAW = 0x2,
+ SBI_PMU_EVENT_TYPE_FW = 0xf,
+};
+
+/* SBI PMU counter types */
+enum sbi_pmu_ctr_type {
+ SBI_PMU_CTR_TYPE_HW = 0x0,
+ SBI_PMU_CTR_TYPE_FW,
+};
+
+/* Helper macros to decode event idx */
+#define SBI_PMU_EVENT_IDX_OFFSET 20
+#define SBI_PMU_EVENT_IDX_MASK 0xFFFFF
+#define SBI_PMU_EVENT_IDX_CODE_MASK 0xFFFF
+#define SBI_PMU_EVENT_IDX_TYPE_MASK 0xF0000
+#define SBI_PMU_EVENT_RAW_IDX 0x20000
+#define SBI_PMU_FIXED_CTR_MASK 0x07
+
+#define SBI_PMU_EVENT_CACHE_ID_CODE_MASK 0xFFF8
+#define SBI_PMU_EVENT_CACHE_OP_ID_CODE_MASK 0x06
+#define SBI_PMU_EVENT_CACHE_RESULT_ID_CODE_MASK 0x01
+
+#define SBI_PMU_EVENT_CACHE_ID_SHIFT 3
+#define SBI_PMU_EVENT_CACHE_OP_SHIFT 1
+
+#define SBI_PMU_EVENT_IDX_INVALID 0xFFFFFFFF
+
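+/*
+ * Editor's illustration (not part of the upstream header; the helper name
+ * is hypothetical): decoding an event idx with the masks above.
+ */
+static inline int sbi_pmu_event_idx_type(unsigned long event_idx)
+{
+	/* Bits [19:16] carry the event type, bits [15:0] the event code. */
+	return (event_idx & SBI_PMU_EVENT_IDX_TYPE_MASK) >> 16;
+}
+
+/*
+ * For example, event_idx 0x20004 decodes to type SBI_PMU_EVENT_TYPE_RAW
+ * (0x2) with code 0x0004 (event_idx & SBI_PMU_EVENT_IDX_CODE_MASK).
+ */
+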
+/* Flags defined for config matching function */
+#define SBI_PMU_CFG_FLAG_SKIP_MATCH (1 << 0)
+#define SBI_PMU_CFG_FLAG_CLEAR_VALUE (1 << 1)
+#define SBI_PMU_CFG_FLAG_AUTO_START (1 << 2)
+#define SBI_PMU_CFG_FLAG_SET_VUINH (1 << 3)
+#define SBI_PMU_CFG_FLAG_SET_VSINH (1 << 4)
+#define SBI_PMU_CFG_FLAG_SET_UINH (1 << 5)
+#define SBI_PMU_CFG_FLAG_SET_SINH (1 << 6)
+#define SBI_PMU_CFG_FLAG_SET_MINH (1 << 7)
+
+/* Flags defined for counter start function */
+#define SBI_PMU_START_FLAG_SET_INIT_VALUE (1 << 0)
+
+/* Flags defined for counter stop function */
+#define SBI_PMU_STOP_FLAG_RESET (1 << 0)
+
+#define SBI_SPEC_VERSION_DEFAULT 0x1
+#define SBI_SPEC_VERSION_MAJOR_SHIFT 24
+#define SBI_SPEC_VERSION_MAJOR_MASK 0x7f
+#define SBI_SPEC_VERSION_MINOR_MASK 0xffffff
+
+/* SBI return error codes */
+#define SBI_SUCCESS 0
+#define SBI_ERR_FAILURE -1
+#define SBI_ERR_NOT_SUPPORTED -2
+#define SBI_ERR_INVALID_PARAM -3
+#define SBI_ERR_DENIED -4
+#define SBI_ERR_INVALID_ADDRESS -5
+#define SBI_ERR_ALREADY_AVAILABLE -6
+#define SBI_ERR_ALREADY_STARTED -7
+#define SBI_ERR_ALREADY_STOPPED -8
+
+extern unsigned long sbi_spec_version;
+struct sbiret {
+ long error;
+ long value;
+};
+
+void sbi_init(void);
+struct sbiret sbi_ecall(int ext, int fid, unsigned long arg0,
+ unsigned long arg1, unsigned long arg2,
+ unsigned long arg3, unsigned long arg4,
+ unsigned long arg5);
+
+void sbi_console_putchar(int ch);
+int sbi_console_getchar(void);
+long sbi_get_mvendorid(void);
+long sbi_get_marchid(void);
+long sbi_get_mimpid(void);
+void sbi_set_timer(uint64_t stime_value);
+void sbi_shutdown(void);
+void sbi_send_ipi(unsigned int cpu);
+int sbi_remote_fence_i(const struct cpumask *cpu_mask);
+int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size);
+
+int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid);
+int sbi_remote_hfence_gvma(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size);
+int sbi_remote_hfence_gvma_vmid(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long vmid);
+int sbi_remote_hfence_vvma(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size);
+int sbi_remote_hfence_vvma_asid(const struct cpumask *cpu_mask,
+ unsigned long start,
+ unsigned long size,
+ unsigned long asid);
+long sbi_probe_extension(int ext);
+
+/* Check if current SBI specification version is 0.1 or not */
+static inline int sbi_spec_is_0_1(void)
+{
+ return (sbi_spec_version == SBI_SPEC_VERSION_DEFAULT) ? 1 : 0;
+}
+
+/* Get the major version of SBI */
+static inline unsigned long sbi_major_version(void)
+{
+ return (sbi_spec_version >> SBI_SPEC_VERSION_MAJOR_SHIFT) &
+ SBI_SPEC_VERSION_MAJOR_MASK;
+}
+
+/* Get the minor version of SBI */
+static inline unsigned long sbi_minor_version(void)
+{
+ return sbi_spec_version & SBI_SPEC_VERSION_MINOR_MASK;
+}
+
+/* Make SBI version */
+static inline unsigned long sbi_mk_version(unsigned long major,
+ unsigned long minor)
+{
+ return ((major & SBI_SPEC_VERSION_MAJOR_MASK) <<
+ SBI_SPEC_VERSION_MAJOR_SHIFT) | minor;
+}
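+
+/*
+ * Worked example: sbi_mk_version(1, 0) yields 0x01000000, for which
+ * sbi_major_version() returns 1 and sbi_minor_version() returns 0. The
+ * legacy 0.1 spec is SBI_SPEC_VERSION_DEFAULT (0x1): major 0, minor 1,
+ * which is exactly what sbi_spec_is_0_1() tests for.
+ */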
+
+int sbi_err_map_linux_errno(int err);
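+
+/*
+ * Editor's usage sketch (hypothetical function, not part of the upstream
+ * header): querying the SBI spec version through the base extension via
+ * the sbi_ecall() wrapper declared above.
+ */
+static inline unsigned long example_sbi_get_spec_version(void)
+{
+	struct sbiret ret;
+
+	ret = sbi_ecall(SBI_EXT_BASE, SBI_EXT_BASE_GET_SPEC_VERSION,
+			0, 0, 0, 0, 0, 0);
+	return ret.error ? 0 : (unsigned long)ret.value;
+}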
+#else /* CONFIG_RISCV_SBI */
+static inline int sbi_remote_fence_i(const struct cpumask *cpu_mask) { return -1; }
+static inline void sbi_init(void) {}
+#endif /* CONFIG_RISCV_SBI */
+
+unsigned long riscv_cached_mvendorid(unsigned int cpu_id);
+unsigned long riscv_cached_marchid(unsigned int cpu_id);
+unsigned long riscv_cached_mimpid(unsigned int cpu_id);
+
+#if IS_ENABLED(CONFIG_SMP) && IS_ENABLED(CONFIG_RISCV_SBI)
+void sbi_ipi_init(void);
+#else
+static inline void sbi_ipi_init(void) { }
+#endif
+
+#endif /* _ASM_RISCV_SBI_H */
diff --git a/arch/riscv/include/asm/seccomp.h b/arch/riscv/include/asm/seccomp.h
new file mode 100644
index 0000000000..c7ee6a3507
--- /dev/null
+++ b/arch/riscv/include/asm/seccomp.h
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_SECCOMP_H
+#define _ASM_SECCOMP_H
+
+#include <asm/unistd.h>
+
+#include <asm-generic/seccomp.h>
+
+#ifdef CONFIG_64BIT
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV64
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "riscv64"
+#else /* !CONFIG_64BIT */
+# define SECCOMP_ARCH_NATIVE AUDIT_ARCH_RISCV32
+# define SECCOMP_ARCH_NATIVE_NR NR_syscalls
+# define SECCOMP_ARCH_NATIVE_NAME "riscv32"
+#endif
+
+#endif /* _ASM_SECCOMP_H */
diff --git a/arch/riscv/include/asm/sections.h b/arch/riscv/include/asm/sections.h
new file mode 100644
index 0000000000..a393d5035c
--- /dev/null
+++ b/arch/riscv/include/asm/sections.h
@@ -0,0 +1,34 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ */
+#ifndef __ASM_SECTIONS_H
+#define __ASM_SECTIONS_H
+
+#include <asm-generic/sections.h>
+#include <linux/mm.h>
+
+extern char _start[];
+extern char _start_kernel[];
+extern char __init_data_begin[], __init_data_end[];
+extern char __init_text_begin[], __init_text_end[];
+extern char __alt_start[], __alt_end[];
+extern char __exittext_begin[], __exittext_end[];
+
+static inline bool is_va_kernel_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)_start;
+ uintptr_t end = (uintptr_t)__init_data_begin;
+
+ return va >= start && va < end;
+}
+
+static inline bool is_va_kernel_lm_alias_text(uintptr_t va)
+{
+ uintptr_t start = (uintptr_t)lm_alias(_start);
+ uintptr_t end = (uintptr_t)lm_alias(__init_data_begin);
+
+ return va >= start && va < end;
+}
+
+#endif /* __ASM_SECTIONS_H */
diff --git a/arch/riscv/include/asm/semihost.h b/arch/riscv/include/asm/semihost.h
new file mode 100644
index 0000000000..557a349381
--- /dev/null
+++ b/arch/riscv/include/asm/semihost.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2022 tinylab.org
+ * Author: Bin Meng <bmeng@tinylab.org>
+ */
+
+#ifndef _RISCV_SEMIHOST_H_
+#define _RISCV_SEMIHOST_H_
+
+struct uart_port;
+
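+/*
+ * The slli/ebreak/srai triple below is the RISC-V semihosting trap
+ * sequence; a0 = 3 selects the semihosting SYS_WRITEC (write character)
+ * call and a1 points at the character to emit.
+ */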
+static inline void smh_putc(struct uart_port *port, unsigned char c)
+{
+ asm volatile("addi a1, %0, 0\n"
+ "addi a0, zero, 3\n"
+ ".balign 16\n"
+ ".option push\n"
+ ".option norvc\n"
+ "slli zero, zero, 0x1f\n"
+ "ebreak\n"
+ "srai zero, zero, 0x7\n"
+ ".option pop\n"
+ : : "r" (&c) : "a0", "a1", "memory");
+}
+
+#endif /* _RISCV_SEMIHOST_H_ */
diff --git a/arch/riscv/include/asm/set_memory.h b/arch/riscv/include/asm/set_memory.h
new file mode 100644
index 0000000000..ec11001c3f
--- /dev/null
+++ b/arch/riscv/include/asm/set_memory.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2019 SiFive
+ */
+
+#ifndef _ASM_RISCV_SET_MEMORY_H
+#define _ASM_RISCV_SET_MEMORY_H
+
+#ifndef __ASSEMBLY__
+/*
+ * Functions to change memory attributes.
+ */
+#ifdef CONFIG_MMU
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+int set_memory_rw_nx(unsigned long addr, int numpages);
+static __always_inline int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start,
+ int num_pages))
+{
+ unsigned long start = (unsigned long)startp;
+ unsigned long end = (unsigned long)endp;
+ int num_pages = PAGE_ALIGN(end - start) >> PAGE_SHIFT;
+
+ return set_memory(start, num_pages);
+}
+#else
+static inline int set_memory_ro(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_x(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_nx(unsigned long addr, int numpages) { return 0; }
+static inline int set_memory_rw_nx(unsigned long addr, int numpages) { return 0; }
+static inline int set_kernel_memory(char *startp, char *endp,
+ int (*set_memory)(unsigned long start,
+ int num_pages))
+{
+ return 0;
+}
+#endif
+
+int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_default_noflush(struct page *page);
+bool kernel_page_present(struct page *page);
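+
+/*
+ * Editor's usage sketch (assumption): set_kernel_memory() pairs a pair of
+ * section boundaries with one of the attribute helpers, e.g.
+ *
+ *	set_kernel_memory(__start_rodata, __end_rodata, set_memory_ro);
+ */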
+
+#endif /* __ASSEMBLY__ */
+
+#ifdef CONFIG_STRICT_KERNEL_RWX
+#ifdef CONFIG_64BIT
+#define SECTION_ALIGN (1 << 21)
+#else
+#define SECTION_ALIGN (1 << 22)
+#endif
+#else /* !CONFIG_STRICT_KERNEL_RWX */
+#define SECTION_ALIGN L1_CACHE_BYTES
+#endif /* CONFIG_STRICT_KERNEL_RWX */
+
+#define PECOFF_SECTION_ALIGNMENT 0x1000
+#define PECOFF_FILE_ALIGNMENT 0x200
+
+#endif /* _ASM_RISCV_SET_MEMORY_H */
diff --git a/arch/riscv/include/asm/signal.h b/arch/riscv/include/asm/signal.h
new file mode 100644
index 0000000000..956ae0a01b
--- /dev/null
+++ b/arch/riscv/include/asm/signal.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL_H
+#define __ASM_SIGNAL_H
+
+#include <uapi/asm/signal.h>
+#include <uapi/asm/ptrace.h>
+
+asmlinkage __visible
+void do_work_pending(struct pt_regs *regs, unsigned long thread_info_flags);
+
+#endif
diff --git a/arch/riscv/include/asm/signal32.h b/arch/riscv/include/asm/signal32.h
new file mode 100644
index 0000000000..96dc56932e
--- /dev/null
+++ b/arch/riscv/include/asm/signal32.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef __ASM_SIGNAL32_H
+#define __ASM_SIGNAL32_H
+
+#if IS_ENABLED(CONFIG_COMPAT)
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs);
+#else
+static inline
+int compat_setup_rt_frame(struct ksignal *ksig, sigset_t *set,
+ struct pt_regs *regs)
+{
+ return -1;
+}
+#endif
+
+#endif
diff --git a/arch/riscv/include/asm/smp.h b/arch/riscv/include/asm/smp.h
new file mode 100644
index 0000000000..0d555847cd
--- /dev/null
+++ b/arch/riscv/include/asm/smp.h
@@ -0,0 +1,128 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SMP_H
+#define _ASM_RISCV_SMP_H
+
+#include <linux/cpumask.h>
+#include <linux/irqreturn.h>
+#include <linux/thread_info.h>
+
+#define INVALID_HARTID ULONG_MAX
+
+struct seq_file;
+extern unsigned long boot_cpu_hartid;
+
+#ifdef CONFIG_SMP
+
+#include <linux/jump_label.h>
+
+/*
+ * Mapping between Linux logical CPU index and hartid.
+ */
+extern unsigned long __cpuid_to_hartid_map[NR_CPUS];
+#define cpuid_to_hartid_map(cpu) __cpuid_to_hartid_map[cpu]
+
+/* print IPI stats */
+void show_ipi_stats(struct seq_file *p, int prec);
+
+/* SMP initialization hook for setup_arch */
+void __init setup_smp(void);
+
+/* Hook for the generic smp_call_function_many() routine. */
+void arch_send_call_function_ipi_mask(struct cpumask *mask);
+
+/* Hook for the generic smp_call_function_single() routine. */
+void arch_send_call_function_single_ipi(int cpu);
+
+int riscv_hartid_to_cpuid(unsigned long hartid);
+
+/* Enable IPI for CPU hotplug */
+void riscv_ipi_enable(void);
+
+/* Disable IPI for CPU hotplug */
+void riscv_ipi_disable(void);
+
+/* Check if IPI interrupt numbers are available */
+bool riscv_ipi_have_virq_range(void);
+
+/* Set the IPI interrupt numbers for arch (called by irqchip drivers) */
+void riscv_ipi_set_virq_range(int virq, int nr, bool use_for_rfence);
+
+/* Check if we can use IPIs for remote FENCEs */
+DECLARE_STATIC_KEY_FALSE(riscv_ipi_for_rfence);
+#define riscv_use_ipi_for_rfence() \
+ static_branch_unlikely(&riscv_ipi_for_rfence)
+
+/* Check whether the other CPUs have stopped */
+bool smp_crash_stop_failed(void);
+
+/* Secondary hart entry */
+asmlinkage void smp_callin(void);
+
+/*
+ * Obtains the logical CPU id of the currently executing task. This relies on
+ * THREAD_INFO_IN_TASK, but we define that unconditionally.
+ */
+#define raw_smp_processor_id() (current_thread_info()->cpu)
+
+#if defined CONFIG_HOTPLUG_CPU
+int __cpu_disable(void);
+static inline void __cpu_die(unsigned int cpu) { }
+#endif /* CONFIG_HOTPLUG_CPU */
+
+#else
+
+static inline void show_ipi_stats(struct seq_file *p, int prec)
+{
+}
+
+static inline int riscv_hartid_to_cpuid(unsigned long hartid)
+{
+ if (hartid == boot_cpu_hartid)
+ return 0;
+
+ return -1;
+}
+static inline unsigned long cpuid_to_hartid_map(int cpu)
+{
+ return boot_cpu_hartid;
+}
+
+static inline void riscv_ipi_enable(void)
+{
+}
+
+static inline void riscv_ipi_disable(void)
+{
+}
+
+static inline bool riscv_ipi_have_virq_range(void)
+{
+ return false;
+}
+
+static inline void riscv_ipi_set_virq_range(int virq, int nr,
+ bool use_for_rfence)
+{
+}
+
+static inline bool riscv_use_ipi_for_rfence(void)
+{
+ return false;
+}
+
+#endif /* CONFIG_SMP */
+
+#if defined(CONFIG_HOTPLUG_CPU) && defined(CONFIG_SMP)
+bool cpu_has_hotplug(unsigned int cpu);
+#else
+static inline bool cpu_has_hotplug(unsigned int cpu)
+{
+ return false;
+}
+#endif
+
+#endif /* _ASM_RISCV_SMP_H */
diff --git a/arch/riscv/include/asm/soc.h b/arch/riscv/include/asm/soc.h
new file mode 100644
index 0000000000..f494066051
--- /dev/null
+++ b/arch/riscv/include/asm/soc.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 Western Digital Corporation or its affiliates.
+ * Copyright (C) 2020 Google, Inc
+ */
+
+#ifndef _ASM_RISCV_SOC_H
+#define _ASM_RISCV_SOC_H
+
+#include <linux/of.h>
+#include <linux/linkage.h>
+#include <linux/types.h>
+
+#define SOC_EARLY_INIT_DECLARE(name, compat, fn) \
+ static const struct of_device_id __soc_early_init__##name \
+ __used __section("__soc_early_init_table") \
+ = { .compatible = compat, .data = fn }
+
+void soc_early_init(void);
+
+extern unsigned long __soc_early_init_table_start;
+extern unsigned long __soc_early_init_table_end;
+
+#endif
diff --git a/arch/riscv/include/asm/sparsemem.h b/arch/riscv/include/asm/sparsemem.h
new file mode 100644
index 0000000000..63acaecc33
--- /dev/null
+++ b/arch/riscv/include/asm/sparsemem.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_SPARSEMEM_H
+#define _ASM_RISCV_SPARSEMEM_H
+
+#ifdef CONFIG_SPARSEMEM
+#ifdef CONFIG_64BIT
+#define MAX_PHYSMEM_BITS 56
+#else
+#define MAX_PHYSMEM_BITS 34
+#endif /* CONFIG_64BIT */
+#define SECTION_SIZE_BITS 27
+#endif /* CONFIG_SPARSEMEM */
+
+#endif /* _ASM_RISCV_SPARSEMEM_H */
diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h
new file mode 100644
index 0000000000..43895b90fe
--- /dev/null
+++ b/arch/riscv/include/asm/stackprotector.h
@@ -0,0 +1,22 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_STACKPROTECTOR_H
+#define _ASM_RISCV_STACKPROTECTOR_H
+
+extern unsigned long __stack_chk_guard;
+
+/*
+ * Initialize the stackprotector canary value.
+ *
+ * NOTE: this must only be called from functions that never return,
+ * and it must always be inlined.
+ */
+static __always_inline void boot_init_stack_canary(void)
+{
+ unsigned long canary = get_random_canary();
+
+ current->stack_canary = canary;
+ if (!IS_ENABLED(CONFIG_STACKPROTECTOR_PER_TASK))
+ __stack_chk_guard = current->stack_canary;
+}
+#endif /* _ASM_RISCV_STACKPROTECTOR_H */
diff --git a/arch/riscv/include/asm/stacktrace.h b/arch/riscv/include/asm/stacktrace.h
new file mode 100644
index 0000000000..f7e8ef2418
--- /dev/null
+++ b/arch/riscv/include/asm/stacktrace.h
@@ -0,0 +1,24 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _ASM_RISCV_STACKTRACE_H
+#define _ASM_RISCV_STACKTRACE_H
+
+#include <linux/sched.h>
+#include <asm/ptrace.h>
+
+struct stackframe {
+ unsigned long fp;
+ unsigned long ra;
+};
+
+extern void notrace walk_stackframe(struct task_struct *task, struct pt_regs *regs,
+ bool (*fn)(void *, unsigned long), void *arg);
+extern void dump_backtrace(struct pt_regs *regs, struct task_struct *task,
+ const char *loglvl);
+
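+/*
+ * current->stack and a stack pointer on that stack differ only in the low
+ * THREAD_SIZE-aligned offset bits, so XOR-ing them and masking off those
+ * bits yields zero exactly when sp lies on the task's stack.
+ */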
+static inline bool on_thread_stack(void)
+{
+ return !(((unsigned long)(current->stack) ^ current_stack_pointer) & ~(THREAD_SIZE - 1));
+}
+
+#endif /* _ASM_RISCV_STACKTRACE_H */
diff --git a/arch/riscv/include/asm/string.h b/arch/riscv/include/asm/string.h
new file mode 100644
index 0000000000..a96b1fea24
--- /dev/null
+++ b/arch/riscv/include/asm/string.h
@@ -0,0 +1,42 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_STRING_H
+#define _ASM_RISCV_STRING_H
+
+#include <linux/types.h>
+#include <linux/linkage.h>
+
+#define __HAVE_ARCH_MEMSET
+extern asmlinkage void *memset(void *, int, size_t);
+extern asmlinkage void *__memset(void *, int, size_t);
+#define __HAVE_ARCH_MEMCPY
+extern asmlinkage void *memcpy(void *, const void *, size_t);
+extern asmlinkage void *__memcpy(void *, const void *, size_t);
+#define __HAVE_ARCH_MEMMOVE
+extern asmlinkage void *memmove(void *, const void *, size_t);
+extern asmlinkage void *__memmove(void *, const void *, size_t);
+
+#define __HAVE_ARCH_STRCMP
+extern asmlinkage int strcmp(const char *cs, const char *ct);
+
+#define __HAVE_ARCH_STRLEN
+extern asmlinkage __kernel_size_t strlen(const char *);
+
+#define __HAVE_ARCH_STRNCMP
+extern asmlinkage int strncmp(const char *cs, const char *ct, size_t count);
+
+/* For files that do not want KASAN checking, use the uninstrumented variants. */
+#if defined(CONFIG_KASAN) && !defined(__SANITIZE_ADDRESS__)
+#define memcpy(dst, src, len) __memcpy(dst, src, len)
+#define memset(s, c, n) __memset(s, c, n)
+#define memmove(dst, src, len) __memmove(dst, src, len)
+
+#ifndef __NO_FORTIFY
+#define __NO_FORTIFY /* FORTIFY_SOURCE uses __builtin_memcpy, etc. */
+#endif
+
+#endif
+#endif /* _ASM_RISCV_STRING_H */
diff --git a/arch/riscv/include/asm/suspend.h b/arch/riscv/include/asm/suspend.h
new file mode 100644
index 0000000000..02f8786738
--- /dev/null
+++ b/arch/riscv/include/asm/suspend.h
@@ -0,0 +1,58 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2021 Western Digital Corporation or its affiliates.
+ * Copyright (c) 2022 Ventana Micro Systems Inc.
+ */
+
+#ifndef _ASM_RISCV_SUSPEND_H
+#define _ASM_RISCV_SUSPEND_H
+
+#include <asm/ptrace.h>
+
+struct suspend_context {
+ /* Saved and restored by low-level functions */
+ struct pt_regs regs;
+ /* Saved and restored by high-level functions */
+ unsigned long scratch;
+ unsigned long tvec;
+ unsigned long ie;
+#ifdef CONFIG_MMU
+ unsigned long satp;
+#endif
+};
+
+/*
+ * Used by hibernation core and cleared during resume sequence
+ */
+extern int in_suspend;
+
+/* Low-level CPU suspend entry function */
+int __cpu_suspend_enter(struct suspend_context *context);
+
+/* High-level CPU suspend which will save context and call finish() */
+int cpu_suspend(unsigned long arg,
+ int (*finish)(unsigned long arg,
+ unsigned long entry,
+ unsigned long context));
+
+/* Low-level CPU resume entry function */
+int __cpu_resume_enter(unsigned long hartid, unsigned long context);
+
+/* Used to save and restore the CSRs */
+void suspend_save_csrs(struct suspend_context *context);
+void suspend_restore_csrs(struct suspend_context *context);
+
+/* Low-level API to support hibernation */
+int swsusp_arch_suspend(void);
+int swsusp_arch_resume(void);
+int arch_hibernation_header_save(void *addr, unsigned int max_size);
+int arch_hibernation_header_restore(void *addr);
+int __hibernate_cpu_resume(void);
+
+/* Used to resume on the CPU we hibernated on */
+int hibernate_resume_nonboot_cpu_disable(void);
+
+asmlinkage void hibernate_restore_image(unsigned long resume_satp, unsigned long satp_temp,
+ unsigned long cpu_resume);
+asmlinkage int hibernate_core_restore_code(void);
+#endif
diff --git a/arch/riscv/include/asm/switch_to.h b/arch/riscv/include/asm/switch_to.h
new file mode 100644
index 0000000000..a727be723c
--- /dev/null
+++ b/arch/riscv/include/asm/switch_to.h
@@ -0,0 +1,87 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_SWITCH_TO_H
+#define _ASM_RISCV_SWITCH_TO_H
+
+#include <linux/jump_label.h>
+#include <linux/sched/task_stack.h>
+#include <asm/vector.h>
+#include <asm/hwcap.h>
+#include <asm/processor.h>
+#include <asm/ptrace.h>
+#include <asm/csr.h>
+
+#ifdef CONFIG_FPU
+extern void __fstate_save(struct task_struct *save_to);
+extern void __fstate_restore(struct task_struct *restore_from);
+
+static inline void __fstate_clean(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_FS) | SR_FS_CLEAN;
+}
+
+static inline void fstate_off(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_FS) | SR_FS_OFF;
+}
+
+static inline void fstate_save(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_FS) == SR_FS_DIRTY) {
+ __fstate_save(task);
+ __fstate_clean(regs);
+ }
+}
+
+static inline void fstate_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_FS) != SR_FS_OFF) {
+ __fstate_restore(task);
+ __fstate_clean(regs);
+ }
+}
+
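+/*
+ * status.SD is the summary "some state is dirty" bit; checking it first
+ * lets the common clean-state case skip the call to fstate_save(), which
+ * itself re-checks that the FP state in particular is dirty.
+ */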
+static inline void __switch_to_fpu(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(prev);
+ if (unlikely(regs->status & SR_SD))
+ fstate_save(prev, regs);
+ fstate_restore(next, task_pt_regs(next));
+}
+
+static __always_inline bool has_fpu(void)
+{
+ return riscv_has_extension_likely(RISCV_ISA_EXT_f) ||
+ riscv_has_extension_likely(RISCV_ISA_EXT_d);
+}
+#else
+static __always_inline bool has_fpu(void) { return false; }
+#define fstate_save(task, regs) do { } while (0)
+#define fstate_restore(task, regs) do { } while (0)
+#define __switch_to_fpu(__prev, __next) do { } while (0)
+#endif
+
+extern struct task_struct *__switch_to(struct task_struct *,
+ struct task_struct *);
+
+#define switch_to(prev, next, last) \
+do { \
+ struct task_struct *__prev = (prev); \
+ struct task_struct *__next = (next); \
+ if (has_fpu()) \
+ __switch_to_fpu(__prev, __next); \
+ if (has_vector()) \
+ __switch_to_vector(__prev, __next); \
+ ((last) = __switch_to(__prev, __next)); \
+} while (0)
+
+#endif /* _ASM_RISCV_SWITCH_TO_H */
diff --git a/arch/riscv/include/asm/syscall.h b/arch/riscv/include/asm/syscall.h
new file mode 100644
index 0000000000..121fff429d
--- /dev/null
+++ b/arch/riscv/include/asm/syscall.h
@@ -0,0 +1,102 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2008-2009 Red Hat, Inc. All rights reserved.
+ * Copyright 2010 Tilera Corporation. All Rights Reserved.
+ * Copyright 2015 Regents of the University of California, Berkeley
+ *
+ * See asm-generic/syscall.h for descriptions of what we must do here.
+ */
+
+#ifndef _ASM_RISCV_SYSCALL_H
+#define _ASM_RISCV_SYSCALL_H
+
+#include <asm/hwprobe.h>
+#include <uapi/linux/audit.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+/* The array of function pointers for syscalls. */
+extern void * const sys_call_table[];
+extern void * const compat_sys_call_table[];
+
+/*
+ * Only the low 32 bits of the syscall number are meaningful, so we return int.
+ * This importantly ignores the high bits on 64-bit, so comparisons
+ * sign-extend the low 32 bits.
+ */
+static inline int syscall_get_nr(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a7;
+}
+
+static inline void syscall_rollback(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ regs->a0 = regs->orig_a0;
+}
+
+static inline long syscall_get_error(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ unsigned long error = regs->a0;
+
+ return IS_ERR_VALUE(error) ? error : 0;
+}
+
+static inline long syscall_get_return_value(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ return regs->a0;
+}
+
+static inline void syscall_set_return_value(struct task_struct *task,
+ struct pt_regs *regs,
+ int error, long val)
+{
+ regs->a0 = (long) error ?: val;
+}
+
+static inline void syscall_get_arguments(struct task_struct *task,
+ struct pt_regs *regs,
+ unsigned long *args)
+{
+ args[0] = regs->orig_a0;
+ args++;
+ memcpy(args, &regs->a1, 5 * sizeof(args[0]));
+}
+
+static inline int syscall_get_arch(struct task_struct *task)
+{
+#ifdef CONFIG_64BIT
+ return AUDIT_ARCH_RISCV64;
+#else
+ return AUDIT_ARCH_RISCV32;
+#endif
+}
+
+typedef long (*syscall_t)(const struct pt_regs *);
+static inline void syscall_handler(struct pt_regs *regs, ulong syscall)
+{
+ syscall_t fn;
+
+#ifdef CONFIG_COMPAT
+ if ((regs->status & SR_UXL) == SR_UXL_32)
+ fn = compat_sys_call_table[syscall];
+ else
+#endif
+ fn = sys_call_table[syscall];
+
+ regs->a0 = fn(regs);
+}
+
+static inline bool arch_syscall_is_vdso_sigreturn(struct pt_regs *regs)
+{
+ return false;
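+/*
+ * In the suspend-type encoding above, bit 31 distinguishes non-retentive
+ * suspend (SBI_HSM_SUSP_NON_RET_BIT set) from retentive suspend, and the
+ * low 31 bits select the default or platform-specific suspend state.
+ */
+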
+}
+
+asmlinkage long sys_riscv_flush_icache(uintptr_t, uintptr_t, uintptr_t);
+
+asmlinkage long sys_riscv_hwprobe(struct riscv_hwprobe *, size_t, size_t,
+ unsigned long *, unsigned int);
+#endif /* _ASM_RISCV_SYSCALL_H */
diff --git a/arch/riscv/include/asm/syscall_wrapper.h b/arch/riscv/include/asm/syscall_wrapper.h
new file mode 100644
index 0000000000..eeec04b7da
--- /dev/null
+++ b/arch/riscv/include/asm/syscall_wrapper.h
@@ -0,0 +1,82 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * syscall_wrapper.h - riscv specific wrappers to syscall definitions
+ *
+ * Based on arch/arm64/include/syscall_wrapper.h
+ */
+
+#ifndef __ASM_SYSCALL_WRAPPER_H
+#define __ASM_SYSCALL_WRAPPER_H
+
+#include <asm/ptrace.h>
+
+asmlinkage long __riscv_sys_ni_syscall(const struct pt_regs *);
+
+#define SC_RISCV_REGS_TO_ARGS(x, ...) \
+ __MAP(x,__SC_ARGS \
+ ,,regs->orig_a0,,regs->a1,,regs->a2 \
+ ,,regs->a3,,regs->a4,,regs->a5,,regs->a6)
+
+#ifdef CONFIG_COMPAT
+
+#define COMPAT_SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__riscv_compat_sys##name, ERRNO); \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __riscv_compat_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_compat_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_compat_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ return __do_compat_sys##name(__MAP(x,__SC_DELOUSE,__VA_ARGS__)); \
+ } \
+ static inline long __do_compat_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define COMPAT_SYSCALL_DEFINE0(sname) \
+ asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__riscv_compat_sys_##sname, ERRNO); \
+ asmlinkage long __riscv_compat_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL_COMPAT(name) \
+ asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs); \
+ asmlinkage long __weak __riscv_compat_sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
+#endif /* CONFIG_COMPAT */
+
+#define __SYSCALL_DEFINEx(x, name, ...) \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs); \
+ ALLOW_ERROR_INJECTION(__riscv_sys##name, ERRNO); \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)); \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__)); \
+ asmlinkage long __riscv_sys##name(const struct pt_regs *regs) \
+ { \
+ return __se_sys##name(SC_RISCV_REGS_TO_ARGS(x,__VA_ARGS__)); \
+ } \
+ static long __se_sys##name(__MAP(x,__SC_LONG,__VA_ARGS__)) \
+ { \
+ long ret = __do_sys##name(__MAP(x,__SC_CAST,__VA_ARGS__)); \
+ __MAP(x,__SC_TEST,__VA_ARGS__); \
+ __PROTECT(x, ret,__MAP(x,__SC_ARGS,__VA_ARGS__)); \
+ return ret; \
+ } \
+ static inline long __do_sys##name(__MAP(x,__SC_DECL,__VA_ARGS__))
+
+#define SYSCALL_DEFINE0(sname) \
+ SYSCALL_METADATA(_##sname, 0); \
+ asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused); \
+ ALLOW_ERROR_INJECTION(__riscv_sys_##sname, ERRNO); \
+ asmlinkage long __riscv_sys_##sname(const struct pt_regs *__unused)
+
+#define COND_SYSCALL(name) \
+ asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs); \
+ asmlinkage long __weak __riscv_sys_##name(const struct pt_regs *regs) \
+ { \
+ return sys_ni_syscall(); \
+ }
+
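+/*
+ * Editor's sketch (hypothetical syscall name): with the wrappers above, a
+ * definition such as
+ *
+ *	SYSCALL_DEFINE2(example, int, fd, unsigned long, flags)
+ *
+ * emits __riscv_sys_example(const struct pt_regs *regs), which unpacks fd
+ * from regs->orig_a0 and flags from regs->a1 before calling the body.
+ */
+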
+#endif /* __ASM_SYSCALL_WRAPPER_H */
diff --git a/arch/riscv/include/asm/thread_info.h b/arch/riscv/include/asm/thread_info.h
new file mode 100644
index 0000000000..d18ce0113c
--- /dev/null
+++ b/arch/riscv/include/asm/thread_info.h
@@ -0,0 +1,104 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_THREAD_INFO_H
+#define _ASM_RISCV_THREAD_INFO_H
+
+#include <asm/page.h>
+#include <linux/const.h>
+
+/* thread information allocation */
+#define THREAD_SIZE_ORDER CONFIG_THREAD_SIZE_ORDER
+#define THREAD_SIZE (PAGE_SIZE << THREAD_SIZE_ORDER)
+
+/*
+ * By aligning VMAP'd stacks to 2 * THREAD_SIZE, we can detect overflow by
+ * checking sp & (1 << THREAD_SHIFT), which we can do cheaply in the entry
+ * assembly.
+ */
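+/*
+ * Editor's illustration (assuming 4K pages and THREAD_SIZE_ORDER == 2):
+ * THREAD_SIZE is 16K, THREAD_SHIFT is 14 and THREAD_ALIGN is 32K; every
+ * in-bounds stack pointer then has bit 14 clear, and a set bit means the
+ * stack has overflowed into the region below it.
+ */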
+#ifdef CONFIG_VMAP_STACK
+#define THREAD_ALIGN (2 * THREAD_SIZE)
+#else
+#define THREAD_ALIGN THREAD_SIZE
+#endif
+
+#define THREAD_SHIFT (PAGE_SHIFT + THREAD_SIZE_ORDER)
+#define OVERFLOW_STACK_SIZE SZ_4K
+#define SHADOW_OVERFLOW_STACK_SIZE (1024)
+
+#define IRQ_STACK_SIZE THREAD_SIZE
+
+#ifndef __ASSEMBLY__
+
+#include <asm/processor.h>
+#include <asm/csr.h>
+
+/*
+ * low level task data that entry.S needs immediate access to
+ * - this struct should fit entirely inside of one cache line
+ * - if the members of this struct change, the assembly constants
+ * in asm-offsets.c must be updated accordingly
+ * - thread_info is included in task_struct at an offset of 0. This means that
+ * tp points to both thread_info and task_struct.
+ */
+struct thread_info {
+ unsigned long flags; /* low level flags */
+ int preempt_count; /* 0=>preemptible, <0=>BUG */
+ /*
+ * These stack pointers are overwritten on every system call or
+ * exception. SP is also saved to the stack so it can be recovered when
+ * overwritten.
+ */
+ long kernel_sp; /* Kernel stack pointer */
+ long user_sp; /* User stack pointer */
+ int cpu;
+ unsigned long syscall_work; /* SYSCALL_WORK_ flags */
+};
+
+/*
+ * macros/functions for gaining access to the thread information structure
+ *
+ * preempt_count needs to be 1 initially, until the scheduler is functional.
+ */
+#define INIT_THREAD_INFO(tsk) \
+{ \
+ .flags = 0, \
+ .preempt_count = INIT_PREEMPT_COUNT, \
+}
+
+void arch_release_task_struct(struct task_struct *tsk);
+int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src);
+
+#endif /* !__ASSEMBLY__ */
+
+/*
+ * thread information flags
+ * - these are process state flags that various assembly files may need to
+ * access
+ * - pending work-to-be-done flags are in lowest half-word
+ * - other flags in upper half-word(s)
+ */
+#define TIF_NOTIFY_RESUME 1 /* callback before returning to user */
+#define TIF_SIGPENDING 2 /* signal pending */
+#define TIF_NEED_RESCHED 3 /* rescheduling necessary */
+#define TIF_RESTORE_SIGMASK 4 /* restore signal mask in do_signal() */
+#define TIF_MEMDIE 5 /* is terminating due to OOM killer */
+#define TIF_NOTIFY_SIGNAL 9 /* signal notifications exist */
+#define TIF_UPROBE 10 /* uprobe breakpoint or singlestep */
+#define TIF_32BIT 11 /* compat-mode 32bit process */
+
+#define _TIF_NOTIFY_RESUME (1 << TIF_NOTIFY_RESUME)
+#define _TIF_SIGPENDING (1 << TIF_SIGPENDING)
+#define _TIF_NEED_RESCHED (1 << TIF_NEED_RESCHED)
+#define _TIF_NOTIFY_SIGNAL (1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_UPROBE (1 << TIF_UPROBE)
+
+#define _TIF_WORK_MASK \
+ (_TIF_NOTIFY_RESUME | _TIF_SIGPENDING | _TIF_NEED_RESCHED | \
+ _TIF_NOTIFY_SIGNAL | _TIF_UPROBE)
+
+#endif /* _ASM_RISCV_THREAD_INFO_H */
diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h
new file mode 100644
index 0000000000..a06697846e
--- /dev/null
+++ b/arch/riscv/include/asm/timex.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TIMEX_H
+#define _ASM_RISCV_TIMEX_H
+
+#include <asm/csr.h>
+
+typedef unsigned long cycles_t;
+
+#ifdef CONFIG_RISCV_M_MODE
+
+#include <asm/clint.h>
+
+#ifdef CONFIG_64BIT
+static inline cycles_t get_cycles(void)
+{
+ return readq_relaxed(clint_time_val);
+}
+#else /* !CONFIG_64BIT */
+static inline u32 get_cycles(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val));
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return readl_relaxed(((u32 *)clint_time_val) + 1);
+}
+#define get_cycles_hi get_cycles_hi
+#endif /* CONFIG_64BIT */
+
+/*
+ * Much like MIPS, we may not have a viable counter to use at an early point
+ * in the boot process. Until clint_time_val is set up, fall back to
+ * random_get_entropy_fallback() instead.
+ */
+static inline unsigned long random_get_entropy(void)
+{
+ if (unlikely(clint_time_val == NULL))
+ return random_get_entropy_fallback();
+ return get_cycles();
+}
+#define random_get_entropy() random_get_entropy()
+
+#else /* CONFIG_RISCV_M_MODE */
+
+static inline cycles_t get_cycles(void)
+{
+ return csr_read(CSR_TIME);
+}
+#define get_cycles get_cycles
+
+static inline u32 get_cycles_hi(void)
+{
+ return csr_read(CSR_TIMEH);
+}
+#define get_cycles_hi get_cycles_hi
+
+#endif /* !CONFIG_RISCV_M_MODE */
+
+#ifdef CONFIG_64BIT
+static inline u64 get_cycles64(void)
+{
+ return get_cycles();
+}
+#else /* CONFIG_64BIT */
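+/*
+ * Re-read the high half until it is stable so that a low-to-high carry
+ * between the two reads cannot produce a torn 64-bit value.
+ */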
+static inline u64 get_cycles64(void)
+{
+ u32 hi, lo;
+
+ do {
+ hi = get_cycles_hi();
+ lo = get_cycles();
+ } while (hi != get_cycles_hi());
+
+ return ((u64)hi << 32) | lo;
+}
+#endif /* CONFIG_64BIT */
+
+#define ARCH_HAS_READ_CURRENT_TIMER
+static inline int read_current_timer(unsigned long *timer_val)
+{
+ *timer_val = get_cycles();
+ return 0;
+}
+
+#endif /* _ASM_RISCV_TIMEX_H */
diff --git a/arch/riscv/include/asm/tlb.h b/arch/riscv/include/asm/tlb.h
new file mode 100644
index 0000000000..120bcf2ed8
--- /dev/null
+++ b/arch/riscv/include/asm/tlb.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TLB_H
+#define _ASM_RISCV_TLB_H
+
+struct mmu_gather;
+
+static void tlb_flush(struct mmu_gather *tlb);
+
+#define tlb_flush tlb_flush
+#include <asm-generic/tlb.h>
+
+static inline void tlb_flush(struct mmu_gather *tlb)
+{
+ flush_tlb_mm(tlb->mm);
+}
+
+#endif /* _ASM_RISCV_TLB_H */
diff --git a/arch/riscv/include/asm/tlbflush.h b/arch/riscv/include/asm/tlbflush.h
new file mode 100644
index 0000000000..a09196f8de
--- /dev/null
+++ b/arch/riscv/include/asm/tlbflush.h
@@ -0,0 +1,64 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2009 Chen Liqin <liqin.chen@sunplusct.com>
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+#ifndef _ASM_RISCV_TLBFLUSH_H
+#define _ASM_RISCV_TLBFLUSH_H
+
+#include <linux/mm_types.h>
+#include <asm/smp.h>
+#include <asm/errata_list.h>
+
+#ifdef CONFIG_MMU
+extern unsigned long asid_mask;
+
+static inline void local_flush_tlb_all(void)
+{
+ __asm__ __volatile__ ("sfence.vma" : : : "memory");
+}
+
+/* Flush one page from local TLB */
+static inline void local_flush_tlb_page(unsigned long addr)
+{
+ ALT_FLUSH_TLB_PAGE(__asm__ __volatile__ ("sfence.vma %0" : : "r" (addr) : "memory"));
+}
+#else /* CONFIG_MMU */
+#define local_flush_tlb_all() do { } while (0)
+#define local_flush_tlb_page(addr) do { } while (0)
+#endif /* CONFIG_MMU */
+
+#if defined(CONFIG_SMP) && defined(CONFIG_MMU)
+void flush_tlb_all(void);
+void flush_tlb_mm(struct mm_struct *mm);
+void flush_tlb_page(struct vm_area_struct *vma, unsigned long addr);
+void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define __HAVE_ARCH_FLUSH_PMD_TLB_RANGE
+void flush_pmd_tlb_range(struct vm_area_struct *vma, unsigned long start,
+ unsigned long end);
+#endif
+#else /* CONFIG_SMP && CONFIG_MMU */
+
+#define flush_tlb_all() local_flush_tlb_all()
+#define flush_tlb_page(vma, addr) local_flush_tlb_page(addr)
+
+static inline void flush_tlb_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long end)
+{
+ local_flush_tlb_all();
+}
+
+#define flush_tlb_mm(mm) flush_tlb_all()
+#endif /* !CONFIG_SMP || !CONFIG_MMU */
+
+/* Flush a range of kernel pages */
+static inline void flush_tlb_kernel_range(unsigned long start,
+ unsigned long end)
+{
+ flush_tlb_all();
+}
+
+#endif /* _ASM_RISCV_TLBFLUSH_H */
diff --git a/arch/riscv/include/asm/topology.h b/arch/riscv/include/asm/topology.h
new file mode 100644
index 0000000000..e316ab3b77
--- /dev/null
+++ b/arch/riscv/include/asm/topology.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_RISCV_TOPOLOGY_H
+#define _ASM_RISCV_TOPOLOGY_H
+
+#include <linux/arch_topology.h>
+
+/* Replace task scheduler's default frequency-invariant accounting */
+#define arch_scale_freq_tick topology_scale_freq_tick
+#define arch_set_freq_scale topology_set_freq_scale
+#define arch_scale_freq_capacity topology_get_freq_scale
+#define arch_scale_freq_invariant topology_scale_freq_invariant
+
+/* Replace task scheduler's default cpu-invariant accounting */
+#define arch_scale_cpu_capacity topology_get_cpu_scale
+
+/* Enable topology flag updates */
+#define arch_update_cpu_topology topology_update_cpu_topology
+
+#include <asm-generic/topology.h>
+
+#endif /* _ASM_RISCV_TOPOLOGY_H */
diff --git a/arch/riscv/include/asm/uaccess.h b/arch/riscv/include/asm/uaccess.h
new file mode 100644
index 0000000000..ec0cab9fbd
--- /dev/null
+++ b/arch/riscv/include/asm/uaccess.h
@@ -0,0 +1,341 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * This file was copied from include/asm-generic/uaccess.h
+ */
+
+#ifndef _ASM_RISCV_UACCESS_H
+#define _ASM_RISCV_UACCESS_H
+
+#include <asm/asm-extable.h>
+#include <asm/pgtable.h> /* for TASK_SIZE */
+
+/*
+ * User space memory access functions
+ */
+#ifdef CONFIG_MMU
+#include <linux/errno.h>
+#include <linux/compiler.h>
+#include <linux/thread_info.h>
+#include <asm/byteorder.h>
+#include <asm/extable.h>
+#include <asm/asm.h>
+#include <asm-generic/access_ok.h>
+
+#define __enable_user_access() \
+ __asm__ __volatile__ ("csrs sstatus, %0" : : "r" (SR_SUM) : "memory")
+#define __disable_user_access() \
+ __asm__ __volatile__ ("csrc sstatus, %0" : : "r" (SR_SUM) : "memory")
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+#define __LSW 0
+#define __MSW 1
+
+/*
+ * The "__xxx" versions of the user access functions do not verify the address
+ * space - it must have been done previously with a separate "access_ok()"
+ * call.
+ */
+
+#define __get_user_asm(insn, x, ptr, err) \
+do { \
+ __typeof__(x) __x; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %1, %2\n" \
+ "2:\n" \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 2b, %0, %1) \
+ : "+r" (err), "=&r" (__x) \
+ : "m" (*(ptr))); \
+ (x) = __x; \
+} while (0)
+
+#ifdef CONFIG_64BIT
+#define __get_user_8(x, ptr, err) \
+ __get_user_asm("ld", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#define __get_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u32 __lo, __hi; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " lw %1, %3\n" \
+ "2:\n" \
+ " lw %2, %4\n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(1b, 3b, %0, %1) \
+ _ASM_EXTABLE_UACCESS_ERR_ZERO(2b, 3b, %0, %1) \
+ : "+r" (err), "=&r" (__lo), "=r" (__hi) \
+ : "m" (__ptr[__LSW]), "m" (__ptr[__MSW])); \
+ if (err) \
+ __hi = 0; \
+ (x) = (__typeof__(x))((__typeof__((x)-(x)))( \
+ (((u64)__hi << 32) | __lo))); \
+} while (0)
+#endif /* CONFIG_64BIT */
+
+#define __get_user_nocheck(x, __gu_ptr, __gu_err) \
+do { \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __get_user_asm("lb", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 2: \
+ __get_user_asm("lh", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 4: \
+ __get_user_asm("lw", (x), __gu_ptr, __gu_err); \
+ break; \
+ case 8: \
+ __get_user_8((x), __gu_ptr, __gu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+} while (0)
+
+/**
+ * __get_user: - Get a simple variable from user space, with less checking.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define __get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ long __gu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+ __get_user_nocheck(x, __gu_ptr, __gu_err); \
+ __disable_user_access(); \
+ \
+ __gu_err; \
+})
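+
+/*
+ * Editor's usage sketch (hypothetical variables): a caller validates the
+ * pointer once and may then use __get_user() directly:
+ *
+ *	int val;
+ *	if (!access_ok(uptr, sizeof(*uptr)))
+ *		return -EFAULT;
+ *	if (__get_user(val, uptr))
+ *		return -EFAULT;
+ */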
+
+/**
+ * get_user: - Get a simple variable from user space.
+ * @x: Variable to store result.
+ * @ptr: Source address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple variable from user space to kernel
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and the result of
+ * dereferencing @ptr must be assignable to @x without a cast.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ * On error, the variable @x is set to zero.
+ */
+#define get_user(x, ptr) \
+({ \
+ const __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __get_user((x), __p) : \
+ ((x) = (__force __typeof__(x))0, -EFAULT); \
+})
+
+#define __put_user_asm(insn, x, ptr, err) \
+do { \
+ __typeof__(*(ptr)) __x = x; \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " " insn " %z2, %1\n" \
+ "2:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 2b, %0) \
+ : "+r" (err), "=m" (*(ptr)) \
+ : "rJ" (__x)); \
+} while (0)
+
+#ifdef CONFIG_64BIT
+#define __put_user_8(x, ptr, err) \
+ __put_user_asm("sd", x, ptr, err)
+#else /* !CONFIG_64BIT */
+#define __put_user_8(x, ptr, err) \
+do { \
+ u32 __user *__ptr = (u32 __user *)(ptr); \
+ u64 __x = (__typeof__((x)-(x)))(x); \
+ __asm__ __volatile__ ( \
+ "1:\n" \
+ " sw %z3, %1\n" \
+ "2:\n" \
+ " sw %z4, %2\n" \
+ "3:\n" \
+ _ASM_EXTABLE_UACCESS_ERR(1b, 3b, %0) \
+ _ASM_EXTABLE_UACCESS_ERR(2b, 3b, %0) \
+ : "+r" (err), \
+ "=m" (__ptr[__LSW]), \
+ "=m" (__ptr[__MSW]) \
+ : "rJ" (__x), "rJ" (__x >> 32)); \
+} while (0)
+#endif /* CONFIG_64BIT */
+
+#define __put_user_nocheck(x, __gu_ptr, __pu_err) \
+do { \
+ switch (sizeof(*__gu_ptr)) { \
+ case 1: \
+ __put_user_asm("sb", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 2: \
+ __put_user_asm("sh", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 4: \
+ __put_user_asm("sw", (x), __gu_ptr, __pu_err); \
+ break; \
+ case 8: \
+ __put_user_8((x), __gu_ptr, __pu_err); \
+ break; \
+ default: \
+ BUILD_BUG(); \
+ } \
+} while (0)
+
+/**
+ * __put_user: - Write a simple value into user space, with less checking.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr. The value of @x is copied to avoid
+ * re-ordering where @x is evaluated inside the block that enables user-space
+ * access (thus bypassing user space protection if @x is a function).
+ *
+ * Caller must check the pointer with access_ok() before calling this
+ * function.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define __put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__gu_ptr = (ptr); \
+ __typeof__(*__gu_ptr) __val = (x); \
+ long __pu_err = 0; \
+ \
+ __chk_user_ptr(__gu_ptr); \
+ \
+ __enable_user_access(); \
+ __put_user_nocheck(__val, __gu_ptr, __pu_err); \
+ __disable_user_access(); \
+ \
+ __pu_err; \
+})
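+
+/*
+ * Note: __val is evaluated before __enable_user_access(), so an argument
+ * like __put_user(compute(), ptr) runs compute() while user access is
+ * still disabled, keeping SUM protection around only the actual store.
+ */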
+
+/**
+ * put_user: - Write a simple value into user space.
+ * @x: Value to copy to user space.
+ * @ptr: Destination address, in user space.
+ *
+ * Context: User context only. This function may sleep.
+ *
+ * This macro copies a single simple value from kernel space to user
+ * space. It supports simple types like char and int, but not larger
+ * data types like structures or arrays.
+ *
+ * @ptr must have pointer-to-simple-variable type, and @x must be assignable
+ * to the result of dereferencing @ptr.
+ *
+ * Returns zero on success, or -EFAULT on error.
+ */
+#define put_user(x, ptr) \
+({ \
+ __typeof__(*(ptr)) __user *__p = (ptr); \
+ might_fault(); \
+ access_ok(__p, sizeof(*__p)) ? \
+ __put_user((x), __p) : \
+ -EFAULT; \
+})
+
+
+unsigned long __must_check __asm_copy_to_user(void __user *to,
+ const void *from, unsigned long n);
+unsigned long __must_check __asm_copy_from_user(void *to,
+ const void __user *from, unsigned long n);
+
+static inline unsigned long
+raw_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ return __asm_copy_from_user(to, from, n);
+}
+
+static inline unsigned long
+raw_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ return __asm_copy_to_user(to, from, n);
+}
+
+extern long strncpy_from_user(char *dest, const char __user *src, long count);
+
+extern long __must_check strnlen_user(const char __user *str, long n);
+
+extern
+unsigned long __must_check __clear_user(void __user *addr, unsigned long n);
+
+static inline
+unsigned long __must_check clear_user(void __user *to, unsigned long n)
+{
+ might_fault();
+ return access_ok(to, n) ?
+ __clear_user(to, n) : n;
+}
+
+#define __get_kernel_nofault(dst, src, type, err_label) \
+do { \
+ long __kr_err; \
+ \
+ __get_user_nocheck(*((type *)(dst)), (type *)(src), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#define __put_kernel_nofault(dst, src, type, err_label) \
+do { \
+ long __kr_err; \
+ \
+ __put_user_nocheck(*((type *)(src)), (type *)(dst), __kr_err); \
+ if (unlikely(__kr_err)) \
+ goto err_label; \
+} while (0)
+
+#else /* CONFIG_MMU */
+#include <asm-generic/uaccess.h>
+#endif /* CONFIG_MMU */
+#endif /* _ASM_RISCV_UACCESS_H */
diff --git a/arch/riscv/include/asm/unistd.h b/arch/riscv/include/asm/unistd.h
new file mode 100644
index 0000000000..221630bdbd
--- /dev/null
+++ b/arch/riscv/include/asm/unistd.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ */
+
+/*
+ * There is explicitly no include guard here because this file is expected to
+ * be included multiple times.
+ */
+
+#define __ARCH_WANT_SYS_CLONE
+
+#ifdef CONFIG_COMPAT
+#define __ARCH_WANT_COMPAT_TRUNCATE64
+#define __ARCH_WANT_COMPAT_FTRUNCATE64
+#define __ARCH_WANT_COMPAT_FALLOCATE
+#define __ARCH_WANT_COMPAT_PREAD64
+#define __ARCH_WANT_COMPAT_PWRITE64
+#define __ARCH_WANT_COMPAT_SYNC_FILE_RANGE
+#define __ARCH_WANT_COMPAT_READAHEAD
+#define __ARCH_WANT_COMPAT_FADVISE64_64
+#endif
+
+#include <uapi/asm/unistd.h>
+
+#define NR_syscalls (__NR_syscalls)
diff --git a/arch/riscv/include/asm/uprobes.h b/arch/riscv/include/asm/uprobes.h
new file mode 100644
index 0000000000..3fc7deda91
--- /dev/null
+++ b/arch/riscv/include/asm/uprobes.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef _ASM_RISCV_UPROBES_H
+#define _ASM_RISCV_UPROBES_H
+
+#include <asm/probes.h>
+#include <asm/patch.h>
+#include <asm/bug.h>
+
+#define MAX_UINSN_BYTES 8
+
+#ifdef CONFIG_RISCV_ISA_C
+#define UPROBE_SWBP_INSN __BUG_INSN_16
+#define UPROBE_SWBP_INSN_SIZE 2
+#else
+#define UPROBE_SWBP_INSN __BUG_INSN_32
+#define UPROBE_SWBP_INSN_SIZE 4
+#endif
+#define UPROBE_XOL_SLOT_BYTES MAX_UINSN_BYTES
+
+typedef u32 uprobe_opcode_t;
+
+struct arch_uprobe_task {
+ unsigned long saved_cause;
+};
+
+struct arch_uprobe {
+ union {
+ u8 insn[MAX_UINSN_BYTES];
+ u8 ixol[MAX_UINSN_BYTES];
+ };
+ struct arch_probe_insn api;
+ unsigned long insn_size;
+ bool simulate;
+};
+
+#ifdef CONFIG_UPROBES
+bool uprobe_breakpoint_handler(struct pt_regs *regs);
+bool uprobe_single_step_handler(struct pt_regs *regs);
+#else
+static inline bool uprobe_breakpoint_handler(struct pt_regs *regs)
+{
+ return false;
+}
+
+static inline bool uprobe_single_step_handler(struct pt_regs *regs)
+{
+ return false;
+}
+#endif /* CONFIG_UPROBES */
+#endif /* _ASM_RISCV_UPROBES_H */
diff --git a/arch/riscv/include/asm/vdso.h b/arch/riscv/include/asm/vdso.h
new file mode 100644
index 0000000000..f891478829
--- /dev/null
+++ b/arch/riscv/include/asm/vdso.h
@@ -0,0 +1,41 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Limited
+ * Copyright (C) 2014 Regents of the University of California
+ * Copyright (C) 2017 SiFive
+ */
+
+#ifndef _ASM_RISCV_VDSO_H
+#define _ASM_RISCV_VDSO_H
+
+/*
+ * All systems with an MMU have a VDSO, but systems without an MMU don't
+ * support shared libraries and therefore don't have one.
+ */
+#ifdef CONFIG_MMU
+
+#define __VVAR_PAGES 2
+
+#ifndef __ASSEMBLY__
+#include <generated/vdso-offsets.h>
+
+#define VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + __vdso_##name##_offset)
+
+#ifdef CONFIG_COMPAT
+#include <generated/compat_vdso-offsets.h>
+
+#define COMPAT_VDSO_SYMBOL(base, name) \
+ (void __user *)((unsigned long)(base) + compat__vdso_##name##_offset)
+
+extern char compat_vdso_start[], compat_vdso_end[];
+
+#endif /* CONFIG_COMPAT */
+
+extern char vdso_start[], vdso_end[];
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* CONFIG_MMU */
+
+#endif /* _ASM_RISCV_VDSO_H */
diff --git a/arch/riscv/include/asm/vdso/clocksource.h b/arch/riscv/include/asm/vdso/clocksource.h
new file mode 100644
index 0000000000..df6ea65c1d
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/clocksource.h
@@ -0,0 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSOCLOCKSOURCE_H
+#define __ASM_VDSOCLOCKSOURCE_H
+
+#define VDSO_ARCH_CLOCKMODES \
+ VDSO_CLOCKMODE_ARCHTIMER
+
+#endif
diff --git a/arch/riscv/include/asm/vdso/data.h b/arch/riscv/include/asm/vdso/data.h
new file mode 100644
index 0000000000..dc2f76f58b
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/data.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __RISCV_ASM_VDSO_DATA_H
+#define __RISCV_ASM_VDSO_DATA_H
+
+#include <linux/types.h>
+#include <vdso/datapage.h>
+#include <asm/hwprobe.h>
+
+struct arch_vdso_data {
+ /* Stash static answers to the hwprobe queries when all CPUs are selected. */
+ __u64 all_cpu_hwprobe_values[RISCV_HWPROBE_MAX_KEY + 1];
+
+ /* Boolean indicating all CPUs have the same static hwprobe values. */
+ __u8 homogeneous_cpus;
+};
+
+#endif /* __RISCV_ASM_VDSO_DATA_H */
diff --git a/arch/riscv/include/asm/vdso/gettimeofday.h b/arch/riscv/include/asm/vdso/gettimeofday.h
new file mode 100644
index 0000000000..ba3283cf7a
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/gettimeofday.h
@@ -0,0 +1,96 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_GETTIMEOFDAY_H
+#define __ASM_VDSO_GETTIMEOFDAY_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+#include <asm/unistd.h>
+#include <asm/csr.h>
+#include <uapi/linux/time.h>
+
+/*
+ * 32-bit land lacks the generic time vsyscalls as well as the legacy 32-bit
+ * time syscalls like gettimeofday, so skip these definitions on 32-bit.
+ */
+#ifdef CONFIG_GENERIC_TIME_VSYSCALL
+
+#define VDSO_HAS_CLOCK_GETRES 1
+
+static __always_inline
+int gettimeofday_fallback(struct __kernel_old_timeval *_tv,
+ struct timezone *_tz)
+{
+ register struct __kernel_old_timeval *tv asm("a0") = _tv;
+ register struct timezone *tz asm("a1") = _tz;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_gettimeofday;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(tv), "r"(tz), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+long clock_gettime_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_gettime;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+static __always_inline
+int clock_getres_fallback(clockid_t _clkid, struct __kernel_timespec *_ts)
+{
+ register clockid_t clkid asm("a0") = _clkid;
+ register struct __kernel_timespec *ts asm("a1") = _ts;
+ register long ret asm("a0");
+ register long nr asm("a7") = __NR_clock_getres;
+
+ asm volatile ("ecall\n"
+ : "=r" (ret)
+ : "r"(clkid), "r"(ts), "r"(nr)
+ : "memory");
+
+ return ret;
+}
+
+#endif /* CONFIG_GENERIC_TIME_VSYSCALL */
+
+static __always_inline u64 __arch_get_hw_counter(s32 clock_mode,
+ const struct vdso_data *vd)
+{
+ /*
+ * The purpose of csr_read(CSR_TIME) is to trap the system into
+	 * M-mode to obtain the value of CSR_TIME. Hence, unlike on other
+	 * architectures, no fence instructions surround the csr_read().
+ */
+ return csr_read(CSR_TIME);
+}
+
+static __always_inline const struct vdso_data *__arch_get_vdso_data(void)
+{
+ return _vdso_data;
+}
+
+#ifdef CONFIG_TIME_NS
+static __always_inline
+const struct vdso_data *__arch_get_timens_vdso_data(const struct vdso_data *vd)
+{
+ return _timens_data;
+}
+#endif
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/arch/riscv/include/asm/vdso/processor.h b/arch/riscv/include/asm/vdso/processor.h
new file mode 100644
index 0000000000..96b65a5396
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/processor.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#ifndef __ASM_VDSO_PROCESSOR_H
+#define __ASM_VDSO_PROCESSOR_H
+
+#ifndef __ASSEMBLY__
+
+#include <asm/barrier.h>
+
+static inline void cpu_relax(void)
+{
+#ifdef __riscv_muldiv
+ int dummy;
+ /* In lieu of a halt instruction, induce a long-latency stall. */
+ __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy));
+#endif
+
+#ifdef CONFIG_TOOLCHAIN_HAS_ZIHINTPAUSE
+ /*
+ * Reduce instruction retirement.
+ * This assumes the PC changes.
+ */
+ __asm__ __volatile__ ("pause");
+#else
+ /* Encoding of the pause instruction */
+ __asm__ __volatile__ (".4byte 0x100000F");
+#endif
+ barrier();
+}
+
+#endif /* __ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_PROCESSOR_H */
diff --git a/arch/riscv/include/asm/vdso/vsyscall.h b/arch/riscv/include/asm/vdso/vsyscall.h
new file mode 100644
index 0000000000..82fd5d83bd
--- /dev/null
+++ b/arch/riscv/include/asm/vdso/vsyscall.h
@@ -0,0 +1,27 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ASM_VDSO_VSYSCALL_H
+#define __ASM_VDSO_VSYSCALL_H
+
+#ifndef __ASSEMBLY__
+
+#include <linux/timekeeper_internal.h>
+#include <vdso/datapage.h>
+
+extern struct vdso_data *vdso_data;
+
+/*
+ * Expose the kernel-side vDSO data page for generic timekeeping updates.
+ */
+static __always_inline struct vdso_data *__riscv_get_k_vdso_data(void)
+{
+ return vdso_data;
+}
+
+#define __arch_get_k_vdso_data __riscv_get_k_vdso_data
+
+/* The asm-generic header needs to be included after the definitions above */
+#include <asm-generic/vdso/vsyscall.h>
+
+#endif /* !__ASSEMBLY__ */
+
+#endif /* __ASM_VDSO_VSYSCALL_H */
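
The indirection above exists so the asm-generic vsyscall layer can fetch the arch's kernel-side data page through __arch_get_k_vdso_data() and write timekeeping updates into it. Below is a toy userspace model of that flow; every name in it is made up.

/* Toy model: the arch hands out its kernel-side data page through an
 * accessor, and the generic update path writes through that accessor. */
#include <stdint.h>
#include <stdio.h>

struct toy_vdso_data {
	uint64_t cycle_last;	/* stand-in for real timekeeping fields */
};

static struct toy_vdso_data toy_store;
static struct toy_vdso_data *toy_vdso_data = &toy_store;

/* Plays the role of __riscv_get_k_vdso_data() / __arch_get_k_vdso_data. */
static inline struct toy_vdso_data *toy_get_k_vdso_data(void)
{
	return toy_vdso_data;
}

/* Plays the role of the asm-generic update_vsyscall() path. */
static void toy_update_vsyscall(uint64_t now)
{
	toy_get_k_vdso_data()->cycle_last = now;
}

int main(void)
{
	toy_update_vsyscall(42);
	printf("cycle_last=%llu\n",
	       (unsigned long long)toy_get_k_vdso_data()->cycle_last);
	return 0;
}
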
diff --git a/arch/riscv/include/asm/vector.h b/arch/riscv/include/asm/vector.h
new file mode 100644
index 0000000000..c5ee07b3df
--- /dev/null
+++ b/arch/riscv/include/asm/vector.h
@@ -0,0 +1,219 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Copyright (C) 2020 SiFive
+ */
+
+#ifndef __ASM_RISCV_VECTOR_H
+#define __ASM_RISCV_VECTOR_H
+
+#include <linux/types.h>
+#include <uapi/asm-generic/errno.h>
+
+#ifdef CONFIG_RISCV_ISA_V
+
+#include <linux/stringify.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <asm/ptrace.h>
+#include <asm/hwcap.h>
+#include <asm/csr.h>
+#include <asm/asm.h>
+
+extern unsigned long riscv_v_vsize;
+int riscv_v_setup_vsize(void);
+bool riscv_v_first_use_handler(struct pt_regs *regs);
+
+static __always_inline bool has_vector(void)
+{
+ return riscv_has_extension_unlikely(RISCV_ISA_EXT_v);
+}
+
+static inline void __riscv_v_vstate_clean(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_CLEAN;
+}
+
+static inline void __riscv_v_vstate_dirty(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_DIRTY;
+}
+
+static inline void riscv_v_vstate_off(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_OFF;
+}
+
+static inline void riscv_v_vstate_on(struct pt_regs *regs)
+{
+ regs->status = (regs->status & ~SR_VS) | SR_VS_INITIAL;
+}
+
+static inline bool riscv_v_vstate_query(struct pt_regs *regs)
+{
+ return (regs->status & SR_VS) != 0;
+}
+
+static __always_inline void riscv_v_enable(void)
+{
+ csr_set(CSR_SSTATUS, SR_VS);
+}
+
+static __always_inline void riscv_v_disable(void)
+{
+ csr_clear(CSR_SSTATUS, SR_VS);
+}
+
+static __always_inline void __vstate_csr_save(struct __riscv_v_ext_state *dest)
+{
+ asm volatile (
+ "csrr %0, " __stringify(CSR_VSTART) "\n\t"
+ "csrr %1, " __stringify(CSR_VTYPE) "\n\t"
+ "csrr %2, " __stringify(CSR_VL) "\n\t"
+ "csrr %3, " __stringify(CSR_VCSR) "\n\t"
+ "csrr %4, " __stringify(CSR_VLENB) "\n\t"
+ : "=r" (dest->vstart), "=r" (dest->vtype), "=r" (dest->vl),
+ "=r" (dest->vcsr), "=r" (dest->vlenb) : :);
+}
+
+static __always_inline void __vstate_csr_restore(struct __riscv_v_ext_state *src)
+{
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvl x0, %2, %1\n\t"
+ ".option pop\n\t"
+ "csrw " __stringify(CSR_VSTART) ", %0\n\t"
+ "csrw " __stringify(CSR_VCSR) ", %3\n\t"
+ : : "r" (src->vstart), "r" (src->vtype), "r" (src->vl),
+ "r" (src->vcsr) :);
+}
+
+static inline void __riscv_v_vstate_save(struct __riscv_v_ext_state *save_to,
+ void *datap)
+{
+ unsigned long vl;
+
+ riscv_v_enable();
+ __vstate_csr_save(save_to);
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vse8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vse8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ riscv_v_disable();
+}
+
+static inline void __riscv_v_vstate_restore(struct __riscv_v_ext_state *restore_from,
+ void *datap)
+{
+ unsigned long vl;
+
+ riscv_v_enable();
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vle8.v v0, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v8, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v16, (%1)\n\t"
+ "add %1, %1, %0\n\t"
+ "vle8.v v24, (%1)\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (datap) : "memory");
+ __vstate_csr_restore(restore_from);
+ riscv_v_disable();
+}
+
+static inline void __riscv_v_vstate_discard(void)
+{
+ unsigned long vl, vtype_inval = 1UL << (BITS_PER_LONG - 1);
+
+ riscv_v_enable();
+ asm volatile (
+ ".option push\n\t"
+ ".option arch, +v\n\t"
+ "vsetvli %0, x0, e8, m8, ta, ma\n\t"
+ "vmv.v.i v0, -1\n\t"
+ "vmv.v.i v8, -1\n\t"
+ "vmv.v.i v16, -1\n\t"
+ "vmv.v.i v24, -1\n\t"
+ "vsetvl %0, x0, %1\n\t"
+ ".option pop\n\t"
+ : "=&r" (vl) : "r" (vtype_inval) : "memory");
+ riscv_v_disable();
+}
+
+static inline void riscv_v_vstate_discard(struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) == SR_VS_OFF)
+ return;
+
+ __riscv_v_vstate_discard();
+ __riscv_v_vstate_dirty(regs);
+}
+
+static inline void riscv_v_vstate_save(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) == SR_VS_DIRTY) {
+ struct __riscv_v_ext_state *vstate = &task->thread.vstate;
+
+ __riscv_v_vstate_save(vstate, vstate->datap);
+ __riscv_v_vstate_clean(regs);
+ }
+}
+
+static inline void riscv_v_vstate_restore(struct task_struct *task,
+ struct pt_regs *regs)
+{
+ if ((regs->status & SR_VS) != SR_VS_OFF) {
+ struct __riscv_v_ext_state *vstate = &task->thread.vstate;
+
+ __riscv_v_vstate_restore(vstate, vstate->datap);
+ __riscv_v_vstate_clean(regs);
+ }
+}
+
+static inline void __switch_to_vector(struct task_struct *prev,
+ struct task_struct *next)
+{
+ struct pt_regs *regs;
+
+ regs = task_pt_regs(prev);
+ riscv_v_vstate_save(prev, regs);
+ riscv_v_vstate_restore(next, task_pt_regs(next));
+}
+
+void riscv_v_vstate_ctrl_init(struct task_struct *tsk);
+bool riscv_v_vstate_ctrl_user_allowed(void);
+
+#else /* ! CONFIG_RISCV_ISA_V */
+
+struct pt_regs;
+
+static inline int riscv_v_setup_vsize(void) { return -EOPNOTSUPP; }
+static __always_inline bool has_vector(void) { return false; }
+static inline bool riscv_v_first_use_handler(struct pt_regs *regs) { return false; }
+static inline bool riscv_v_vstate_query(struct pt_regs *regs) { return false; }
+static inline bool riscv_v_vstate_ctrl_user_allowed(void) { return false; }
+#define riscv_v_vsize (0)
+#define riscv_v_vstate_discard(regs) do {} while (0)
+#define riscv_v_vstate_save(task, regs) do {} while (0)
+#define riscv_v_vstate_restore(task, regs) do {} while (0)
+#define __switch_to_vector(__prev, __next) do {} while (0)
+#define riscv_v_vstate_off(regs) do {} while (0)
+#define riscv_v_vstate_on(regs) do {} while (0)
+
+#endif /* CONFIG_RISCV_ISA_V */
+
+#endif /* ! __ASM_RISCV_VECTOR_H */
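
The save/restore loops above spill all 32 vector registers as four e8/m8 register groups, so the context area pointed to by datap must hold 32 * vlenb bytes (this is what riscv_v_vsize tracks). A small userspace probe of that size, assuming the V extension is available and enabled for the process:

/* Userspace probe: read vlenb and size a context area the way the
 * save loop fills it. Requires a V-capable hart and kernel. */
#include <stdio.h>

int main(void)
{
	unsigned long vlenb;

	/* CSR 0xc22 is vlenb: the vector register length in bytes. */
	__asm__ __volatile__ ("csrr %0, 0xc22" : "=r" (vlenb));

	/*
	 * Each vse8.v with LMUL=8 stores 8 * vlenb bytes; the four stores
	 * in the save loop cover v0-v31, so 32 * vlenb bytes in total.
	 */
	printf("vlenb = %lu, vector context = %lu bytes\n",
	       vlenb, 32 * vlenb);
	return 0;
}
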
diff --git a/arch/riscv/include/asm/vendorid_list.h b/arch/riscv/include/asm/vendorid_list.h
new file mode 100644
index 0000000000..e55407ace0
--- /dev/null
+++ b/arch/riscv/include/asm/vendorid_list.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2021 SiFive
+ */
+#ifndef ASM_VENDOR_LIST_H
+#define ASM_VENDOR_LIST_H
+
+#define ANDESTECH_VENDOR_ID 0x31e
+#define SIFIVE_VENDOR_ID 0x489
+#define THEAD_VENDOR_ID 0x5b7
+
+#endif
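
These IDs correspond to the mvendorid CSR (a JEDEC manufacturer code), which the kernel obtains via SBI at boot and compares when deciding which vendor errata to apply. An illustrative lookup using the same constants; vendor_name is a made-up helper:

/* Map a vendor ID, as reported for mvendorid, to a printable name. */
#include <stdio.h>

static const char *vendor_name(unsigned long vendorid)
{
	switch (vendorid) {
	case 0x31e: return "Andes Technology";	/* ANDESTECH_VENDOR_ID */
	case 0x489: return "SiFive";		/* SIFIVE_VENDOR_ID */
	case 0x5b7: return "T-Head";		/* THEAD_VENDOR_ID */
	}
	return "unknown";
}

int main(void)
{
	printf("%s\n", vendor_name(0x489));	/* prints SiFive */
	return 0;
}
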
diff --git a/arch/riscv/include/asm/vermagic.h b/arch/riscv/include/asm/vermagic.h
new file mode 100644
index 0000000000..7b9441a574
--- /dev/null
+++ b/arch/riscv/include/asm/vermagic.h
@@ -0,0 +1,9 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (C) 2017 Andes Technology Corporation */
+
+#ifndef _ASM_VERMAGIC_H
+#define _ASM_VERMAGIC_H
+
+#define MODULE_ARCH_VERMAGIC "riscv"
+
+#endif /* _ASM_VERMAGIC_H */
diff --git a/arch/riscv/include/asm/vmalloc.h b/arch/riscv/include/asm/vmalloc.h
new file mode 100644
index 0000000000..924d01b56c
--- /dev/null
+++ b/arch/riscv/include/asm/vmalloc.h
@@ -0,0 +1,83 @@
+#ifndef _ASM_RISCV_VMALLOC_H
+#define _ASM_RISCV_VMALLOC_H
+
+#ifdef CONFIG_HAVE_ARCH_HUGE_VMAP
+
+extern bool pgtable_l4_enabled, pgtable_l5_enabled;
+
+#define IOREMAP_MAX_ORDER (PUD_SHIFT)
+
+#define arch_vmap_pud_supported arch_vmap_pud_supported
+static inline bool arch_vmap_pud_supported(pgprot_t prot)
+{
+ return pgtable_l4_enabled || pgtable_l5_enabled;
+}
+
+#define arch_vmap_pmd_supported arch_vmap_pmd_supported
+static inline bool arch_vmap_pmd_supported(pgprot_t prot)
+{
+ return true;
+}
+
+#ifdef CONFIG_RISCV_ISA_SVNAPOT
+#include <linux/pgtable.h>
+
+#define arch_vmap_pte_range_map_size arch_vmap_pte_range_map_size
+static inline unsigned long arch_vmap_pte_range_map_size(unsigned long addr, unsigned long end,
+ u64 pfn, unsigned int max_page_shift)
+{
+ unsigned long map_size = PAGE_SIZE;
+ unsigned long size, order;
+
+ if (!has_svnapot())
+ return map_size;
+
+ for_each_napot_order_rev(order) {
+ if (napot_cont_shift(order) > max_page_shift)
+ continue;
+
+ size = napot_cont_size(order);
+ if (end - addr < size)
+ continue;
+
+ if (!IS_ALIGNED(addr, size))
+ continue;
+
+ if (!IS_ALIGNED(PFN_PHYS(pfn), size))
+ continue;
+
+ map_size = size;
+ break;
+ }
+
+ return map_size;
+}
+
+#define arch_vmap_pte_supported_shift arch_vmap_pte_supported_shift
+static inline int arch_vmap_pte_supported_shift(unsigned long size)
+{
+ int shift = PAGE_SHIFT;
+ unsigned long order;
+
+ if (!has_svnapot())
+ return shift;
+
+ WARN_ON_ONCE(size >= PMD_SIZE);
+
+ for_each_napot_order_rev(order) {
+ if (napot_cont_size(order) > size)
+ continue;
+
+ if (!IS_ALIGNED(size, napot_cont_size(order)))
+ continue;
+
+ shift = napot_cont_shift(order);
+ break;
+ }
+
+ return shift;
+}
+
+#endif /* CONFIG_RISCV_ISA_SVNAPOT */
+#endif /* CONFIG_HAVE_ARCH_HUGE_VMAP */
+#endif /* _ASM_RISCV_VMALLOC_H */
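
arch_vmap_pte_range_map_size() above walks the NAPOT orders from largest to smallest and keeps the first contiguous size that fits the remaining range and is aligned in both the virtual and the physical address. Below is a standalone model of that walk; the candidate size list is hypothetical (the kernel derives candidates from the supported NAPOT orders), with 64KB chosen because it is the one Svnapot size in common use.

/* Standalone model of the NAPOT mapping-size selection. */
#include <stdio.h>

#define TOY_PAGE_SIZE 4096UL

static unsigned long pick_map_size(unsigned long addr, unsigned long end,
				   unsigned long phys)
{
	static const unsigned long sizes[] = { 64 * 1024UL };
	unsigned long map_size = TOY_PAGE_SIZE;
	unsigned int i;

	for (i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
		unsigned long size = sizes[i];

		if (end - addr < size)		/* must fit in the range */
			continue;
		if (addr % size || phys % size)	/* VA and PA alignment */
			continue;
		map_size = size;
		break;
	}
	return map_size;
}

int main(void)
{
	/* Aligned VA and PA, large enough range -> 64KB mapping. */
	printf("%lu\n", pick_map_size(0x10000, 0x40000, 0x200000));
	/* Misaligned VA -> falls back to the base page size. */
	printf("%lu\n", pick_map_size(0x11000, 0x40000, 0x200000));
	return 0;
}
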
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 0000000000..7c086ac6ec
--- /dev/null
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,47 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * Derived from arch/x86/include/asm/word-at-a-time.h
+ */
+
+#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
+#define _ASM_RISCV_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long val,
+ unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val,
+ unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
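
The helpers above implement the classic zero-byte test: (x - REPEAT_BYTE(0x01)) & ~x & REPEAT_BYTE(0x80) sets the high bit of every byte of x that was zero, and create_zero_mask()/find_zero() turn the lowest such marker into a byte index. A self-contained little-endian demonstration (little-endian holds on RISC-V; word_strlen is a made-up name, not part of this header):

/* Userspace demonstration of the zero-byte trick used above. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define ONES  0x0101010101010101ULL	/* REPEAT_BYTE(0x01) */
#define HIGHS 0x8080808080808080ULL	/* REPEAT_BYTE(0x80) */

static size_t word_strlen(const char *s)
{
	size_t len = 0;
	uint64_t word, mask;

	for (;; len += 8) {
		memcpy(&word, s + len, 8);	/* safe: buffer is padded */
		mask = (word - ONES) & ~word & HIGHS;	/* has_zero() */
		if (mask)
			break;
	}
	mask = (mask - 1) & ~mask;	/* create_zero_mask(): bits below marker */
	mask >>= 7;
	/* find_zero(): fls64(mask) >> 3 gives the first zero byte's index. */
	return len + (mask ? (size_t)(64 - __builtin_clzll(mask)) >> 3 : 0);
}

int main(void)
{
	static const char buf[32] = "hello, riscv";

	printf("%zu\n", word_strlen(buf));	/* prints 12 */
	return 0;
}
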
diff --git a/arch/riscv/include/asm/xip_fixup.h b/arch/riscv/include/asm/xip_fixup.h
new file mode 100644
index 0000000000..b65bf6306f
--- /dev/null
+++ b/arch/riscv/include/asm/xip_fixup.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * XIP fixup macros, only useful in assembly.
+ */
+#ifndef _ASM_RISCV_XIP_FIXUP_H
+#define _ASM_RISCV_XIP_FIXUP_H
+
+#include <linux/pgtable.h>
+
+#ifdef CONFIG_XIP_KERNEL
+.macro XIP_FIXUP_OFFSET reg
+ REG_L t0, _xip_fixup
+ add \reg, \reg, t0
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+ la t0, __data_loc
+ REG_L t1, _xip_phys_offset
+ sub \reg, \reg, t1
+ add \reg, \reg, t0
+.endm
+
+_xip_fixup: .dword CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET
+_xip_phys_offset: .dword CONFIG_XIP_PHYS_ADDR + XIP_OFFSET
+#else
+.macro XIP_FIXUP_OFFSET reg
+.endm
+.macro XIP_FIXUP_FLASH_OFFSET reg
+.endm
+#endif /* CONFIG_XIP_KERNEL */
+
+#endif
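
For context, XIP_FIXUP_OFFSET adds the flash-to-RAM delta (CONFIG_PHYS_RAM_BASE - CONFIG_XIP_PHYS_ADDR - XIP_OFFSET) so that a link-time data address resolves to its writable copy in RAM, while XIP_FIXUP_FLASH_OFFSET maps back toward the flash view. A toy model of the first fixup, with made-up layout constants throughout:

/* Toy model of XIP_FIXUP_OFFSET: text executes from flash, writable
 * data is copied to RAM, so link-time data addresses need the
 * flash->RAM delta applied. All constants are hypothetical. */
#include <stdio.h>

#define TOY_PHYS_RAM_BASE 0x80000000UL	/* hypothetical RAM base */
#define TOY_XIP_PHYS_ADDR 0x20000000UL	/* hypothetical flash base */
#define TOY_XIP_OFFSET    0x00800000UL	/* hypothetical text span */

static unsigned long xip_fixup_offset(unsigned long addr)
{
	/* The delta the macro loads from _xip_fixup and adds to \reg. */
	return addr + (TOY_PHYS_RAM_BASE - TOY_XIP_PHYS_ADDR - TOY_XIP_OFFSET);
}

int main(void)
{
	unsigned long flash_view = TOY_XIP_PHYS_ADDR + TOY_XIP_OFFSET + 0x1000;

	printf("0x%lx -> 0x%lx\n", flash_view, xip_fixup_offset(flash_view));
	/* prints 0x20801000 -> 0x80001000 */
	return 0;
}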