path: root/arch/riscv/kernel
author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:35:05 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-05-18 17:39:31 +0000
commit    85c675d0d09a45a135bddd15d7b385f8758c32fb (patch)
tree      76267dbc9b9a130337be3640948fe397b04ac629 /arch/riscv/kernel
parent    Adding upstream version 6.6.15. (diff)
Adding upstream version 6.7.7. (upstream/6.7.7)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/kernel')
-rw-r--r--  arch/riscv/kernel/Makefile                                      |   3
-rw-r--r--  arch/riscv/kernel/acpi.c                                        |  87
-rw-r--r--  arch/riscv/kernel/asm-offsets.c                                 |   8
-rw-r--r--  arch/riscv/kernel/compat_vdso/Makefile                          |  10
-rw-r--r--  arch/riscv/kernel/copy-unaligned.S                              |   8
-rw-r--r--  arch/riscv/kernel/cpu.c                                         |  22
-rw-r--r--  arch/riscv/kernel/cpufeature.c                                  | 115
-rw-r--r--  arch/riscv/kernel/entry.S                                       |  69
-rw-r--r--  arch/riscv/kernel/fpu.S                                         | 129
-rw-r--r--  arch/riscv/kernel/head.S                                        |  50
-rw-r--r--  arch/riscv/kernel/hibernate-asm.S                               |  12
-rw-r--r--  arch/riscv/kernel/image-vars.h                                  |   2
-rw-r--r--  arch/riscv/kernel/irq.c                                         |  56
-rw-r--r--  arch/riscv/kernel/kexec_relocate.S                              |  52
-rw-r--r--  arch/riscv/kernel/mcount-dyn.S                                  |  20
-rw-r--r--  arch/riscv/kernel/mcount.S                                      |  18
-rw-r--r--  arch/riscv/kernel/module.c                                      | 666
-rw-r--r--  arch/riscv/kernel/probes/rethook_trampoline.S                   |   4
-rw-r--r--  arch/riscv/kernel/process.c                                     |  18
-rw-r--r--  arch/riscv/kernel/sbi.c                                         |  32
-rw-r--r--  arch/riscv/kernel/setup.c                                       |  16
-rw-r--r--  arch/riscv/kernel/signal.c                                      |  85
-rw-r--r--  arch/riscv/kernel/smpboot.c                                     |   5
-rw-r--r--  arch/riscv/kernel/suspend_entry.S                               |   9
-rw-r--r--  arch/riscv/kernel/sys_riscv.c                                   |  46
-rw-r--r--  arch/riscv/kernel/tests/Kconfig.debug                           |  35
-rw-r--r--  arch/riscv/kernel/tests/Makefile                                |   1
-rw-r--r--  arch/riscv/kernel/tests/module_test/Makefile                    |  15
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_module_linking_main.c |  88
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_set16.S                |  23
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_set32.S                |  20
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_set6.S                 |  23
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_set8.S                 |  23
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_sub16.S                |  20
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_sub32.S                |  20
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_sub6.S                 |  20
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_sub64.S                |  25
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_sub8.S                 |  20
-rw-r--r--  arch/riscv/kernel/tests/module_test/test_uleb128.S              |  31
-rw-r--r--  arch/riscv/kernel/traps.c                                       |  62
-rw-r--r--  arch/riscv/kernel/traps_misaligned.c                            | 375
-rw-r--r--  arch/riscv/kernel/vdso/Makefile                                 |  12
-rw-r--r--  arch/riscv/kernel/vdso/flush_icache.S                           |   4
-rw-r--r--  arch/riscv/kernel/vdso/getcpu.S                                 |   4
-rw-r--r--  arch/riscv/kernel/vdso/rt_sigreturn.S                           |   4
-rw-r--r--  arch/riscv/kernel/vdso/sys_hwprobe.S                            |   4
-rw-r--r--  arch/riscv/kernel/vdso/vdso.lds.S                               |  30
-rw-r--r--  arch/riscv/kernel/vector.c                                      |   1
48 files changed, 1909 insertions(+), 493 deletions(-)
diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
index 95cf25d484..fee22a3d1b 100644
--- a/arch/riscv/kernel/Makefile
+++ b/arch/riscv/kernel/Makefile
@@ -57,9 +57,10 @@ obj-y += stacktrace.o
obj-y += cacheinfo.o
obj-y += patch.o
obj-y += probes/
+obj-y += tests/
obj-$(CONFIG_MMU) += vdso.o vdso/
-obj-$(CONFIG_RISCV_M_MODE) += traps_misaligned.o
+obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
obj-$(CONFIG_FPU) += fpu.o
obj-$(CONFIG_RISCV_ISA_V) += vector.o
obj-$(CONFIG_SMP) += smpboot.o
diff --git a/arch/riscv/kernel/acpi.c b/arch/riscv/kernel/acpi.c
index 56cb2c986c..e619edc8b0 100644
--- a/arch/riscv/kernel/acpi.c
+++ b/arch/riscv/kernel/acpi.c
@@ -14,9 +14,10 @@
*/
#include <linux/acpi.h>
+#include <linux/efi.h>
#include <linux/io.h>
+#include <linux/memblock.h>
#include <linux/pci.h>
-#include <linux/efi.h>
int acpi_noirq = 1; /* skip ACPI IRQ initialization */
int acpi_disabled = 1;
@@ -217,7 +218,89 @@ void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
- return (void __iomem *)memremap(phys, size, MEMREMAP_WB);
+ efi_memory_desc_t *md, *region = NULL;
+ pgprot_t prot;
+
+ if (WARN_ON_ONCE(!efi_enabled(EFI_MEMMAP)))
+ return NULL;
+
+ for_each_efi_memory_desc(md) {
+ u64 end = md->phys_addr + (md->num_pages << EFI_PAGE_SHIFT);
+
+ if (phys < md->phys_addr || phys >= end)
+ continue;
+
+ if (phys + size > end) {
+ pr_warn(FW_BUG "requested region covers multiple EFI memory regions\n");
+ return NULL;
+ }
+ region = md;
+ break;
+ }
+
+ /*
+ * It is fine for AML to remap regions that are not represented in the
+ * EFI memory map at all, as it only describes normal memory, and MMIO
+ * regions that require a virtual mapping to make them accessible to
+ * the EFI runtime services.
+ */
+ prot = PAGE_KERNEL_IO;
+ if (region) {
+ switch (region->type) {
+ case EFI_LOADER_CODE:
+ case EFI_LOADER_DATA:
+ case EFI_BOOT_SERVICES_CODE:
+ case EFI_BOOT_SERVICES_DATA:
+ case EFI_CONVENTIONAL_MEMORY:
+ case EFI_PERSISTENT_MEMORY:
+ if (memblock_is_map_memory(phys) ||
+ !memblock_is_region_memory(phys, size)) {
+ pr_warn(FW_BUG "requested region covers kernel memory\n");
+ return NULL;
+ }
+
+ /*
+ * Mapping kernel memory is permitted if the region in
+ * question is covered by a single memblock with the
+ * NOMAP attribute set: this enables the use of ACPI
+ * table overrides passed via initramfs.
+ * This particular use case only requires read access.
+ */
+ fallthrough;
+
+ case EFI_RUNTIME_SERVICES_CODE:
+ /*
+ * This would be unusual, but not problematic per se,
+ * as long as we take care not to create a writable
+ * mapping for executable code.
+ */
+ prot = PAGE_KERNEL_RO;
+ break;
+
+ case EFI_ACPI_RECLAIM_MEMORY:
+ /*
+ * ACPI reclaim memory is used to pass firmware tables
+ * and other data that is intended for consumption by
+ * the OS only, which may decide it wants to reclaim
+ * that memory and use it for something else. We never
+ * do that, but we usually add it to the linear map
+ * anyway, in which case we should use the existing
+ * mapping.
+ */
+ if (memblock_is_map_memory(phys))
+ return (void __iomem *)__va(phys);
+ fallthrough;
+
+ default:
+ if (region->attribute & EFI_MEMORY_WB)
+ prot = PAGE_KERNEL;
+ else if ((region->attribute & EFI_MEMORY_WC) ||
+ (region->attribute & EFI_MEMORY_WT))
+ prot = pgprot_writecombine(PAGE_KERNEL);
+ }
+ }
+
+ return ioremap_prot(phys, size, pgprot_val(prot));
}
#ifdef CONFIG_PCI
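
Distilled for review: the new acpi_os_ioremap() picks a page protection from the EFI descriptor that covers the request. A compact restatement of that decision (a sketch of the hunk's logic only; the RAM/NOMAP and reclaim-memory special cases above are elided):

	/* Sketch: protection for an ACPI mapping found inside an EFI region. */
	static pgprot_t acpi_md_prot(const efi_memory_desc_t *md)
	{
		if (md->type == EFI_RUNTIME_SERVICES_CODE)
			return PAGE_KERNEL_RO;	/* never writable + executable */
		if (md->attribute & EFI_MEMORY_WB)
			return PAGE_KERNEL;	/* normal cacheable memory */
		if (md->attribute & (EFI_MEMORY_WC | EFI_MEMORY_WT))
			return pgprot_writecombine(PAGE_KERNEL);
		return PAGE_KERNEL_IO;		/* device / strongly ordered */
	}

Regions absent from the EFI map entirely keep the PAGE_KERNEL_IO default, which is why prot is initialized before the lookup.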
diff --git a/arch/riscv/kernel/asm-offsets.c b/arch/riscv/kernel/asm-offsets.c
index 9f535d5de3..a03129f40c 100644
--- a/arch/riscv/kernel/asm-offsets.c
+++ b/arch/riscv/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <asm/thread_info.h>
#include <asm/ptrace.h>
#include <asm/cpu_ops_sbi.h>
+#include <asm/stacktrace.h>
#include <asm/suspend.h>
void asm_offsets(void);
@@ -38,6 +39,9 @@ void asm_offsets(void)
OFFSET(TASK_TI_PREEMPT_COUNT, task_struct, thread_info.preempt_count);
OFFSET(TASK_TI_KERNEL_SP, task_struct, thread_info.kernel_sp);
OFFSET(TASK_TI_USER_SP, task_struct, thread_info.user_sp);
+#ifdef CONFIG_SHADOW_CALL_STACK
+ OFFSET(TASK_TI_SCS_SP, task_struct, thread_info.scs_sp);
+#endif
OFFSET(TASK_TI_CPU_NUM, task_struct, thread_info.cpu);
OFFSET(TASK_THREAD_F0, task_struct, thread.fstate.f[0]);
@@ -480,4 +484,8 @@ void asm_offsets(void)
OFFSET(KERNEL_MAP_VIRT_ADDR, kernel_mapping, virt_addr);
OFFSET(SBI_HART_BOOT_TASK_PTR_OFFSET, sbi_hart_boot_data, task_ptr);
OFFSET(SBI_HART_BOOT_STACK_PTR_OFFSET, sbi_hart_boot_data, stack_ptr);
+
+ DEFINE(STACKFRAME_SIZE_ON_STACK, ALIGN(sizeof(struct stackframe), STACK_ALIGN));
+ OFFSET(STACKFRAME_FP, stackframe, fp);
+ OFFSET(STACKFRAME_RA, stackframe, ra);
}
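
The new STACKFRAME_* constants are consumed by entry.S below. For readers unfamiliar with asm-offsets: OFFSET()/DEFINE() come from include/linux/kbuild.h, which smuggles each constant into the compiler's assembly output as a marker that kbuild then rewrites into a #define in include/generated/asm-offsets.h, roughly:

	/* include/linux/kbuild.h (abridged): emit "->SYM value" markers
	 * into the .s output; a sed script turns them into #defines. */
	#define DEFINE(sym, val) \
		asm volatile("\n.ascii \"->" #sym " %0 " #val "\"" : : "i" (val))

	#define OFFSET(sym, str, mem) \
		DEFINE(sym, offsetof(struct str, mem))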
diff --git a/arch/riscv/kernel/compat_vdso/Makefile b/arch/riscv/kernel/compat_vdso/Makefile
index b86e5e2c3a..62fa393b2e 100644
--- a/arch/riscv/kernel/compat_vdso/Makefile
+++ b/arch/riscv/kernel/compat_vdso/Makefile
@@ -76,13 +76,3 @@ quiet_cmd_compat_vdsold = VDSOLD $@
# actual build commands
quiet_cmd_compat_vdsoas = VDSOAS $@
cmd_compat_vdsoas = $(COMPAT_CC) $(a_flags) $(COMPAT_CC_FLAGS) -c -o $@ $<
-
-# install commands for the unstripped file
-quiet_cmd_compat_vdso_install = INSTALL $@
- cmd_compat_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/compat_vdso/$@
-
-compat_vdso.so: $(obj)/compat_vdso.so.dbg
- @mkdir -p $(MODLIB)/compat_vdso
- $(call cmd,compat_vdso_install)
-
-compat_vdso_install: compat_vdso.so
diff --git a/arch/riscv/kernel/copy-unaligned.S b/arch/riscv/kernel/copy-unaligned.S
index cfdecfbaad..2b3d9398c1 100644
--- a/arch/riscv/kernel/copy-unaligned.S
+++ b/arch/riscv/kernel/copy-unaligned.S
@@ -9,7 +9,7 @@
/* void __riscv_copy_words_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using word loads and stores. */
/* Note: The size is truncated to a multiple of 8 * SZREG */
-ENTRY(__riscv_copy_words_unaligned)
+SYM_FUNC_START(__riscv_copy_words_unaligned)
andi a4, a2, ~((8*SZREG)-1)
beqz a4, 2f
add a3, a1, a4
@@ -36,12 +36,12 @@ ENTRY(__riscv_copy_words_unaligned)
2:
ret
-END(__riscv_copy_words_unaligned)
+SYM_FUNC_END(__riscv_copy_words_unaligned)
/* void __riscv_copy_bytes_unaligned(void *, const void *, size_t) */
/* Performs a memcpy without aligning buffers, using only byte accesses. */
/* Note: The size is truncated to a multiple of 8 */
-ENTRY(__riscv_copy_bytes_unaligned)
+SYM_FUNC_START(__riscv_copy_bytes_unaligned)
andi a4, a2, ~(8-1)
beqz a4, 2f
add a3, a1, a4
@@ -68,4 +68,4 @@ ENTRY(__riscv_copy_bytes_unaligned)
2:
ret
-END(__riscv_copy_bytes_unaligned)
+SYM_FUNC_END(__riscv_copy_bytes_unaligned)
diff --git a/arch/riscv/kernel/cpu.c b/arch/riscv/kernel/cpu.c
index 157ace8b26..d11d6320fb 100644
--- a/arch/riscv/kernel/cpu.c
+++ b/arch/riscv/kernel/cpu.c
@@ -203,9 +203,8 @@ arch_initcall(riscv_cpuinfo_init);
#ifdef CONFIG_PROC_FS
-static void print_isa(struct seq_file *f)
+static void print_isa(struct seq_file *f, const unsigned long *isa_bitmap)
{
- seq_puts(f, "isa\t\t: ");
if (IS_ENABLED(CONFIG_32BIT))
seq_write(f, "rv32", 4);
@@ -213,7 +212,7 @@ static void print_isa(struct seq_file *f)
seq_write(f, "rv64", 4);
for (int i = 0; i < riscv_isa_ext_count; i++) {
- if (!__riscv_isa_extension_available(NULL, riscv_isa_ext[i].id))
+ if (!__riscv_isa_extension_available(isa_bitmap, riscv_isa_ext[i].id))
continue;
/* Only multi-letter extensions are split by underscores */
@@ -277,7 +276,15 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "processor\t: %lu\n", cpu_id);
seq_printf(m, "hart\t\t: %lu\n", cpuid_to_hartid_map(cpu_id));
- print_isa(m);
+
+ /*
+ * For historical raisins, the isa: line is limited to the lowest common
+ * denominator of extensions supported across all harts. A true list of
+ * extensions supported on this hart is printed later in the hart isa:
+ * line.
+ */
+ seq_puts(m, "isa\t\t: ");
+ print_isa(m, NULL);
print_mmu(m);
if (acpi_disabled) {
@@ -293,6 +300,13 @@ static int c_show(struct seq_file *m, void *v)
seq_printf(m, "mvendorid\t: 0x%lx\n", ci->mvendorid);
seq_printf(m, "marchid\t\t: 0x%lx\n", ci->marchid);
seq_printf(m, "mimpid\t\t: 0x%lx\n", ci->mimpid);
+
+ /*
+ * Print the ISA extensions specific to this hart, which may show
+ * additional extensions not present across all harts.
+ */
+ seq_puts(m, "hart isa\t: ");
+ print_isa(m, hart_isa[cpu_id].isa);
seq_puts(m, "\n");
return 0;
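
With both lines emitted, /proc/cpuinfo on a system where one hart implements extensions beyond the common set would read along these lines (values hypothetical):

	processor	: 1
	hart		: 3
	isa		: rv64imafdc_zicsr_zifencei
	mmu		: sv39
	mvendorid	: 0x0
	marchid		: 0x0
	mimpid		: 0x0
	hart isa	: rv64imafdc_zicsr_zifencei_zicboz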
diff --git a/arch/riscv/kernel/cpufeature.c b/arch/riscv/kernel/cpufeature.c
index e12cd22755..b3785ffc15 100644
--- a/arch/riscv/kernel/cpufeature.c
+++ b/arch/riscv/kernel/cpufeature.c
@@ -8,6 +8,7 @@
#include <linux/acpi.h>
#include <linux/bitmap.h>
+#include <linux/cpuhotplug.h>
#include <linux/ctype.h>
#include <linux/log2.h>
#include <linux/memory.h>
@@ -29,6 +30,7 @@
#define MISALIGNED_ACCESS_JIFFIES_LG2 1
#define MISALIGNED_BUFFER_SIZE 0x4000
+#define MISALIGNED_BUFFER_ORDER get_order(MISALIGNED_BUFFER_SIZE)
#define MISALIGNED_COPY_SIZE ((MISALIGNED_BUFFER_SIZE / 2) - 0x80)
unsigned long elf_hwcap __read_mostly;
@@ -93,10 +95,10 @@ static bool riscv_isa_extension_check(int id)
return true;
case RISCV_ISA_EXT_ZICBOZ:
if (!riscv_cboz_block_size) {
- pr_err("Zicboz detected in ISA string, but no cboz-block-size found\n");
+ pr_err("Zicboz detected in ISA string, disabling as no cboz-block-size found\n");
return false;
} else if (!is_power_of_2(riscv_cboz_block_size)) {
- pr_err("cboz-block-size present, but is not a power-of-2\n");
+ pr_err("Zicboz disabled as cboz-block-size present, but is not a power-of-2\n");
return false;
}
return true;
@@ -167,6 +169,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zicbom, RISCV_ISA_EXT_ZICBOM),
__RISCV_ISA_EXT_DATA(zicboz, RISCV_ISA_EXT_ZICBOZ),
__RISCV_ISA_EXT_DATA(zicntr, RISCV_ISA_EXT_ZICNTR),
+ __RISCV_ISA_EXT_DATA(zicond, RISCV_ISA_EXT_ZICOND),
__RISCV_ISA_EXT_DATA(zicsr, RISCV_ISA_EXT_ZICSR),
__RISCV_ISA_EXT_DATA(zifencei, RISCV_ISA_EXT_ZIFENCEI),
__RISCV_ISA_EXT_DATA(zihintpause, RISCV_ISA_EXT_ZIHINTPAUSE),
@@ -175,6 +178,7 @@ const struct riscv_isa_ext_data riscv_isa_ext[] = {
__RISCV_ISA_EXT_DATA(zbb, RISCV_ISA_EXT_ZBB),
__RISCV_ISA_EXT_DATA(zbs, RISCV_ISA_EXT_ZBS),
__RISCV_ISA_EXT_DATA(smaia, RISCV_ISA_EXT_SMAIA),
+ __RISCV_ISA_EXT_DATA(smstateen, RISCV_ISA_EXT_SMSTATEEN),
__RISCV_ISA_EXT_DATA(ssaia, RISCV_ISA_EXT_SSAIA),
__RISCV_ISA_EXT_DATA(sscofpmf, RISCV_ISA_EXT_SSCOFPMF),
__RISCV_ISA_EXT_DATA(sstc, RISCV_ISA_EXT_SSTC),
@@ -204,10 +208,11 @@ static void __init riscv_parse_isa_string(unsigned long *this_hwcap, struct risc
switch (*ext) {
case 's':
/*
- * Workaround for invalid single-letter 's' & 'u'(QEMU).
+ * Workaround for invalid single-letter 's' & 'u' (QEMU).
* No need to set the bit in riscv_isa as 's' & 'u' are
- * not valid ISA extensions. It works until multi-letter
- * extension starting with "Su" appears.
+ * not valid ISA extensions. It works unless the first
+ * multi-letter extension in the ISA string begins with
+ * "Su" and is not prefixed with an underscore.
*/
if (ext[-1] != '_' && ext[1] == 'u') {
++isa;
@@ -556,27 +561,21 @@ unsigned long riscv_get_elf_hwcap(void)
return hwcap;
}
-void check_unaligned_access(int cpu)
+static int check_unaligned_access(void *param)
{
+ int cpu = smp_processor_id();
u64 start_cycles, end_cycles;
u64 word_cycles;
u64 byte_cycles;
int ratio;
unsigned long start_jiffies, now;
- struct page *page;
+ struct page *page = param;
void *dst;
void *src;
long speed = RISCV_HWPROBE_MISALIGNED_SLOW;
- /* We are already set since the last check */
- if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
- return;
-
- page = alloc_pages(GFP_NOWAIT, get_order(MISALIGNED_BUFFER_SIZE));
- if (!page) {
- pr_warn("Can't alloc pages to measure memcpy performance");
- return;
- }
+ if (check_unaligned_access_emulated(cpu))
+ return 0;
/* Make an unaligned destination buffer. */
dst = (void *)((unsigned long)page_address(page) | 0x1);
@@ -630,7 +629,7 @@ void check_unaligned_access(int cpu)
pr_warn("cpu%d: rdtime lacks granularity needed to measure unaligned access speed\n",
cpu);
- goto out;
+ return 0;
}
if (word_cycles < byte_cycles)
@@ -644,18 +643,90 @@ void check_unaligned_access(int cpu)
(speed == RISCV_HWPROBE_MISALIGNED_FAST) ? "fast" : "slow");
per_cpu(misaligned_access_speed, cpu) = speed;
+ return 0;
+}
-out:
- __free_pages(page, get_order(MISALIGNED_BUFFER_SIZE));
+static void check_unaligned_access_nonboot_cpu(void *param)
+{
+ unsigned int cpu = smp_processor_id();
+ struct page **pages = param;
+
+ if (smp_processor_id() != 0)
+ check_unaligned_access(pages[cpu]);
}
-static int check_unaligned_access_boot_cpu(void)
+static int riscv_online_cpu(unsigned int cpu)
{
- check_unaligned_access(0);
+ static struct page *buf;
+
+ /* We are already set since the last check */
+ if (per_cpu(misaligned_access_speed, cpu) != RISCV_HWPROBE_MISALIGNED_UNKNOWN)
+ return 0;
+
+ buf = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!buf) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return -ENOMEM;
+ }
+
+ check_unaligned_access(buf);
+ __free_pages(buf, MISALIGNED_BUFFER_ORDER);
return 0;
}
-arch_initcall(check_unaligned_access_boot_cpu);
+/* Measure unaligned access on all CPUs present at boot in parallel. */
+static int check_unaligned_access_all_cpus(void)
+{
+ unsigned int cpu;
+ unsigned int cpu_count = num_possible_cpus();
+ struct page **bufs = kzalloc(cpu_count * sizeof(struct page *),
+ GFP_KERNEL);
+
+ if (!bufs) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ return 0;
+ }
+
+ /*
+ * Allocate separate buffers for each CPU so there's no fighting over
+ * cache lines.
+ */
+ for_each_cpu(cpu, cpu_online_mask) {
+ bufs[cpu] = alloc_pages(GFP_KERNEL, MISALIGNED_BUFFER_ORDER);
+ if (!bufs[cpu]) {
+ pr_warn("Allocation failure, not measuring misaligned performance\n");
+ goto out;
+ }
+ }
+
+ /* Check everybody except 0, who stays behind to tend jiffies. */
+ on_each_cpu(check_unaligned_access_nonboot_cpu, bufs, 1);
+
+ /* Check core 0. */
+ smp_call_on_cpu(0, check_unaligned_access, bufs[0], true);
+
+ /* Setup hotplug callback for any new CPUs that come online. */
+ cpuhp_setup_state_nocalls(CPUHP_AP_ONLINE_DYN, "riscv:online",
+ riscv_online_cpu, NULL);
+
+out:
+ unaligned_emulation_finish();
+ for_each_cpu(cpu, cpu_online_mask) {
+ if (bufs[cpu])
+ __free_pages(bufs[cpu], MISALIGNED_BUFFER_ORDER);
+ }
+
+ kfree(bufs);
+ return 0;
+}
+
+arch_initcall(check_unaligned_access_all_cpus);
+
+void riscv_user_isa_enable(void)
+{
+ if (riscv_cpu_has_extension_unlikely(smp_processor_id(), RISCV_ISA_EXT_ZICBOZ))
+ csr_set(CSR_SENVCFG, ENVCFG_CBZE);
+}
#ifdef CONFIG_RISCV_ALTERNATIVE
/*
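
The speed class recorded in misaligned_access_speed is what userspace sees through the hwprobe syscall's RISCV_HWPROBE_KEY_CPUPERF_0 key. A minimal consumer, assuming the uapi names shipped in this kernel's <asm/hwprobe.h>:

	#include <asm/hwprobe.h>
	#include <stdio.h>
	#include <sys/syscall.h>
	#include <unistd.h>

	int main(void)
	{
		struct riscv_hwprobe pair = { .key = RISCV_HWPROBE_KEY_CPUPERF_0 };

		/* cpusetsize == 0, cpus == NULL: values common to all harts */
		if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
			return 1;

		switch (pair.value & RISCV_HWPROBE_MISALIGNED_MASK) {
		case RISCV_HWPROBE_MISALIGNED_FAST:
			puts("misaligned accesses: fast");
			break;
		case RISCV_HWPROBE_MISALIGNED_SLOW:
			puts("misaligned accesses: slow");
			break;
		case RISCV_HWPROBE_MISALIGNED_EMULATED:
			puts("misaligned accesses: emulated");
			break;
		default:
			puts("misaligned accesses: unknown");
		}
		return 0;
	}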
diff --git a/arch/riscv/kernel/entry.S b/arch/riscv/kernel/entry.S
index 278d01d291..54ca4564a9 100644
--- a/arch/riscv/kernel/entry.S
+++ b/arch/riscv/kernel/entry.S
@@ -9,6 +9,7 @@
#include <asm/asm.h>
#include <asm/csr.h>
+#include <asm/scs.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/thread_info.h>
@@ -25,9 +26,9 @@ SYM_CODE_START(handle_exception)
* register will contain 0, and we should continue on the current TP.
*/
csrrw tp, CSR_SCRATCH, tp
- bnez tp, _save_context
+ bnez tp, .Lsave_context
-_restore_kernel_tpsp:
+.Lrestore_kernel_tpsp:
csrr tp, CSR_SCRATCH
REG_S sp, TASK_TI_KERNEL_SP(tp)
@@ -39,7 +40,7 @@ _restore_kernel_tpsp:
REG_L sp, TASK_TI_KERNEL_SP(tp)
#endif
-_save_context:
+.Lsave_context:
REG_S sp, TASK_TI_USER_SP(tp)
REG_L sp, TASK_TI_KERNEL_SP(tp)
addi sp, sp, -(PT_SIZE_ON_STACK)
@@ -77,10 +78,11 @@ _save_context:
csrw CSR_SCRATCH, x0
/* Load the global pointer */
-.option push
-.option norelax
- la gp, __global_pointer$
-.option pop
+ load_global_pointer
+
+ /* Load the kernel shadow call stack pointer if coming from userspace */
+ scs_load_current_if_task_changed s5
+
move a0, sp /* pt_regs */
la ra, ret_from_exception
@@ -127,6 +129,9 @@ SYM_CODE_START_NOALIGN(ret_from_exception)
addi s0, sp, PT_SIZE_ON_STACK
REG_S s0, TASK_TI_KERNEL_SP(tp)
+ /* Save the kernel shadow call stack pointer */
+ scs_save_current
+
/*
* Save TP into the scratch register, so we can find the kernel data
* structures again.
@@ -220,6 +225,43 @@ SYM_CODE_START(ret_from_fork)
tail syscall_exit_to_user_mode
SYM_CODE_END(ret_from_fork)
+#ifdef CONFIG_IRQ_STACKS
+/*
+ * void call_on_irq_stack(struct pt_regs *regs,
+ * void (*func)(struct pt_regs *));
+ *
+ * Calls func(regs) using the per-CPU IRQ stack.
+ */
+SYM_FUNC_START(call_on_irq_stack)
+ /* Create a frame record to save ra and s0 (fp) */
+ addi sp, sp, -STACKFRAME_SIZE_ON_STACK
+ REG_S ra, STACKFRAME_RA(sp)
+ REG_S s0, STACKFRAME_FP(sp)
+ addi s0, sp, STACKFRAME_SIZE_ON_STACK
+
+ /* Switch to the per-CPU shadow call stack */
+ scs_save_current
+ scs_load_irq_stack t0
+
+ /* Switch to the per-CPU IRQ stack and call the handler */
+ load_per_cpu t0, irq_stack_ptr, t1
+ li t1, IRQ_STACK_SIZE
+ add sp, t0, t1
+ jalr a1
+
+ /* Switch back to the thread shadow call stack */
+ scs_load_current
+
+ /* Switch back to the thread stack and restore ra and s0 */
+ addi sp, s0, -STACKFRAME_SIZE_ON_STACK
+ REG_L ra, STACKFRAME_RA(sp)
+ REG_L s0, STACKFRAME_FP(sp)
+ addi sp, sp, STACKFRAME_SIZE_ON_STACK
+
+ ret
+SYM_FUNC_END(call_on_irq_stack)
+#endif /* CONFIG_IRQ_STACKS */
+
/*
* Integer register context switch
* The callee-saved registers must be saved and restored.
@@ -249,6 +291,8 @@ SYM_FUNC_START(__switch_to)
REG_S s9, TASK_THREAD_S9_RA(a3)
REG_S s10, TASK_THREAD_S10_RA(a3)
REG_S s11, TASK_THREAD_S11_RA(a3)
+ /* Save the kernel shadow call stack pointer */
+ scs_save_current
/* Restore context from next->thread */
REG_L ra, TASK_THREAD_RA_RA(a4)
REG_L sp, TASK_THREAD_SP_RA(a4)
@@ -266,6 +310,8 @@ SYM_FUNC_START(__switch_to)
REG_L s11, TASK_THREAD_S11_RA(a4)
/* The offset of thread_info in task_struct is zero. */
move tp, a1
+ /* Switch to the next shadow call stack */
+ scs_load_current
ret
SYM_FUNC_END(__switch_to)
@@ -276,7 +322,7 @@ SYM_FUNC_END(__switch_to)
.section ".rodata"
.align LGREG
/* Exception vector table */
-SYM_CODE_START(excp_vect_table)
+SYM_DATA_START_LOCAL(excp_vect_table)
RISCV_PTR do_trap_insn_misaligned
ALT_INSN_FAULT(RISCV_PTR do_trap_insn_fault)
RISCV_PTR do_trap_insn_illegal
@@ -294,12 +340,11 @@ SYM_CODE_START(excp_vect_table)
RISCV_PTR do_page_fault /* load page fault */
RISCV_PTR do_trap_unknown
RISCV_PTR do_page_fault /* store page fault */
-excp_vect_table_end:
-SYM_CODE_END(excp_vect_table)
+SYM_DATA_END_LABEL(excp_vect_table, SYM_L_LOCAL, excp_vect_table_end)
#ifndef CONFIG_MMU
-SYM_CODE_START(__user_rt_sigreturn)
+SYM_DATA_START(__user_rt_sigreturn)
li a7, __NR_rt_sigreturn
ecall
-SYM_CODE_END(__user_rt_sigreturn)
+SYM_DATA_END(__user_rt_sigreturn)
#endif
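
call_on_irq_stack() builds a two-slot frame record at the base of the new stack using the STACKFRAME_FP/STACKFRAME_RA offsets generated in the asm-offsets.c hunk earlier, so the unwinder can step from the IRQ stack back to the interrupted task stack. The record it mirrors is the unwinder's struct in asm/stacktrace.h:

	/* asm/stacktrace.h: frame record walked by the RISC-V unwinder */
	struct stackframe {
		unsigned long fp;	/* caller's frame pointer (s0) */
		unsigned long ra;	/* return address into the caller */
	};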
diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
index dd2205473d..2c543f130f 100644
--- a/arch/riscv/kernel/fpu.S
+++ b/arch/riscv/kernel/fpu.S
@@ -19,7 +19,7 @@
#include <asm/csr.h>
#include <asm/asm-offsets.h>
-ENTRY(__fstate_save)
+SYM_FUNC_START(__fstate_save)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -60,9 +60,9 @@ ENTRY(__fstate_save)
sw t0, TASK_THREAD_FCSR_F0(a0)
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_save)
+SYM_FUNC_END(__fstate_save)
-ENTRY(__fstate_restore)
+SYM_FUNC_START(__fstate_restore)
li a2, TASK_THREAD_F0
add a0, a0, a2
li t1, SR_FS
@@ -103,4 +103,125 @@ ENTRY(__fstate_restore)
fscsr t0
csrc CSR_STATUS, t1
ret
-ENDPROC(__fstate_restore)
+SYM_FUNC_END(__fstate_restore)
+
+#define get_f32(which) fmv.x.s a0, which; j 2f
+#define put_f32(which) fmv.s.x which, a1; j 2f
+#if __riscv_xlen == 64
+# define get_f64(which) fmv.x.d a0, which; j 2f
+# define put_f64(which) fmv.d.x which, a1; j 2f
+#else
+# define get_f64(which) fsd which, 0(a1); j 2f
+# define put_f64(which) fld which, 0(a1); j 2f
+#endif
+
+.macro fp_access_prologue
+ /*
+ * Compute jump offset to store the correct FP register since we don't
+ * have indirect FP register access
+ */
+ sll t0, a0, 3
+ la t2, 1f
+ add t0, t0, t2
+ li t1, SR_FS
+ csrs CSR_STATUS, t1
+ jr t0
+1:
+.endm
+
+.macro fp_access_epilogue
+2:
+ csrc CSR_STATUS, t1
+ ret
+.endm
+
+#define fp_access_body(__access_func) \
+ __access_func(f0); \
+ __access_func(f1); \
+ __access_func(f2); \
+ __access_func(f3); \
+ __access_func(f4); \
+ __access_func(f5); \
+ __access_func(f6); \
+ __access_func(f7); \
+ __access_func(f8); \
+ __access_func(f9); \
+ __access_func(f10); \
+ __access_func(f11); \
+ __access_func(f12); \
+ __access_func(f13); \
+ __access_func(f14); \
+ __access_func(f15); \
+ __access_func(f16); \
+ __access_func(f17); \
+ __access_func(f18); \
+ __access_func(f19); \
+ __access_func(f20); \
+ __access_func(f21); \
+ __access_func(f22); \
+ __access_func(f23); \
+ __access_func(f24); \
+ __access_func(f25); \
+ __access_func(f26); \
+ __access_func(f27); \
+ __access_func(f28); \
+ __access_func(f29); \
+ __access_func(f30); \
+ __access_func(f31)
+
+
+#ifdef CONFIG_RISCV_MISALIGNED
+
+/*
+ * Disable compressed instructions set to keep a constant offset between FP
+ * load/store/move instructions
+ */
+.option norvc
+/*
+ * put_f32_reg - Set a FP register from a register containing the value
+ * a0 = FP register index to be set
+ * a1 = value to be loaded in the FP register
+ */
+SYM_FUNC_START(put_f32_reg)
+ fp_access_prologue
+ fp_access_body(put_f32)
+ fp_access_epilogue
+SYM_FUNC_END(put_f32_reg)
+
+/*
+ * get_f32_reg - Get a FP register value and return it
+ * a0 = FP register index to be retrieved
+ */
+SYM_FUNC_START(get_f32_reg)
+ fp_access_prologue
+ fp_access_body(get_f32)
+ fp_access_epilogue
+SYM_FUNC_END(get_f32_reg)
+
+/*
+ * put_f64_reg - Set a 64 bits FP register from a value or a pointer.
+ * a0 = FP register index to be set
+ * a1 = value/pointer to be loaded in the FP register (when xlen == 32 bits, we
+ * load the value from a pointer).
+ */
+SYM_FUNC_START(put_f64_reg)
+ fp_access_prologue
+ fp_access_body(put_f64)
+ fp_access_epilogue
+SYM_FUNC_END(put_f64_reg)
+
+/*
+ * get_f64_reg - Get a 64 bits FP register value and return it or store it to
+ * a pointer.
+ * a0 = FP register index to be retrieved
+ * a1 = If xlen == 32, pointer which should be loaded with the FP register value
+ * or unused if xlen == 64. In which case the FP register value is returned
+ * through a0
+ */
+SYM_FUNC_START(get_f64_reg)
+ fp_access_prologue
+ fp_access_body(get_f64)
+ fp_access_epilogue
+SYM_FUNC_END(get_f64_reg)
+
+#endif /* CONFIG_RISCV_MISALIGNED */
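
The fp_access_prologue trick: with .option norvc in force, every fp_access_body() entry assembles to exactly eight bytes (one 4-byte FP instruction plus a 4-byte j 2f), so the handler for register n lives at 1f + (n << 3) and a single jr replaces the indirect FP-register addressing the ISA lacks. A user-space analogue of that computed dispatch, with illustrative names and GCC's labels-as-values extension standing in for the fixed-stride layout:

	#include <stdio.h>

	static int dispatch(int n)
	{
		/* fpu.S needs no table at all: the uniform entry size makes
		 * the target address simply base + (n << 3). */
		static void *slot[4] = { &&s0, &&s1, &&s2, &&s3 };

		goto *slot[n];
	s0:	return 0;
	s1:	return 10;
	s2:	return 20;
	s3:	return 30;
	}

	int main(void)
	{
		printf("%d\n", dispatch(2));	/* prints 20 */
		return 0;
	}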
diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
index 3710ea5d16..663881785b 100644
--- a/arch/riscv/kernel/head.S
+++ b/arch/riscv/kernel/head.S
@@ -14,11 +14,12 @@
#include <asm/cpu_ops_sbi.h>
#include <asm/hwcap.h>
#include <asm/image.h>
+#include <asm/scs.h>
#include <asm/xip_fixup.h>
#include "efi-header.S"
__HEAD
-ENTRY(_start)
+SYM_CODE_START(_start)
/*
* Image header expected by Linux boot-loaders. The image header data
* structure is described in asm/image.h.
@@ -88,6 +89,7 @@ relocate_enable_mmu:
/* Compute satp for kernel page tables, but don't load it yet */
srl a2, a0, PAGE_SHIFT
la a1, satp_mode
+ XIP_FIXUP_OFFSET a1
REG_L a1, 0(a1)
or a2, a2, a1
@@ -110,10 +112,7 @@ relocate_enable_mmu:
csrw CSR_TVEC, a0
/* Reload the global pointer */
-.option push
-.option norelax
- la gp, __global_pointer$
-.option pop
+ load_global_pointer
/*
* Switch to kernel page tables. A full fence is necessary in order to
@@ -134,10 +133,7 @@ secondary_start_sbi:
csrw CSR_IP, zero
/* Load the global pointer */
- .option push
- .option norelax
- la gp, __global_pointer$
- .option pop
+ load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@@ -168,12 +164,13 @@ secondary_start_sbi:
XIP_FIXUP_OFFSET a0
call relocate_enable_mmu
#endif
- call setup_trap_vector
+ call .Lsetup_trap_vector
+ scs_load_current
tail smp_callin
#endif /* CONFIG_SMP */
.align 2
-setup_trap_vector:
+.Lsetup_trap_vector:
/* Set trap vector to exception handler */
la a0, handle_exception
csrw CSR_TVEC, a0
@@ -191,9 +188,9 @@ setup_trap_vector:
wfi
j .Lsecondary_park
-END(_start)
+SYM_CODE_END(_start)
-ENTRY(_start_kernel)
+SYM_CODE_START(_start_kernel)
/* Mask all interrupts */
csrw CSR_IE, zero
csrw CSR_IP, zero
@@ -210,7 +207,7 @@ ENTRY(_start_kernel)
* not implement PMPs, so we set up a quick trap handler to just skip
* touching the PMPs on any trap.
*/
- la a0, pmp_done
+ la a0, .Lpmp_done
csrw CSR_TVEC, a0
li a0, -1
@@ -218,7 +215,7 @@ ENTRY(_start_kernel)
li a0, (PMP_A_NAPOT | PMP_R | PMP_W | PMP_X)
csrw CSR_PMPCFG0, a0
.align 2
-pmp_done:
+.Lpmp_done:
/*
* The hartid in a0 is expected later on, and we have no firmware
@@ -228,10 +225,7 @@ pmp_done:
#endif /* CONFIG_RISCV_M_MODE */
/* Load the global pointer */
-.option push
-.option norelax
- la gp, __global_pointer$
-.option pop
+ load_global_pointer
/*
* Disable FPU & VECTOR to detect illegal usage of
@@ -282,12 +276,12 @@ pmp_done:
/* Clear BSS for flat non-ELF images */
la a3, __bss_start
la a4, __bss_stop
- ble a4, a3, clear_bss_done
-clear_bss:
+ ble a4, a3, .Lclear_bss_done
+.Lclear_bss:
REG_S zero, (a3)
add a3, a3, RISCV_SZPTR
- blt a3, a4, clear_bss
-clear_bss_done:
+ blt a3, a4, .Lclear_bss
+.Lclear_bss_done:
#endif
la a2, boot_cpu_hartid
XIP_FIXUP_OFFSET a2
@@ -298,6 +292,7 @@ clear_bss_done:
la sp, init_thread_union + THREAD_SIZE
XIP_FIXUP_OFFSET sp
addi sp, sp, -PT_SIZE_ON_STACK
+ scs_load_init_stack
#ifdef CONFIG_BUILTIN_DTB
la a0, __dtb_start
XIP_FIXUP_OFFSET a0
@@ -311,11 +306,12 @@ clear_bss_done:
call relocate_enable_mmu
#endif /* CONFIG_MMU */
- call setup_trap_vector
+ call .Lsetup_trap_vector
/* Restore C environment */
la tp, init_task
la sp, init_thread_union + THREAD_SIZE
addi sp, sp, -PT_SIZE_ON_STACK
+ scs_load_current
#ifdef CONFIG_KASAN
call kasan_early_init
@@ -353,10 +349,10 @@ clear_bss_done:
tail .Lsecondary_start_common
#endif /* CONFIG_RISCV_BOOT_SPINWAIT */
-END(_start_kernel)
+SYM_CODE_END(_start_kernel)
#ifdef CONFIG_RISCV_M_MODE
-ENTRY(reset_regs)
+SYM_CODE_START_LOCAL(reset_regs)
li sp, 0
li gp, 0
li tp, 0
@@ -454,5 +450,5 @@ ENTRY(reset_regs)
.Lreset_regs_done_vector:
#endif /* CONFIG_RISCV_ISA_V */
ret
-END(reset_regs)
+SYM_CODE_END(reset_regs)
#endif /* CONFIG_RISCV_M_MODE */
diff --git a/arch/riscv/kernel/hibernate-asm.S b/arch/riscv/kernel/hibernate-asm.S
index d698dd7df6..d040dcf4ad 100644
--- a/arch/riscv/kernel/hibernate-asm.S
+++ b/arch/riscv/kernel/hibernate-asm.S
@@ -21,7 +21,7 @@
*
* Always returns 0
*/
-ENTRY(__hibernate_cpu_resume)
+SYM_FUNC_START(__hibernate_cpu_resume)
/* switch to hibernated image's page table. */
csrw CSR_SATP, s0
sfence.vma
@@ -34,7 +34,7 @@ ENTRY(__hibernate_cpu_resume)
mv a0, zero
ret
-END(__hibernate_cpu_resume)
+SYM_FUNC_END(__hibernate_cpu_resume)
/*
* Prepare to restore the image.
@@ -42,7 +42,7 @@ END(__hibernate_cpu_resume)
* a1: satp of temporary page tables.
* a2: cpu_resume.
*/
-ENTRY(hibernate_restore_image)
+SYM_FUNC_START(hibernate_restore_image)
mv s0, a0
mv s1, a1
mv s2, a2
@@ -50,7 +50,7 @@ ENTRY(hibernate_restore_image)
REG_L a1, relocated_restore_code
jr a1
-END(hibernate_restore_image)
+SYM_FUNC_END(hibernate_restore_image)
/*
* The below code will be executed from a 'safe' page.
@@ -58,7 +58,7 @@ END(hibernate_restore_image)
* back to the original memory location. Finally, it jumps to __hibernate_cpu_resume()
* to restore the CPU context.
*/
-ENTRY(hibernate_core_restore_code)
+SYM_FUNC_START(hibernate_core_restore_code)
/* switch to temp page table. */
csrw satp, s1
sfence.vma
@@ -73,4 +73,4 @@ ENTRY(hibernate_core_restore_code)
bnez s4, .Lcopy
jr s2
-END(hibernate_core_restore_code)
+SYM_FUNC_END(hibernate_core_restore_code)
diff --git a/arch/riscv/kernel/image-vars.h b/arch/riscv/kernel/image-vars.h
index ea1a10355c..3df30dd1c4 100644
--- a/arch/riscv/kernel/image-vars.h
+++ b/arch/riscv/kernel/image-vars.h
@@ -28,7 +28,9 @@ __efistub__start_kernel = _start_kernel;
__efistub__end = _end;
__efistub__edata = _edata;
__efistub___init_text_end = __init_text_end;
+#if defined(CONFIG_EFI_EARLYCON) || defined(CONFIG_SYSFB)
__efistub_screen_info = screen_info;
+#endif
#endif
diff --git a/arch/riscv/kernel/irq.c b/arch/riscv/kernel/irq.c
index 9cc0a76692..9ceda02507 100644
--- a/arch/riscv/kernel/irq.c
+++ b/arch/riscv/kernel/irq.c
@@ -9,6 +9,7 @@
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
+#include <linux/scs.h>
#include <linux/seq_file.h>
#include <asm/sbi.h>
#include <asm/smp.h>
@@ -34,6 +35,24 @@ EXPORT_SYMBOL_GPL(riscv_get_intc_hwnode);
#ifdef CONFIG_IRQ_STACKS
#include <asm/irq_stack.h>
+DECLARE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
+
+#ifdef CONFIG_SHADOW_CALL_STACK
+DEFINE_PER_CPU(ulong *, irq_shadow_call_stack_ptr);
+#endif
+
+static void init_irq_scs(void)
+{
+ int cpu;
+
+ if (!scs_is_enabled())
+ return;
+
+ for_each_possible_cpu(cpu)
+ per_cpu(irq_shadow_call_stack_ptr, cpu) =
+ scs_alloc(cpu_to_node(cpu));
+}
+
DEFINE_PER_CPU(ulong *, irq_stack_ptr);
#ifdef CONFIG_VMAP_STACK
@@ -61,40 +80,22 @@ static void init_irq_stacks(void)
#endif /* CONFIG_VMAP_STACK */
#ifdef CONFIG_SOFTIRQ_ON_OWN_STACK
+static void ___do_softirq(struct pt_regs *regs)
+{
+ __do_softirq();
+}
+
void do_softirq_own_stack(void)
{
-#ifdef CONFIG_IRQ_STACKS
- if (on_thread_stack()) {
- ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
- + IRQ_STACK_SIZE/sizeof(ulong);
- __asm__ __volatile(
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" ra, (sp) \n"
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" s0, (sp) \n"
- "addi s0, sp, 2*"RISCV_SZPTR "\n"
- "move sp, %[sp] \n"
- "call __do_softirq \n"
- "addi sp, s0, -2*"RISCV_SZPTR"\n"
- REG_L" s0, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- REG_L" ra, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- :
- : [sp] "r" (sp)
- : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-#ifndef CONFIG_FRAME_POINTER
- "s0",
-#endif
- "memory");
- } else
-#endif
+ if (on_thread_stack())
+ call_on_irq_stack(NULL, ___do_softirq);
+ else
__do_softirq();
}
#endif /* CONFIG_SOFTIRQ_ON_OWN_STACK */
#else
+static void init_irq_scs(void) {}
static void init_irq_stacks(void) {}
#endif /* CONFIG_IRQ_STACKS */
@@ -106,6 +107,7 @@ int arch_show_interrupts(struct seq_file *p, int prec)
void __init init_IRQ(void)
{
+ init_irq_scs();
init_irq_stacks();
irqchip_init();
if (!handle_arch_irq)
diff --git a/arch/riscv/kernel/kexec_relocate.S b/arch/riscv/kernel/kexec_relocate.S
index 059c5e216a..de0a4b35d0 100644
--- a/arch/riscv/kernel/kexec_relocate.S
+++ b/arch/riscv/kernel/kexec_relocate.S
@@ -17,27 +17,17 @@ SYM_CODE_START(riscv_kexec_relocate)
* s1: (const) Phys address to jump to after relocation
* s2: (const) Phys address of the FDT image
* s3: (const) The hartid of the current hart
- * s4: Pointer to the destination address for the relocation
- * s5: (const) Number of words per page
- * s6: (const) 1, used for subtraction
- * s7: (const) kernel_map.va_pa_offset, used when switching MMU off
- * s8: (const) Physical address of the main loop
- * s9: (debug) indirection page counter
- * s10: (debug) entry counter
- * s11: (debug) copied words counter
+ * s4: (const) kernel_map.va_pa_offset, used when switching MMU off
+ * s5: Pointer to the destination address for the relocation
+ * s6: (const) Physical address of the main loop
*/
mv s0, a0
mv s1, a1
mv s2, a2
mv s3, a3
- mv s4, zero
- li s5, (PAGE_SIZE / RISCV_SZPTR)
- li s6, 1
- mv s7, a4
- mv s8, zero
- mv s9, zero
- mv s10, zero
- mv s11, zero
+ mv s4, a4
+ mv s5, zero
+ mv s6, zero
/* Disable / cleanup interrupts */
csrw CSR_SIE, zero
@@ -52,21 +42,27 @@ SYM_CODE_START(riscv_kexec_relocate)
* the start of the loop below so that we jump there in
* any case.
*/
- la s8, 1f
- sub s8, s8, s7
- csrw CSR_STVEC, s8
+ la s6, 1f
+ sub s6, s6, s4
+ csrw CSR_STVEC, s6
+
+ /*
+ * With C-extension, here we get 42 Bytes and the next
+ * .align directive would pad zeros here up to 44 Bytes.
+ * So manually put a nop here to avoid zeros padding.
+ */
+ nop
/* Process entries in a loop */
.align 2
1:
- addi s10, s10, 1
REG_L t0, 0(s0) /* t0 = *image->entry */
addi s0, s0, RISCV_SZPTR /* image->entry++ */
/* IND_DESTINATION entry ? -> save destination address */
andi t1, t0, 0x1
beqz t1, 2f
- andi s4, t0, ~0x1
+ andi s5, t0, ~0x1
j 1b
2:
@@ -74,9 +70,8 @@ SYM_CODE_START(riscv_kexec_relocate)
andi t1, t0, 0x2
beqz t1, 2f
andi s0, t0, ~0x2
- addi s9, s9, 1
csrw CSR_SATP, zero
- jalr zero, s8, 0
+ jr s6
2:
/* IND_DONE entry ? -> jump to done label */
@@ -92,14 +87,13 @@ SYM_CODE_START(riscv_kexec_relocate)
andi t1, t0, 0x8
beqz t1, 1b /* Unknown entry type, ignore it */
andi t0, t0, ~0x8
- mv t3, s5 /* i = num words per page */
+ li t3, (PAGE_SIZE / RISCV_SZPTR) /* i = num words per page */
3: /* copy loop */
REG_L t1, (t0) /* t1 = *src_ptr */
- REG_S t1, (s4) /* *dst_ptr = *src_ptr */
+ REG_S t1, (s5) /* *dst_ptr = *src_ptr */
addi t0, t0, RISCV_SZPTR /* src_ptr++ */
- addi s4, s4, RISCV_SZPTR /* dst_ptr++ */
- sub t3, t3, s6 /* i-- */
- addi s11, s11, 1 /* c++ */
+ addi s5, s5, RISCV_SZPTR /* dst_ptr++ */
+ addi t3, t3, -0x1 /* i-- */
beqz t3, 1b /* copy done ? */
j 3b
@@ -146,7 +140,7 @@ SYM_CODE_START(riscv_kexec_relocate)
*/
fence.i
- jalr zero, a2, 0
+ jr a2
SYM_CODE_END(riscv_kexec_relocate)
riscv_kexec_relocate_end:
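
For orientation, the loop above walks the kexec indirection list: each entry is a physical address tagged in its low bits with the standard kexec flags (IND_DESTINATION 0x1, IND_INDIRECTION 0x2, IND_DONE 0x4, IND_SOURCE 0x8, per include/linux/kexec.h). In C, with copy_page_words() as a hypothetical stand-in for the word-copy loop:

	/* hypothetical: copies one page word-by-word, returns advanced dst */
	static unsigned long *copy_page_words(unsigned long *dst, void *src);

	static void kexec_walk(unsigned long *entry)
	{
		unsigned long *dst = NULL;		/* s5 in the assembly */

		for (;;) {
			unsigned long e = *entry++;	/* s0 in the assembly */

			if (e & 0x1)			/* IND_DESTINATION */
				dst = (unsigned long *)(e & ~0x1UL);
			else if (e & 0x2)		/* IND_INDIRECTION */
				entry = (unsigned long *)(e & ~0x2UL);
			else if (e & 0x4)		/* IND_DONE */
				break;
			else if (e & 0x8)		/* IND_SOURCE */
				dst = copy_page_words(dst, (void *)(e & ~0x8UL));
			/* unknown tags are skipped, as in the assembly */
		}
	}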
diff --git a/arch/riscv/kernel/mcount-dyn.S b/arch/riscv/kernel/mcount-dyn.S
index 669b8697aa..58dd96a2a1 100644
--- a/arch/riscv/kernel/mcount-dyn.S
+++ b/arch/riscv/kernel/mcount-dyn.S
@@ -82,7 +82,7 @@
.endm
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
-ENTRY(ftrace_caller)
+SYM_FUNC_START(ftrace_caller)
SAVE_ABI
addi a0, t0, -FENTRY_RA_OFFSET
@@ -91,8 +91,7 @@ ENTRY(ftrace_caller)
mv a1, ra
mv a3, sp
-ftrace_call:
- .global ftrace_call
+SYM_INNER_LABEL(ftrace_call, SYM_L_GLOBAL)
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,16 +101,15 @@ ftrace_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_call:
- .global ftrace_graph_call
+SYM_INNER_LABEL(ftrace_graph_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ABI
jr t0
-ENDPROC(ftrace_caller)
+SYM_FUNC_END(ftrace_caller)
#ifdef CONFIG_DYNAMIC_FTRACE_WITH_REGS
-ENTRY(ftrace_regs_caller)
+SYM_FUNC_START(ftrace_regs_caller)
SAVE_ALL
addi a0, t0, -FENTRY_RA_OFFSET
@@ -120,8 +118,7 @@ ENTRY(ftrace_regs_caller)
mv a1, ra
mv a3, sp
-ftrace_regs_call:
- .global ftrace_regs_call
+SYM_INNER_LABEL(ftrace_regs_call, SYM_L_GLOBAL)
call ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -131,12 +128,11 @@ ftrace_regs_call:
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
mv a2, s0
#endif
-ftrace_graph_regs_call:
- .global ftrace_graph_regs_call
+SYM_INNER_LABEL(ftrace_graph_regs_call, SYM_L_GLOBAL)
call ftrace_stub
#endif
RESTORE_ALL
jr t0
-ENDPROC(ftrace_regs_caller)
+SYM_FUNC_END(ftrace_regs_caller)
#endif /* CONFIG_DYNAMIC_FTRACE_WITH_REGS */
diff --git a/arch/riscv/kernel/mcount.S b/arch/riscv/kernel/mcount.S
index 8818a8fa9f..b4dd9ed684 100644
--- a/arch/riscv/kernel/mcount.S
+++ b/arch/riscv/kernel/mcount.S
@@ -61,7 +61,7 @@ SYM_TYPED_FUNC_START(ftrace_stub_graph)
ret
SYM_FUNC_END(ftrace_stub_graph)
-ENTRY(return_to_handler)
+SYM_FUNC_START(return_to_handler)
/*
* On implementing the frame pointer test, the ideal way is to compare the
* s0 (frame pointer, if enabled) on entry and the sp (stack pointer) on return.
@@ -76,25 +76,25 @@ ENTRY(return_to_handler)
mv a2, a0
RESTORE_RET_ABI_STATE
jalr a2
-ENDPROC(return_to_handler)
+SYM_FUNC_END(return_to_handler)
#endif
#ifndef CONFIG_DYNAMIC_FTRACE
-ENTRY(MCOUNT_NAME)
+SYM_FUNC_START(MCOUNT_NAME)
la t4, ftrace_stub
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
la t0, ftrace_graph_return
REG_L t1, 0(t0)
- bne t1, t4, do_ftrace_graph_caller
+ bne t1, t4, .Ldo_ftrace_graph_caller
la t3, ftrace_graph_entry
REG_L t2, 0(t3)
la t6, ftrace_graph_entry_stub
- bne t2, t6, do_ftrace_graph_caller
+ bne t2, t6, .Ldo_ftrace_graph_caller
#endif
la t3, ftrace_trace_function
REG_L t5, 0(t3)
- bne t5, t4, do_trace
+ bne t5, t4, .Ldo_trace
ret
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
@@ -102,7 +102,7 @@ ENTRY(MCOUNT_NAME)
* A pseudo representation for the function graph tracer:
* prepare_to_return(&ra_to_caller_of_caller, ra_to_caller)
*/
-do_ftrace_graph_caller:
+.Ldo_ftrace_graph_caller:
addi a0, s0, -SZREG
mv a1, ra
#ifdef HAVE_FUNCTION_GRAPH_FP_TEST
@@ -118,7 +118,7 @@ do_ftrace_graph_caller:
* A pseudo representation for the function tracer:
* (*ftrace_trace_function)(ra_to_caller, ra_to_caller_of_caller)
*/
-do_trace:
+.Ldo_trace:
REG_L a1, -SZREG(s0)
mv a0, ra
@@ -126,6 +126,6 @@ do_trace:
jalr t5
RESTORE_ABI_STATE
ret
-ENDPROC(MCOUNT_NAME)
+SYM_FUNC_END(MCOUNT_NAME)
#endif
EXPORT_SYMBOL(MCOUNT_NAME)
diff --git a/arch/riscv/kernel/module.c b/arch/riscv/kernel/module.c
index df4f6fec5d..c9d59a5448 100644
--- a/arch/riscv/kernel/module.c
+++ b/arch/riscv/kernel/module.c
@@ -7,6 +7,9 @@
#include <linux/elf.h>
#include <linux/err.h>
#include <linux/errno.h>
+#include <linux/hashtable.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
#include <linux/moduleloader.h>
#include <linux/vmalloc.h>
#include <linux/sizes.h>
@@ -14,6 +17,29 @@
#include <asm/alternative.h>
#include <asm/sections.h>
+struct used_bucket {
+ struct list_head head;
+ struct hlist_head *bucket;
+};
+
+struct relocation_head {
+ struct hlist_node node;
+ struct list_head *rel_entry;
+ void *location;
+};
+
+struct relocation_entry {
+ struct list_head head;
+ Elf_Addr value;
+ unsigned int type;
+};
+
+struct relocation_handlers {
+ int (*reloc_handler)(struct module *me, void *location, Elf_Addr v);
+ int (*accumulate_handler)(struct module *me, void *location,
+ long buffer);
+};
+
/*
* The auipc+jalr instruction pair can reach any PC-relative offset
* in the range [-2^31 - 2^11, 2^31 - 2^11)
@@ -27,68 +53,90 @@ static bool riscv_insn_valid_32bit_offset(ptrdiff_t val)
#endif
}
-static int apply_r_riscv_32_rela(struct module *me, u32 *location, Elf_Addr v)
+static int riscv_insn_rmw(void *location, u32 keep, u32 set)
+{
+ __le16 *parcel = location;
+ u32 insn = (u32)le16_to_cpu(parcel[0]) | (u32)le16_to_cpu(parcel[1]) << 16;
+
+ insn &= keep;
+ insn |= set;
+
+ parcel[0] = cpu_to_le16(insn);
+ parcel[1] = cpu_to_le16(insn >> 16);
+ return 0;
+}
+
+static int riscv_insn_rvc_rmw(void *location, u16 keep, u16 set)
+{
+ __le16 *parcel = location;
+ u16 insn = le16_to_cpu(*parcel);
+
+ insn &= keep;
+ insn |= set;
+
+ *parcel = cpu_to_le16(insn);
+ return 0;
+}
+
+static int apply_r_riscv_32_rela(struct module *me, void *location, Elf_Addr v)
{
if (v != (u32)v) {
pr_err("%s: value %016llx out of range for 32-bit field\n",
me->name, (long long)v);
return -EINVAL;
}
- *location = v;
+ *(u32 *)location = v;
return 0;
}
-static int apply_r_riscv_64_rela(struct module *me, u32 *location, Elf_Addr v)
+static int apply_r_riscv_64_rela(struct module *me, void *location, Elf_Addr v)
{
*(u64 *)location = v;
return 0;
}
-static int apply_r_riscv_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_branch_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 imm12 = (offset & 0x1000) << (31 - 12);
u32 imm11 = (offset & 0x800) >> (11 - 7);
u32 imm10_5 = (offset & 0x7e0) << (30 - 10);
u32 imm4_1 = (offset & 0x1e) << (11 - 4);
- *location = (*location & 0x1fff07f) | imm12 | imm11 | imm10_5 | imm4_1;
- return 0;
+ return riscv_insn_rmw(location, 0x1fff07f, imm12 | imm11 | imm10_5 | imm4_1);
}
-static int apply_r_riscv_jal_rela(struct module *me, u32 *location,
+static int apply_r_riscv_jal_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 imm20 = (offset & 0x100000) << (31 - 20);
u32 imm19_12 = (offset & 0xff000);
u32 imm11 = (offset & 0x800) << (20 - 11);
u32 imm10_1 = (offset & 0x7fe) << (30 - 10);
- *location = (*location & 0xfff) | imm20 | imm19_12 | imm11 | imm10_1;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, imm20 | imm19_12 | imm11 | imm10_1);
}
-static int apply_r_riscv_rvc_branch_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_branch_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u16 imm8 = (offset & 0x100) << (12 - 8);
u16 imm7_6 = (offset & 0xc0) >> (6 - 5);
u16 imm5 = (offset & 0x20) >> (5 - 2);
u16 imm4_3 = (offset & 0x18) << (12 - 5);
u16 imm2_1 = (offset & 0x6) << (12 - 10);
- *(u16 *)location = (*(u16 *)location & 0xe383) |
- imm8 | imm7_6 | imm5 | imm4_3 | imm2_1;
- return 0;
+ return riscv_insn_rvc_rmw(location, 0xe383,
+ imm8 | imm7_6 | imm5 | imm4_3 | imm2_1);
}
-static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
+static int apply_r_riscv_rvc_jump_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u16 imm11 = (offset & 0x800) << (12 - 11);
u16 imm10 = (offset & 0x400) >> (10 - 8);
u16 imm9_8 = (offset & 0x300) << (12 - 11);
@@ -98,16 +146,14 @@ static int apply_r_riscv_rvc_jump_rela(struct module *me, u32 *location,
u16 imm4 = (offset & 0x10) << (12 - 5);
u16 imm3_1 = (offset & 0xe) << (12 - 10);
- *(u16 *)location = (*(u16 *)location & 0xe003) |
- imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1;
- return 0;
+ return riscv_insn_rvc_rmw(location, 0xe003,
+ imm11 | imm10 | imm9_8 | imm7 | imm6 | imm5 | imm4 | imm3_1);
}
-static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_hi20_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
- s32 hi20;
+ ptrdiff_t offset = (void *)v - location;
if (!riscv_insn_valid_32bit_offset(offset)) {
pr_err(
@@ -116,23 +162,20 @@ static int apply_r_riscv_pcrel_hi20_rela(struct module *me, u32 *location,
return -EINVAL;
}
- hi20 = (offset + 0x800) & 0xfffff000;
- *location = (*location & 0xfff) | hi20;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}
-static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_lo12_i_rela(struct module *me, void *location,
Elf_Addr v)
{
/*
* v is the lo12 value to fill. It is calculated before calling this
* handler.
*/
- *location = (*location & 0xfffff) | ((v & 0xfff) << 20);
- return 0;
+ return riscv_insn_rmw(location, 0xfffff, (v & 0xfff) << 20);
}
-static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
+static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, void *location,
Elf_Addr v)
{
/*
@@ -142,15 +185,12 @@ static int apply_r_riscv_pcrel_lo12_s_rela(struct module *me, u32 *location,
u32 imm11_5 = (v & 0xfe0) << (31 - 11);
u32 imm4_0 = (v & 0x1f) << (11 - 4);
- *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
- return 0;
+ return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}
-static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_hi20_rela(struct module *me, void *location,
Elf_Addr v)
{
- s32 hi20;
-
if (IS_ENABLED(CONFIG_CMODEL_MEDLOW)) {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -158,22 +198,20 @@ static int apply_r_riscv_hi20_rela(struct module *me, u32 *location,
return -EINVAL;
}
- hi20 = ((s32)v + 0x800) & 0xfffff000;
- *location = (*location & 0xfff) | hi20;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, ((s32)v + 0x800) & 0xfffff000);
}
-static int apply_r_riscv_lo12_i_rela(struct module *me, u32 *location,
+static int apply_r_riscv_lo12_i_rela(struct module *me, void *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
s32 hi20 = ((s32)v + 0x800) & 0xfffff000;
s32 lo12 = ((s32)v - hi20);
- *location = (*location & 0xfffff) | ((lo12 & 0xfff) << 20);
- return 0;
+
+ return riscv_insn_rmw(location, 0xfffff, (lo12 & 0xfff) << 20);
}
-static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
+static int apply_r_riscv_lo12_s_rela(struct module *me, void *location,
Elf_Addr v)
{
/* Skip medlow checking because of filtering by HI20 already */
@@ -181,20 +219,18 @@ static int apply_r_riscv_lo12_s_rela(struct module *me, u32 *location,
s32 lo12 = ((s32)v - hi20);
u32 imm11_5 = (lo12 & 0xfe0) << (31 - 11);
u32 imm4_0 = (lo12 & 0x1f) << (11 - 4);
- *location = (*location & 0x1fff07f) | imm11_5 | imm4_0;
- return 0;
+
+ return riscv_insn_rmw(location, 0x1fff07f, imm11_5 | imm4_0);
}
-static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
+static int apply_r_riscv_got_hi20_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
- s32 hi20;
+ ptrdiff_t offset = (void *)v - location;
/* Always emit the got entry */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
- offset = module_emit_got_entry(me, v);
- offset = (void *)offset - (void *)location;
+ offset = (void *)module_emit_got_entry(me, v) - location;
} else {
pr_err(
"%s: can not generate the GOT entry for symbol = %016llx from PC = %p\n",
@@ -202,22 +238,19 @@ static int apply_r_riscv_got_hi20_rela(struct module *me, u32 *location,
return -EINVAL;
}
- hi20 = (offset + 0x800) & 0xfffff000;
- *location = (*location & 0xfff) | hi20;
- return 0;
+ return riscv_insn_rmw(location, 0xfff, (offset + 0x800) & 0xfffff000);
}
-static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
+static int apply_r_riscv_call_plt_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 hi20, lo12;
if (!riscv_insn_valid_32bit_offset(offset)) {
/* Only emit the plt entry if offset over 32-bit range */
if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
- offset = module_emit_plt_entry(me, v);
- offset = (void *)offset - (void *)location;
+ offset = (void *)module_emit_plt_entry(me, v) - location;
} else {
pr_err(
"%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
@@ -228,15 +261,14 @@ static int apply_r_riscv_call_plt_rela(struct module *me, u32 *location,
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = (offset - hi20) & 0xfff;
- *location = (*location & 0xfff) | hi20;
- *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
- return 0;
+ riscv_insn_rmw(location, 0xfff, hi20);
+ return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}
-static int apply_r_riscv_call_rela(struct module *me, u32 *location,
+static int apply_r_riscv_call_rela(struct module *me, void *location,
Elf_Addr v)
{
- ptrdiff_t offset = (void *)v - (void *)location;
+ ptrdiff_t offset = (void *)v - location;
u32 hi20, lo12;
if (!riscv_insn_valid_32bit_offset(offset)) {
@@ -248,18 +280,17 @@ static int apply_r_riscv_call_rela(struct module *me, u32 *location,
hi20 = (offset + 0x800) & 0xfffff000;
lo12 = (offset - hi20) & 0xfff;
- *location = (*location & 0xfff) | hi20;
- *(location + 1) = (*(location + 1) & 0xfffff) | (lo12 << 20);
- return 0;
+ riscv_insn_rmw(location, 0xfff, hi20);
+ return riscv_insn_rmw(location + 4, 0xfffff, lo12 << 20);
}
-static int apply_r_riscv_relax_rela(struct module *me, u32 *location,
+static int apply_r_riscv_relax_rela(struct module *me, void *location,
Elf_Addr v)
{
return 0;
}
-static int apply_r_riscv_align_rela(struct module *me, u32 *location,
+static int apply_r_riscv_align_rela(struct module *me, void *location,
Elf_Addr v)
{
pr_err(
@@ -268,91 +299,509 @@ static int apply_r_riscv_align_rela(struct module *me, u32 *location,
return -EINVAL;
}
-static int apply_r_riscv_add16_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add8_rela(struct module *me, void *location, Elf_Addr v)
+{
+ *(u8 *)location += (u8)v;
+ return 0;
+}
+
+static int apply_r_riscv_add16_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u16 *)location += (u16)v;
return 0;
}
-static int apply_r_riscv_add32_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add32_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u32 *)location += (u32)v;
return 0;
}
-static int apply_r_riscv_add64_rela(struct module *me, u32 *location,
+static int apply_r_riscv_add64_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u64 *)location += (u64)v;
return 0;
}
-static int apply_r_riscv_sub16_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub8_rela(struct module *me, void *location, Elf_Addr v)
+{
+ *(u8 *)location -= (u8)v;
+ return 0;
+}
+
+static int apply_r_riscv_sub16_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u16 *)location -= (u16)v;
return 0;
}
-static int apply_r_riscv_sub32_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub32_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u32 *)location -= (u32)v;
return 0;
}
-static int apply_r_riscv_sub64_rela(struct module *me, u32 *location,
+static int apply_r_riscv_sub64_rela(struct module *me, void *location,
Elf_Addr v)
{
*(u64 *)location -= (u64)v;
return 0;
}
-static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
- Elf_Addr v) = {
- [R_RISCV_32] = apply_r_riscv_32_rela,
- [R_RISCV_64] = apply_r_riscv_64_rela,
- [R_RISCV_BRANCH] = apply_r_riscv_branch_rela,
- [R_RISCV_JAL] = apply_r_riscv_jal_rela,
- [R_RISCV_RVC_BRANCH] = apply_r_riscv_rvc_branch_rela,
- [R_RISCV_RVC_JUMP] = apply_r_riscv_rvc_jump_rela,
- [R_RISCV_PCREL_HI20] = apply_r_riscv_pcrel_hi20_rela,
- [R_RISCV_PCREL_LO12_I] = apply_r_riscv_pcrel_lo12_i_rela,
- [R_RISCV_PCREL_LO12_S] = apply_r_riscv_pcrel_lo12_s_rela,
- [R_RISCV_HI20] = apply_r_riscv_hi20_rela,
- [R_RISCV_LO12_I] = apply_r_riscv_lo12_i_rela,
- [R_RISCV_LO12_S] = apply_r_riscv_lo12_s_rela,
- [R_RISCV_GOT_HI20] = apply_r_riscv_got_hi20_rela,
- [R_RISCV_CALL_PLT] = apply_r_riscv_call_plt_rela,
- [R_RISCV_CALL] = apply_r_riscv_call_rela,
- [R_RISCV_RELAX] = apply_r_riscv_relax_rela,
- [R_RISCV_ALIGN] = apply_r_riscv_align_rela,
- [R_RISCV_ADD16] = apply_r_riscv_add16_rela,
- [R_RISCV_ADD32] = apply_r_riscv_add32_rela,
- [R_RISCV_ADD64] = apply_r_riscv_add64_rela,
- [R_RISCV_SUB16] = apply_r_riscv_sub16_rela,
- [R_RISCV_SUB32] = apply_r_riscv_sub32_rela,
- [R_RISCV_SUB64] = apply_r_riscv_sub64_rela,
+static int dynamic_linking_not_supported(struct module *me, void *location,
+ Elf_Addr v)
+{
+ pr_err("%s: Dynamic linking not supported in kernel modules PC = %p\n",
+ me->name, location);
+ return -EINVAL;
+}
+
+static int tls_not_supported(struct module *me, void *location, Elf_Addr v)
+{
+ pr_err("%s: Thread local storage not supported in kernel modules PC = %p\n",
+ me->name, location);
+ return -EINVAL;
+}
+
+static int apply_r_riscv_sub6_rela(struct module *me, void *location, Elf_Addr v)
+{
+ u8 *byte = location;
+ u8 value = v;
+
+ *byte = (*byte - (value & 0x3f)) & 0x3f;
+ return 0;
+}
+
+static int apply_r_riscv_set6_rela(struct module *me, void *location, Elf_Addr v)
+{
+ u8 *byte = location;
+ u8 value = v;
+
+ *byte = (*byte & 0xc0) | (value & 0x3f);
+ return 0;
+}
+
+static int apply_r_riscv_set8_rela(struct module *me, void *location, Elf_Addr v)
+{
+ *(u8 *)location = (u8)v;
+ return 0;
+}
+
+static int apply_r_riscv_set16_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ *(u16 *)location = (u16)v;
+ return 0;
+}
+
+static int apply_r_riscv_set32_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ *(u32 *)location = (u32)v;
+ return 0;
+}
+
+static int apply_r_riscv_32_pcrel_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ *(u32 *)location = v - (uintptr_t)location;
+ return 0;
+}
+
+static int apply_r_riscv_plt32_rela(struct module *me, void *location,
+ Elf_Addr v)
+{
+ ptrdiff_t offset = (void *)v - location;
+
+ if (!riscv_insn_valid_32bit_offset(offset)) {
+ /* Only emit the plt entry if offset over 32-bit range */
+ if (IS_ENABLED(CONFIG_MODULE_SECTIONS)) {
+ offset = (void *)module_emit_plt_entry(me, v) - location;
+ } else {
+ pr_err("%s: target %016llx can not be addressed by the 32-bit offset from PC = %p\n",
+ me->name, (long long)v, location);
+ return -EINVAL;
+ }
+ }
+
+ *(u32 *)location = (u32)offset;
+ return 0;
+}
+
+static int apply_r_riscv_set_uleb128(struct module *me, void *location, Elf_Addr v)
+{
+ *(long *)location = v;
+ return 0;
+}
+
+static int apply_r_riscv_sub_uleb128(struct module *me, void *location, Elf_Addr v)
+{
+ *(long *)location -= v;
+ return 0;
+}
+
+static int apply_6_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ u8 *byte = location;
+ u8 value = buffer;
+
+ if (buffer > 0x3f) {
+ pr_err("%s: value %ld out of range for 6-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+
+ *byte = (*byte & 0xc0) | (value & 0x3f);
+ return 0;
+}
+
+static int apply_8_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ if (buffer > U8_MAX) {
+ pr_err("%s: value %ld out of range for 8-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+ *(u8 *)location = (u8)buffer;
+ return 0;
+}
+
+static int apply_16_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ if (buffer > U16_MAX) {
+ pr_err("%s: value %ld out of range for 16-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+ *(u16 *)location = (u16)buffer;
+ return 0;
+}
+
+static int apply_32_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ if (buffer > U32_MAX) {
+ pr_err("%s: value %ld out of range for 32-bit relocation.\n",
+ me->name, buffer);
+ return -EINVAL;
+ }
+ *(u32 *)location = (u32)buffer;
+ return 0;
+}
+
+static int apply_64_bit_accumulation(struct module *me, void *location, long buffer)
+{
+ *(u64 *)location = (u64)buffer;
+ return 0;
+}
+
+static int apply_uleb128_accumulation(struct module *me, void *location, long buffer)
+{
+ /*
+ * ULEB128 is a variable length encoding. Encode the buffer into
+ * the ULEB128 data format.
+ */
+ u8 *p = location;
+
+ while (buffer != 0) {
+ u8 value = buffer & 0x7f;
+
+ buffer >>= 7;
+ value |= (!!buffer) << 7;
+
+ *p++ = value;
+ }
+ return 0;
+}
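+
+/*
+ * Worked example (for illustration): a buffer value of 1000 (0x3e8) encodes
+ * to the ULEB128 bytes 0xe8 0x07: the low seven bits 0x68 with the
+ * continuation bit set, then the remaining 0x07 with it clear. Read back as
+ * a little-endian word this is 0x07e8, which is the value the
+ * test_uleb_large case below checks against.
+ */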
+
+/*
+ * Relocations defined in the riscv-elf-psabi-doc.
+ * This handles static linking only.
+ */
+static const struct relocation_handlers reloc_handlers[] = {
+ [R_RISCV_32] = { .reloc_handler = apply_r_riscv_32_rela },
+ [R_RISCV_64] = { .reloc_handler = apply_r_riscv_64_rela },
+ [R_RISCV_RELATIVE] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_COPY] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_JUMP_SLOT] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPMOD32] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPMOD64] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPREL32] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_DTPREL64] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_TPREL32] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_TLS_TPREL64] = { .reloc_handler = dynamic_linking_not_supported },
+ /* 12-15 undefined */
+ [R_RISCV_BRANCH] = { .reloc_handler = apply_r_riscv_branch_rela },
+ [R_RISCV_JAL] = { .reloc_handler = apply_r_riscv_jal_rela },
+ [R_RISCV_CALL] = { .reloc_handler = apply_r_riscv_call_rela },
+ [R_RISCV_CALL_PLT] = { .reloc_handler = apply_r_riscv_call_plt_rela },
+ [R_RISCV_GOT_HI20] = { .reloc_handler = apply_r_riscv_got_hi20_rela },
+ [R_RISCV_TLS_GOT_HI20] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TLS_GD_HI20] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_PCREL_HI20] = { .reloc_handler = apply_r_riscv_pcrel_hi20_rela },
+ [R_RISCV_PCREL_LO12_I] = { .reloc_handler = apply_r_riscv_pcrel_lo12_i_rela },
+ [R_RISCV_PCREL_LO12_S] = { .reloc_handler = apply_r_riscv_pcrel_lo12_s_rela },
+ [R_RISCV_HI20] = { .reloc_handler = apply_r_riscv_hi20_rela },
+ [R_RISCV_LO12_I] = { .reloc_handler = apply_r_riscv_lo12_i_rela },
+ [R_RISCV_LO12_S] = { .reloc_handler = apply_r_riscv_lo12_s_rela },
+ [R_RISCV_TPREL_HI20] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TPREL_LO12_I] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TPREL_LO12_S] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_TPREL_ADD] = { .reloc_handler = tls_not_supported },
+ [R_RISCV_ADD8] = { .reloc_handler = apply_r_riscv_add8_rela,
+ .accumulate_handler = apply_8_bit_accumulation },
+ [R_RISCV_ADD16] = { .reloc_handler = apply_r_riscv_add16_rela,
+ .accumulate_handler = apply_16_bit_accumulation },
+ [R_RISCV_ADD32] = { .reloc_handler = apply_r_riscv_add32_rela,
+ .accumulate_handler = apply_32_bit_accumulation },
+ [R_RISCV_ADD64] = { .reloc_handler = apply_r_riscv_add64_rela,
+ .accumulate_handler = apply_64_bit_accumulation },
+ [R_RISCV_SUB8] = { .reloc_handler = apply_r_riscv_sub8_rela,
+ .accumulate_handler = apply_8_bit_accumulation },
+ [R_RISCV_SUB16] = { .reloc_handler = apply_r_riscv_sub16_rela,
+ .accumulate_handler = apply_16_bit_accumulation },
+ [R_RISCV_SUB32] = { .reloc_handler = apply_r_riscv_sub32_rela,
+ .accumulate_handler = apply_32_bit_accumulation },
+ [R_RISCV_SUB64] = { .reloc_handler = apply_r_riscv_sub64_rela,
+ .accumulate_handler = apply_64_bit_accumulation },
+ /* 41-42 reserved for future standard use */
+ [R_RISCV_ALIGN] = { .reloc_handler = apply_r_riscv_align_rela },
+ [R_RISCV_RVC_BRANCH] = { .reloc_handler = apply_r_riscv_rvc_branch_rela },
+ [R_RISCV_RVC_JUMP] = { .reloc_handler = apply_r_riscv_rvc_jump_rela },
+ /* 46-50 reserved for future standard use */
+ [R_RISCV_RELAX] = { .reloc_handler = apply_r_riscv_relax_rela },
+ [R_RISCV_SUB6] = { .reloc_handler = apply_r_riscv_sub6_rela,
+ .accumulate_handler = apply_6_bit_accumulation },
+ [R_RISCV_SET6] = { .reloc_handler = apply_r_riscv_set6_rela,
+ .accumulate_handler = apply_6_bit_accumulation },
+ [R_RISCV_SET8] = { .reloc_handler = apply_r_riscv_set8_rela,
+ .accumulate_handler = apply_8_bit_accumulation },
+ [R_RISCV_SET16] = { .reloc_handler = apply_r_riscv_set16_rela,
+ .accumulate_handler = apply_16_bit_accumulation },
+ [R_RISCV_SET32] = { .reloc_handler = apply_r_riscv_set32_rela,
+ .accumulate_handler = apply_32_bit_accumulation },
+ [R_RISCV_32_PCREL] = { .reloc_handler = apply_r_riscv_32_pcrel_rela },
+ [R_RISCV_IRELATIVE] = { .reloc_handler = dynamic_linking_not_supported },
+ [R_RISCV_PLT32] = { .reloc_handler = apply_r_riscv_plt32_rela },
+ [R_RISCV_SET_ULEB128] = { .reloc_handler = apply_r_riscv_set_uleb128,
+ .accumulate_handler = apply_uleb128_accumulation },
+ [R_RISCV_SUB_ULEB128] = { .reloc_handler = apply_r_riscv_sub_uleb128,
+ .accumulate_handler = apply_uleb128_accumulation },
+ /* 62-191 reserved for future standard use */
+ /* 192-255 nonstandard ABI extensions */
};
+static void
+process_accumulated_relocations(struct module *me,
+ struct hlist_head **relocation_hashtable,
+ struct list_head *used_buckets_list)
+{
+ /*
+ * Only ADD/SUB/SET/ULEB128 should end up here.
+ *
+ * Each bucket may have more than one relocation location. All
+ * relocations for a location are stored in a list in a bucket.
+ *
+	 * Relocations are applied to a temporary variable before being stored
+	 * to the provided location so that overflow can be checked. This also
+	 * lets ULEB128 decide how many bytes are needed before storing to the
+	 * location. The final value is written to the location by the
+	 * accumulate handler of the last relocation type seen for that
+	 * address.
+ *
+ * Three layers of indexing:
+ * - Each of the buckets in use
+ * - Groups of relocations in each bucket by location address
+ * - Each relocation entry for a location address
+ */
+ struct used_bucket *bucket_iter;
+ struct used_bucket *bucket_iter_tmp;
+ struct relocation_head *rel_head_iter;
+ struct hlist_node *rel_head_iter_tmp;
+ struct relocation_entry *rel_entry_iter;
+ struct relocation_entry *rel_entry_iter_tmp;
+ int curr_type;
+ void *location;
+ long buffer;
+
+ list_for_each_entry_safe(bucket_iter, bucket_iter_tmp,
+ used_buckets_list, head) {
+ hlist_for_each_entry_safe(rel_head_iter, rel_head_iter_tmp,
+ bucket_iter->bucket, node) {
+ buffer = 0;
+ location = rel_head_iter->location;
+ list_for_each_entry_safe(rel_entry_iter,
+ rel_entry_iter_tmp,
+ rel_head_iter->rel_entry,
+ head) {
+ curr_type = rel_entry_iter->type;
+ reloc_handlers[curr_type].reloc_handler(
+ me, &buffer, rel_entry_iter->value);
+ kfree(rel_entry_iter);
+ }
+ reloc_handlers[curr_type].accumulate_handler(
+ me, location, buffer);
+ kfree(rel_head_iter);
+ }
+ kfree(bucket_iter);
+ }
+
+ kfree(*relocation_hashtable);
+}
+
+static int add_relocation_to_accumulate(struct module *me, int type,
+ void *location,
+ unsigned int hashtable_bits, Elf_Addr v,
+ struct hlist_head *relocation_hashtable,
+ struct list_head *used_buckets_list)
+{
+ struct relocation_entry *entry;
+ struct relocation_head *rel_head;
+ struct hlist_head *current_head;
+ struct used_bucket *bucket;
+ unsigned long hash;
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+
+ if (!entry)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&entry->head);
+ entry->type = type;
+ entry->value = v;
+
+ hash = hash_min((uintptr_t)location, hashtable_bits);
+
+ current_head = &relocation_hashtable[hash];
+
+ /*
+ * Search for the relocation_head for the relocations that happen at the
+ * provided location
+ */
+ bool found = false;
+ struct relocation_head *rel_head_iter;
+
+ hlist_for_each_entry(rel_head_iter, current_head, node) {
+ if (rel_head_iter->location == location) {
+ found = true;
+ rel_head = rel_head_iter;
+ break;
+ }
+ }
+
+ /*
+	 * If there have not yet been any relocations at the provided location,
+ * create a relocation_head for that location and populate it with this
+ * relocation_entry.
+ */
+ if (!found) {
+ rel_head = kmalloc(sizeof(*rel_head), GFP_KERNEL);
+
+ if (!rel_head) {
+ kfree(entry);
+ return -ENOMEM;
+ }
+
+ rel_head->rel_entry =
+ kmalloc(sizeof(struct list_head), GFP_KERNEL);
+
+ if (!rel_head->rel_entry) {
+ kfree(entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(rel_head->rel_entry);
+ rel_head->location = location;
+ INIT_HLIST_NODE(&rel_head->node);
+ if (!current_head->first) {
+			bucket = kmalloc(sizeof(struct used_bucket), GFP_KERNEL);
+
+ if (!bucket) {
+ kfree(entry);
+ kfree(rel_head->rel_entry);
+ kfree(rel_head);
+ return -ENOMEM;
+ }
+
+ INIT_LIST_HEAD(&bucket->head);
+ bucket->bucket = current_head;
+ list_add(&bucket->head, used_buckets_list);
+ }
+ hlist_add_head(&rel_head->node, current_head);
+ }
+
+ /* Add relocation to head of discovered rel_head */
+ list_add_tail(&entry->head, rel_head->rel_entry);
+
+ return 0;
+}
+
+static unsigned int
+initialize_relocation_hashtable(unsigned int num_relocations,
+ struct hlist_head **relocation_hashtable)
+{
+	/* We can safely assume that hashtable_bits does not exceed BITS_PER_LONG */
+ unsigned long hashtable_size = roundup_pow_of_two(num_relocations);
+ /*
+ * When hashtable_size == 1, hashtable_bits == 0.
+ * This is valid because the hashing algorithm returns 0 in this case.
+ */
+ unsigned int hashtable_bits = ilog2(hashtable_size);
+
+ /*
+	 * Double the size of the hashtable if num_relocations * 1.25 (a load
+	 * factor above 0.8) would exceed hashtable_size.
+ */
+	int should_double_size = (num_relocations + (num_relocations >> 2)) > hashtable_size;
+
+ hashtable_bits += should_double_size;
+
+ hashtable_size <<= should_double_size;
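+
+	/*
+	 * For example (illustrative): 14 relocations round up to a size-16
+	 * table, but 14 + 14/4 == 17 exceeds 16, so the table is doubled to
+	 * 32 buckets and hashtable_bits becomes 5; with 13 relocations,
+	 * 13 + 3 == 16 does not exceed 16 and the table stays at 16 buckets.
+	 */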
+
+ *relocation_hashtable = kmalloc_array(hashtable_size,
+ sizeof(**relocation_hashtable),
+ GFP_KERNEL);
+ if (!*relocation_hashtable)
+ return 0;
+
+ __hash_init(*relocation_hashtable, hashtable_size);
+
+ return hashtable_bits;
+}
+
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
unsigned int symindex, unsigned int relsec,
struct module *me)
{
Elf_Rela *rel = (void *) sechdrs[relsec].sh_addr;
- int (*handler)(struct module *me, u32 *location, Elf_Addr v);
+ int (*handler)(struct module *me, void *location, Elf_Addr v);
Elf_Sym *sym;
- u32 *location;
+ void *location;
unsigned int i, type;
Elf_Addr v;
int res;
+ unsigned int num_relocations = sechdrs[relsec].sh_size / sizeof(*rel);
+ struct hlist_head *relocation_hashtable;
+ struct list_head used_buckets_list;
+ unsigned int hashtable_bits;
+
+ hashtable_bits = initialize_relocation_hashtable(num_relocations,
+ &relocation_hashtable);
+
+ if (!relocation_hashtable)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&used_buckets_list);
pr_debug("Applying relocate section %u to %u\n", relsec,
sechdrs[relsec].sh_info);
- for (i = 0; i < sechdrs[relsec].sh_size / sizeof(*rel); i++) {
+ for (i = 0; i < num_relocations; i++) {
/* This is where to make the change */
location = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr
+ rel[i].r_offset;
@@ -370,8 +819,8 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
type = ELF_RISCV_R_TYPE(rel[i].r_info);
- if (type < ARRAY_SIZE(reloc_handlers_rela))
- handler = reloc_handlers_rela[type];
+ if (type < ARRAY_SIZE(reloc_handlers))
+ handler = reloc_handlers[type].reloc_handler;
else
handler = NULL;
@@ -427,11 +876,20 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
}
}
- res = handler(me, location, v);
+ if (reloc_handlers[type].accumulate_handler)
+ res = add_relocation_to_accumulate(me, type, location,
+ hashtable_bits, v,
+ relocation_hashtable,
+ &used_buckets_list);
+ else
+ res = handler(me, location, v);
if (res)
return res;
}
+ process_accumulated_relocations(me, &relocation_hashtable,
+ &used_buckets_list);
+
return 0;
}
diff --git a/arch/riscv/kernel/probes/rethook_trampoline.S b/arch/riscv/kernel/probes/rethook_trampoline.S
index 21bac92a17..f2cd83d9b0 100644
--- a/arch/riscv/kernel/probes/rethook_trampoline.S
+++ b/arch/riscv/kernel/probes/rethook_trampoline.S
@@ -75,7 +75,7 @@
REG_L x31, PT_T6(sp)
.endm
-ENTRY(arch_rethook_trampoline)
+SYM_CODE_START(arch_rethook_trampoline)
addi sp, sp, -(PT_SIZE_ON_STACK)
save_all_base_regs
@@ -90,4 +90,4 @@ ENTRY(arch_rethook_trampoline)
addi sp, sp, PT_SIZE_ON_STACK
ret
-ENDPROC(arch_rethook_trampoline)
+SYM_CODE_END(arch_rethook_trampoline)
diff --git a/arch/riscv/kernel/process.c b/arch/riscv/kernel/process.c
index e32d737e03..4f21d970a1 100644
--- a/arch/riscv/kernel/process.c
+++ b/arch/riscv/kernel/process.c
@@ -25,6 +25,7 @@
#include <asm/thread_info.h>
#include <asm/cpuidle.h>
#include <asm/vector.h>
+#include <asm/cpufeature.h>
register unsigned long gp_in_global __asm__("gp");
@@ -41,6 +42,23 @@ void arch_cpu_idle(void)
cpu_do_idle();
}
+int set_unalign_ctl(struct task_struct *tsk, unsigned int val)
+{
+ if (!unaligned_ctl_available())
+ return -EINVAL;
+
+ tsk->thread.align_ctl = val;
+ return 0;
+}
+
+int get_unalign_ctl(struct task_struct *tsk, unsigned long adr)
+{
+ if (!unaligned_ctl_available())
+ return -EINVAL;
+
+ return put_user(tsk->thread.align_ctl, (unsigned long __user *)adr);
+}
+
void __show_regs(struct pt_regs *regs)
{
show_regs_print_info(KERN_DEFAULT);
diff --git a/arch/riscv/kernel/sbi.c b/arch/riscv/kernel/sbi.c
index c672c8ba9a..5a62ed1da4 100644
--- a/arch/riscv/kernel/sbi.c
+++ b/arch/riscv/kernel/sbi.c
@@ -11,6 +11,7 @@
#include <linux/reboot.h>
#include <asm/sbi.h>
#include <asm/smp.h>
+#include <asm/tlbflush.h>
/* default SBI version is 0.1 */
unsigned long sbi_spec_version __ro_after_init = SBI_SPEC_VERSION_DEFAULT;
@@ -377,31 +378,14 @@ int sbi_remote_fence_i(const struct cpumask *cpu_mask)
EXPORT_SYMBOL(sbi_remote_fence_i);
/**
- * sbi_remote_sfence_vma() - Execute SFENCE.VMA instructions on given remote
- * harts for the specified virtual address range.
- * @cpu_mask: A cpu mask containing all the target harts.
- * @start: Start of the virtual address
- * @size: Total size of the virtual address range.
- *
- * Return: 0 on success, appropriate linux error code otherwise.
- */
-int sbi_remote_sfence_vma(const struct cpumask *cpu_mask,
- unsigned long start,
- unsigned long size)
-{
- return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
- cpu_mask, start, size, 0, 0);
-}
-EXPORT_SYMBOL(sbi_remote_sfence_vma);
-
-/**
* sbi_remote_sfence_vma_asid() - Execute SFENCE.VMA instructions on given
- * remote harts for a virtual address range belonging to a specific ASID.
+ *				remote harts for a virtual address range, optionally limited to a specific ASID.
*
* @cpu_mask: A cpu mask containing all the target harts.
* @start: Start of the virtual address
* @size: Total size of the virtual address range.
- * @asid: The value of address space identifier (ASID).
+ * @asid: The value of address space identifier (ASID), or FLUSH_TLB_NO_ASID
+ * for flushing all address spaces.
*
* Return: 0 on success, appropriate linux error code otherwise.
*/
@@ -410,8 +394,12 @@ int sbi_remote_sfence_vma_asid(const struct cpumask *cpu_mask,
unsigned long size,
unsigned long asid)
{
- return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
- cpu_mask, start, size, asid, 0);
+ if (asid == FLUSH_TLB_NO_ASID)
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA,
+ cpu_mask, start, size, 0, 0);
+ else
+ return __sbi_rfence(SBI_EXT_RFENCE_REMOTE_SFENCE_VMA_ASID,
+ cpu_mask, start, size, asid, 0);
}
EXPORT_SYMBOL(sbi_remote_sfence_vma_asid);
diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
index aac853ae4e..535a837de5 100644
--- a/arch/riscv/kernel/setup.c
+++ b/arch/riscv/kernel/setup.c
@@ -15,7 +15,6 @@
#include <linux/memblock.h>
#include <linux/sched.h>
#include <linux/console.h>
-#include <linux/screen_info.h>
#include <linux/of_fdt.h>
#include <linux/sched/task.h>
#include <linux/smp.h>
@@ -26,6 +25,7 @@
#include <asm/acpi.h>
#include <asm/alternative.h>
#include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/early_ioremap.h>
#include <asm/pgtable.h>
@@ -40,17 +40,6 @@
#include "head.h"
-#if defined(CONFIG_DUMMY_CONSOLE) || defined(CONFIG_EFI)
-struct screen_info screen_info __section(".data") = {
- .orig_video_lines = 30,
- .orig_video_cols = 80,
- .orig_video_mode = 0,
- .orig_video_ega_bx = 0,
- .orig_video_isVGA = 1,
- .orig_video_points = 8
-};
-#endif
-
/*
* The lucky hart to first increment this variable will boot the other cores.
* This is used before the kernel initializes the BSS so it can't be in the
@@ -301,10 +290,13 @@ void __init setup_arch(char **cmdline_p)
riscv_fill_hwcap();
init_rt_signal_env();
apply_boot_alternatives();
+
if (IS_ENABLED(CONFIG_RISCV_ISA_ZICBOM) &&
riscv_isa_extension_available(NULL, ZICBOM))
riscv_noncoherent_supported();
riscv_set_dma_cache_alignment();
+
+ riscv_user_isa_enable();
}
static int __init topology_init(void)
diff --git a/arch/riscv/kernel/signal.c b/arch/riscv/kernel/signal.c
index 21a4d0e111..88b6220b26 100644
--- a/arch/riscv/kernel/signal.c
+++ b/arch/riscv/kernel/signal.c
@@ -384,30 +384,6 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
- /* Are we from a system call? */
- if (regs->cause == EXC_SYSCALL) {
- /* Avoid additional syscall restarting via ret_from_exception */
- regs->cause = -1UL;
- /* If so, check system call restarting.. */
- switch (regs->a0) {
- case -ERESTART_RESTARTBLOCK:
- case -ERESTARTNOHAND:
- regs->a0 = -EINTR;
- break;
-
- case -ERESTARTSYS:
- if (!(ksig->ka.sa.sa_flags & SA_RESTART)) {
- regs->a0 = -EINTR;
- break;
- }
- fallthrough;
- case -ERESTARTNOINTR:
- regs->a0 = regs->orig_a0;
- regs->epc -= 0x4;
- break;
- }
- }
-
rseq_signal_deliver(ksig, regs);
/* Set up the stack frame */
@@ -421,36 +397,67 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
void arch_do_signal_or_restart(struct pt_regs *regs)
{
+ unsigned long continue_addr = 0, restart_addr = 0;
+ int retval = 0;
struct ksignal ksig;
+ bool syscall = (regs->cause == EXC_SYSCALL);
- if (get_signal(&ksig)) {
- /* Actually deliver the signal */
- handle_signal(&ksig, regs);
- return;
- }
+ /* If we were from a system call, check for system call restarting */
+ if (syscall) {
+ continue_addr = regs->epc;
+ restart_addr = continue_addr - 4;
+ retval = regs->a0;
- /* Did we come from a system call? */
- if (regs->cause == EXC_SYSCALL) {
/* Avoid additional syscall restarting via ret_from_exception */
regs->cause = -1UL;
- /* Restart the system call - no handlers present */
- switch (regs->a0) {
+ /*
+ * Prepare for system call restart. We do this here so that a
+ * debugger will see the already changed PC.
+ */
+ switch (retval) {
case -ERESTARTNOHAND:
case -ERESTARTSYS:
case -ERESTARTNOINTR:
- regs->a0 = regs->orig_a0;
- regs->epc -= 0x4;
- break;
case -ERESTART_RESTARTBLOCK:
- regs->a0 = regs->orig_a0;
- regs->a7 = __NR_restart_syscall;
- regs->epc -= 0x4;
+ regs->a0 = regs->orig_a0;
+ regs->epc = restart_addr;
break;
}
}
/*
+ * Get the signal to deliver. When running under ptrace, at this point
+ * the debugger may change all of our registers.
+ */
+ if (get_signal(&ksig)) {
+ /*
+ * Depending on the signal settings, we may need to revert the
+ * decision to restart the system call, but skip this if a
+ * debugger has chosen to restart at a different PC.
+ */
+ if (regs->epc == restart_addr &&
+ (retval == -ERESTARTNOHAND ||
+ retval == -ERESTART_RESTARTBLOCK ||
+ (retval == -ERESTARTSYS &&
+ !(ksig.ka.sa.sa_flags & SA_RESTART)))) {
+ regs->a0 = -EINTR;
+ regs->epc = continue_addr;
+ }
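+
+		/*
+		 * For illustration: a syscall that returned -ERESTARTSYS while
+		 * SA_RESTART is set does not match the condition above, so epc
+		 * stays at restart_addr and the ecall is re-executed with the
+		 * original a0 once the signal handler returns.
+		 */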
+
+ /* Actually deliver the signal */
+ handle_signal(&ksig, regs);
+ return;
+ }
+
+ /*
+ * Handle restarting a different system call. As above, if a debugger
+ * has chosen to restart at a different PC, ignore the restart.
+ */
+ if (syscall && regs->epc == restart_addr && retval == -ERESTART_RESTARTBLOCK)
+ regs->a7 = __NR_restart_syscall;
+
+ /*
* If there is no signal to deliver, we just put the saved
* sigmask back.
*/
diff --git a/arch/riscv/kernel/smpboot.c b/arch/riscv/kernel/smpboot.c
index 1b8da4e40a..d162bf339b 100644
--- a/arch/riscv/kernel/smpboot.c
+++ b/arch/riscv/kernel/smpboot.c
@@ -25,6 +25,8 @@
#include <linux/of.h>
#include <linux/sched/task_stack.h>
#include <linux/sched/mm.h>
+
+#include <asm/cpufeature.h>
#include <asm/cpu_ops.h>
#include <asm/cpufeature.h>
#include <asm/irq.h>
@@ -246,13 +248,14 @@ asmlinkage __visible void smp_callin(void)
numa_add_cpu(curr_cpuid);
set_cpu_online(curr_cpuid, 1);
- check_unaligned_access(curr_cpuid);
if (has_vector()) {
if (riscv_v_setup_vsize())
elf_hwcap &= ~COMPAT_HWCAP_ISA_V;
}
+ riscv_user_isa_enable();
+
/*
* Remote TLB flushes are ignored while the CPU is offline, so emit
* a local TLB flush right now just in case.
diff --git a/arch/riscv/kernel/suspend_entry.S b/arch/riscv/kernel/suspend_entry.S
index f7960c7c5f..2d54f309c1 100644
--- a/arch/riscv/kernel/suspend_entry.S
+++ b/arch/riscv/kernel/suspend_entry.S
@@ -16,7 +16,7 @@
.altmacro
.option norelax
-ENTRY(__cpu_suspend_enter)
+SYM_FUNC_START(__cpu_suspend_enter)
/* Save registers (except A0 and T0-T6) */
REG_S ra, (SUSPEND_CONTEXT_REGS + PT_RA)(a0)
REG_S sp, (SUSPEND_CONTEXT_REGS + PT_SP)(a0)
@@ -57,14 +57,11 @@ ENTRY(__cpu_suspend_enter)
/* Return to C code */
ret
-END(__cpu_suspend_enter)
+SYM_FUNC_END(__cpu_suspend_enter)
SYM_TYPED_FUNC_START(__cpu_resume_enter)
/* Load the global pointer */
- .option push
- .option norelax
- la gp, __global_pointer$
- .option pop
+ load_global_pointer
#ifdef CONFIG_MMU
/* Save A0 and A1 */
diff --git a/arch/riscv/kernel/sys_riscv.c b/arch/riscv/kernel/sys_riscv.c
index 473159b5f3..a2ca5b7756 100644
--- a/arch/riscv/kernel/sys_riscv.c
+++ b/arch/riscv/kernel/sys_riscv.c
@@ -79,7 +79,7 @@ SYSCALL_DEFINE3(riscv_flush_icache, uintptr_t, start, uintptr_t, end,
/*
* The hwprobe interface, for allowing userspace to probe to see which features
- * are supported by the hardware. See Documentation/riscv/hwprobe.rst for more
+ * are supported by the hardware. See Documentation/arch/riscv/hwprobe.rst for more
* details.
*/
static void hwprobe_arch_id(struct riscv_hwprobe *pair,
@@ -145,26 +145,38 @@ static void hwprobe_isa_ext0(struct riscv_hwprobe *pair,
for_each_cpu(cpu, cpus) {
struct riscv_isainfo *isainfo = &hart_isa[cpu];
- if (riscv_isa_extension_available(isainfo->isa, ZBA))
- pair->value |= RISCV_HWPROBE_EXT_ZBA;
- else
- missing |= RISCV_HWPROBE_EXT_ZBA;
+#define EXT_KEY(ext) \
+ do { \
+ if (__riscv_isa_extension_available(isainfo->isa, RISCV_ISA_EXT_##ext)) \
+ pair->value |= RISCV_HWPROBE_EXT_##ext; \
+ else \
+ missing |= RISCV_HWPROBE_EXT_##ext; \
+ } while (false)
- if (riscv_isa_extension_available(isainfo->isa, ZBB))
- pair->value |= RISCV_HWPROBE_EXT_ZBB;
- else
- missing |= RISCV_HWPROBE_EXT_ZBB;
-
- if (riscv_isa_extension_available(isainfo->isa, ZBS))
- pair->value |= RISCV_HWPROBE_EXT_ZBS;
- else
- missing |= RISCV_HWPROBE_EXT_ZBS;
+ /*
+ * Only use EXT_KEY() for extensions which can be exposed to userspace,
+	 * regardless of the kernel's configuration, as no checks other than
+	 * presence in the hart_isa bitmap are made.
+ */
+ EXT_KEY(ZBA);
+ EXT_KEY(ZBB);
+ EXT_KEY(ZBS);
+ EXT_KEY(ZICBOZ);
+#undef EXT_KEY
}
/* Now turn off reporting features if any CPU is missing it. */
pair->value &= ~missing;
}
+static bool hwprobe_ext0_has(const struct cpumask *cpus, u64 ext)
+{
+ struct riscv_hwprobe pair;
+
+ hwprobe_isa_ext0(&pair, cpus);
+ return (pair.value & ext);
+}
+
static u64 hwprobe_misaligned(const struct cpumask *cpus)
{
int cpu;
@@ -215,6 +227,12 @@ static void hwprobe_one_pair(struct riscv_hwprobe *pair,
pair->value = hwprobe_misaligned(cpus);
break;
+ case RISCV_HWPROBE_KEY_ZICBOZ_BLOCK_SIZE:
+ pair->value = 0;
+ if (hwprobe_ext0_has(cpus, RISCV_HWPROBE_EXT_ZICBOZ))
+ pair->value = riscv_cboz_block_size;
+ break;
+
/*
* For forward compatibility, unknown keys don't fail the whole
* call, but get their element key set to -1 and value set to 0
diff --git a/arch/riscv/kernel/tests/Kconfig.debug b/arch/riscv/kernel/tests/Kconfig.debug
new file mode 100644
index 0000000000..5dba64e8e9
--- /dev/null
+++ b/arch/riscv/kernel/tests/Kconfig.debug
@@ -0,0 +1,35 @@
+# SPDX-License-Identifier: GPL-2.0-only
+menu "arch/riscv/kernel Testing and Coverage"
+
+config AS_HAS_ULEB128
+ def_bool $(as-instr,.reloc label$(comma) R_RISCV_SET_ULEB128$(comma) 127\n.reloc label$(comma) R_RISCV_SUB_ULEB128$(comma) 127\nlabel:\n.word 0)
+
+menuconfig RUNTIME_KERNEL_TESTING_MENU
+ bool "arch/riscv/kernel runtime Testing"
+ def_bool y
+ help
+ Enable riscv kernel runtime testing.
+
+if RUNTIME_KERNEL_TESTING_MENU
+
+config RISCV_MODULE_LINKING_KUNIT
+ bool "KUnit test riscv module linking at runtime" if !KUNIT_ALL_TESTS
+ depends on KUNIT
+ default KUNIT_ALL_TESTS
+ help
+ Enable this option to test riscv module linking at boot. This will
+ enable a module called "test_module_linking".
+
+ KUnit tests run during boot and output the results to the debug log
+ in TAP format (http://testanything.org/). Only useful for kernel devs
+ running the KUnit test harness, and not intended for inclusion into a
+ production build.
+
+ For more information on KUnit and unit tests in general please refer
+ to the KUnit documentation in Documentation/dev-tools/kunit/.
+
+ If unsure, say N.
+
+endif # RUNTIME_KERNEL_TESTING_MENU
+
+endmenu # "arch/riscv/kernel runtime Testing"
diff --git a/arch/riscv/kernel/tests/Makefile b/arch/riscv/kernel/tests/Makefile
new file mode 100644
index 0000000000..7d6c76cffe
--- /dev/null
+++ b/arch/riscv/kernel/tests/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_RISCV_MODULE_LINKING_KUNIT) += module_test/
diff --git a/arch/riscv/kernel/tests/module_test/Makefile b/arch/riscv/kernel/tests/module_test/Makefile
new file mode 100644
index 0000000000..d7a6fd8943
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/Makefile
@@ -0,0 +1,15 @@
+obj-m += test_module_linking.o
+
+test_sub := test_sub6.o test_sub8.o test_sub16.o test_sub32.o test_sub64.o
+
+test_set := test_set6.o test_set8.o test_set16.o test_set32.o
+
+test_module_linking-objs += $(test_sub)
+
+test_module_linking-objs += $(test_set)
+
+ifeq ($(CONFIG_AS_HAS_ULEB128),y)
+test_module_linking-objs += test_uleb128.o
+endif
+
+test_module_linking-objs += test_module_linking_main.o
diff --git a/arch/riscv/kernel/tests/module_test/test_module_linking_main.c b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c
new file mode 100644
index 0000000000..8df5fa5b83
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_module_linking_main.c
@@ -0,0 +1,88 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <kunit/test.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Test module linking");
+
+extern int test_set32(void);
+extern int test_set16(void);
+extern int test_set8(void);
+extern int test_set6(void);
+extern long test_sub64(void);
+extern int test_sub32(void);
+extern int test_sub16(void);
+extern int test_sub8(void);
+extern int test_sub6(void);
+
+#ifdef CONFIG_AS_HAS_ULEB128
+extern int test_uleb_basic(void);
+extern int test_uleb_large(void);
+#endif
+
+#define CHECK_EQ(lhs, rhs) KUNIT_ASSERT_EQ(test, lhs, rhs)
+
+void run_test_set(struct kunit *test);
+void run_test_sub(struct kunit *test);
+void run_test_uleb(struct kunit *test);
+
+void run_test_set(struct kunit *test)
+{
+ int val32 = test_set32();
+ int val16 = test_set16();
+ int val8 = test_set8();
+ int val6 = test_set6();
+
+ CHECK_EQ(val32, 0);
+ CHECK_EQ(val16, 0);
+ CHECK_EQ(val8, 0);
+ CHECK_EQ(val6, 0);
+}
+
+void run_test_sub(struct kunit *test)
+{
+ int val64 = test_sub64();
+ int val32 = test_sub32();
+ int val16 = test_sub16();
+ int val8 = test_sub8();
+ int val6 = test_sub6();
+
+ CHECK_EQ(val64, 0);
+ CHECK_EQ(val32, 0);
+ CHECK_EQ(val16, 0);
+ CHECK_EQ(val8, 0);
+ CHECK_EQ(val6, 0);
+}
+
+#ifdef CONFIG_AS_HAS_ULEB128
+void run_test_uleb(struct kunit *test)
+{
+ int val_uleb = test_uleb_basic();
+ int val_uleb2 = test_uleb_large();
+
+ CHECK_EQ(val_uleb, 0);
+ CHECK_EQ(val_uleb2, 0);
+}
+#endif
+
+static struct kunit_case __refdata riscv_module_linking_test_cases[] = {
+ KUNIT_CASE(run_test_set),
+ KUNIT_CASE(run_test_sub),
+#ifdef CONFIG_AS_HAS_ULEB128
+ KUNIT_CASE(run_test_uleb),
+#endif
+ {}
+};
+
+static struct kunit_suite riscv_module_linking_test_suite = {
+	.name = "riscv_module_linking",
+ .test_cases = riscv_module_linking_test_cases,
+};
+
+kunit_test_suites(&riscv_module_linking_test_suite);
diff --git a/arch/riscv/kernel/tests/module_test/test_set16.S b/arch/riscv/kernel/tests/module_test/test_set16.S
new file mode 100644
index 0000000000..2be0e441a1
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set16.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set16
+test_set16:
+ lw a0, set16
+ la t0, set16
+#ifdef CONFIG_32BIT
+ slli t0, t0, 16
+ srli t0, t0, 16
+#else
+ slli t0, t0, 48
+ srli t0, t0, 48
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set16:
+ .reloc set16, R_RISCV_SET16, set16
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set32.S b/arch/riscv/kernel/tests/module_test/test_set32.S
new file mode 100644
index 0000000000..de0444537e
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set32.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set32
+test_set32:
+ lw a0, set32
+ la t0, set32
+#ifndef CONFIG_32BIT
+ slli t0, t0, 32
+ srli t0, t0, 32
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set32:
+ .reloc set32, R_RISCV_SET32, set32
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set6.S b/arch/riscv/kernel/tests/module_test/test_set6.S
new file mode 100644
index 0000000000..c39ce4c219
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set6.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set6
+test_set6:
+ lw a0, set6
+ la t0, set6
+#ifdef CONFIG_32BIT
+ slli t0, t0, 26
+ srli t0, t0, 26
+#else
+ slli t0, t0, 58
+ srli t0, t0, 58
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set6:
+ .reloc set6, R_RISCV_SET6, set6
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_set8.S b/arch/riscv/kernel/tests/module_test/test_set8.S
new file mode 100644
index 0000000000..a656173f6f
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_set8.S
@@ -0,0 +1,23 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_set8
+test_set8:
+ lw a0, set8
+ la t0, set8
+#ifdef CONFIG_32BIT
+ slli t0, t0, 24
+ srli t0, t0, 24
+#else
+ slli t0, t0, 56
+ srli t0, t0, 56
+#endif
+ sub a0, a0, t0
+ ret
+.data
+set8:
+ .reloc set8, R_RISCV_SET8, set8
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub16.S b/arch/riscv/kernel/tests/module_test/test_sub16.S
new file mode 100644
index 0000000000..80f731d599
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub16.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub16
+test_sub16:
+ lh a0, sub16
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub16:
+ .reloc sub16, R_RISCV_ADD16, second
+ .reloc sub16, R_RISCV_SUB16, first
+ .half 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub32.S b/arch/riscv/kernel/tests/module_test/test_sub32.S
new file mode 100644
index 0000000000..a341686e12
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub32.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub32
+test_sub32:
+ lw a0, sub32
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub32:
+ .reloc sub32, R_RISCV_ADD32, second
+ .reloc sub32, R_RISCV_SUB32, first
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub6.S b/arch/riscv/kernel/tests/module_test/test_sub6.S
new file mode 100644
index 0000000000..e8b61c1ec5
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub6.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub6
+test_sub6:
+ lb a0, sub6
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub6:
+ .reloc sub6, R_RISCV_SET6, second
+ .reloc sub6, R_RISCV_SUB6, first
+ .byte 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub64.S b/arch/riscv/kernel/tests/module_test/test_sub64.S
new file mode 100644
index 0000000000..a59e8afa88
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub64.S
@@ -0,0 +1,25 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub64
+test_sub64:
+#ifdef CONFIG_32BIT
+ lw a0, sub64
+#else
+ ld a0, sub64
+#endif
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub64:
+ .reloc sub64, R_RISCV_ADD64, second
+ .reloc sub64, R_RISCV_SUB64, first
+ .word 0
+ .word 0
diff --git a/arch/riscv/kernel/tests/module_test/test_sub8.S b/arch/riscv/kernel/tests/module_test/test_sub8.S
new file mode 100644
index 0000000000..ac5d0ec98d
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_sub8.S
@@ -0,0 +1,20 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_sub8
+test_sub8:
+ lb a0, sub8
+ addi a0, a0, -32
+ ret
+first:
+ .space 32
+second:
+
+.data
+sub8:
+ .reloc sub8, R_RISCV_ADD8, second
+ .reloc sub8, R_RISCV_SUB8, first
+ .byte 0
diff --git a/arch/riscv/kernel/tests/module_test/test_uleb128.S b/arch/riscv/kernel/tests/module_test/test_uleb128.S
new file mode 100644
index 0000000000..8515ed7cd8
--- /dev/null
+++ b/arch/riscv/kernel/tests/module_test/test_uleb128.S
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (C) 2023 Rivos Inc.
+ */
+
+.text
+.global test_uleb_basic
+test_uleb_basic:
+ lw a0, second
+ addi a0, a0, -127
+ ret
+
+.global test_uleb_large
+test_uleb_large:
+ lw a0, fourth
+ addi a0, a0, -0x07e8
+ ret
+
+.data
+first:
+ .space 127
+second:
+ .reloc second, R_RISCV_SET_ULEB128, second
+ .reloc second, R_RISCV_SUB_ULEB128, first
+ .word 0
+third:
+ .space 1000
+fourth:
+ .reloc fourth, R_RISCV_SET_ULEB128, fourth
+ .reloc fourth, R_RISCV_SUB_ULEB128, third
+ .word 0
diff --git a/arch/riscv/kernel/traps.c b/arch/riscv/kernel/traps.c
index 67d0073fb6..a1b9be3c43 100644
--- a/arch/riscv/kernel/traps.c
+++ b/arch/riscv/kernel/traps.c
@@ -36,7 +36,21 @@ int show_unhandled_signals = 1;
static DEFINE_SPINLOCK(die_lock);
-static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
+static int copy_code(struct pt_regs *regs, u16 *val, const u16 *insns)
+{
+ const void __user *uaddr = (__force const void __user *)insns;
+
+ if (!user_mode(regs))
+ return get_kernel_nofault(*val, insns);
+
+	/* Userspace code from other tasks cannot be accessed. */
+ if (regs != task_pt_regs(current))
+ return -EPERM;
+
+ return copy_from_user_nofault(val, uaddr, sizeof(*val));
+}
+
+static void dump_instr(const char *loglvl, struct pt_regs *regs)
{
char str[sizeof("0000 ") * 12 + 2 + 1], *p = str;
const u16 *insns = (u16 *)instruction_pointer(regs);
@@ -45,7 +59,7 @@ static void dump_kernel_instr(const char *loglvl, struct pt_regs *regs)
int i;
for (i = -10; i < 2; i++) {
- bad = get_kernel_nofault(val, &insns[i]);
+ bad = copy_code(regs, &val, &insns[i]);
if (!bad) {
p += sprintf(p, i == 0 ? "(%04hx) " : "%04hx ", val);
} else {
@@ -74,7 +88,7 @@ void die(struct pt_regs *regs, const char *str)
print_modules();
if (regs) {
show_regs(regs);
- dump_kernel_instr(KERN_EMERG, regs);
+ dump_instr(KERN_EMERG, regs);
}
cause = regs ? regs->cause : -1;
@@ -107,6 +121,7 @@ void do_trap(struct pt_regs *regs, int signo, int code, unsigned long addr)
print_vma_addr(KERN_CONT " in ", instruction_pointer(regs));
pr_cont("\n");
__show_regs(regs);
+ dump_instr(KERN_EMERG, regs);
}
force_sig_fault(signo, code, (void __user *)addr);
@@ -181,14 +196,6 @@ asmlinkage __visible __trap_section void do_trap_insn_illegal(struct pt_regs *re
DO_ERROR_INFO(do_trap_load_fault,
SIGSEGV, SEGV_ACCERR, "load access fault");
-#ifndef CONFIG_RISCV_M_MODE
-DO_ERROR_INFO(do_trap_load_misaligned,
- SIGBUS, BUS_ADRALN, "Oops - load address misaligned");
-DO_ERROR_INFO(do_trap_store_misaligned,
- SIGBUS, BUS_ADRALN, "Oops - store (or AMO) address misaligned");
-#else
-int handle_misaligned_load(struct pt_regs *regs);
-int handle_misaligned_store(struct pt_regs *regs);
asmlinkage __visible __trap_section void do_trap_load_misaligned(struct pt_regs *regs)
{
@@ -231,7 +238,6 @@ asmlinkage __visible __trap_section void do_trap_store_misaligned(struct pt_regs
irqentry_nmi_exit(regs, state);
}
}
-#endif
DO_ERROR_INFO(do_trap_store_fault,
SIGSEGV, SEGV_ACCERR, "store (or AMO) access fault");
DO_ERROR_INFO(do_trap_ecall_s,
@@ -360,34 +366,10 @@ static void noinstr handle_riscv_irq(struct pt_regs *regs)
asmlinkage void noinstr do_irq(struct pt_regs *regs)
{
irqentry_state_t state = irqentry_enter(regs);
-#ifdef CONFIG_IRQ_STACKS
- if (on_thread_stack()) {
- ulong *sp = per_cpu(irq_stack_ptr, smp_processor_id())
- + IRQ_STACK_SIZE/sizeof(ulong);
- __asm__ __volatile(
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" ra, (sp) \n"
- "addi sp, sp, -"RISCV_SZPTR "\n"
- REG_S" s0, (sp) \n"
- "addi s0, sp, 2*"RISCV_SZPTR "\n"
- "move sp, %[sp] \n"
- "move a0, %[regs] \n"
- "call handle_riscv_irq \n"
- "addi sp, s0, -2*"RISCV_SZPTR"\n"
- REG_L" s0, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- REG_L" ra, (sp) \n"
- "addi sp, sp, "RISCV_SZPTR "\n"
- :
- : [sp] "r" (sp), [regs] "r" (regs)
- : "a0", "a1", "a2", "a3", "a4", "a5", "a6", "a7",
- "t0", "t1", "t2", "t3", "t4", "t5", "t6",
-#ifndef CONFIG_FRAME_POINTER
- "s0",
-#endif
- "memory");
- } else
-#endif
+
+ if (IS_ENABLED(CONFIG_IRQ_STACKS) && on_thread_stack())
+ call_on_irq_stack(regs, handle_riscv_irq);
+ else
handle_riscv_irq(regs);
irqentry_exit(regs, state);
diff --git a/arch/riscv/kernel/traps_misaligned.c b/arch/riscv/kernel/traps_misaligned.c
index 5348d842c7..5255f8134a 100644
--- a/arch/riscv/kernel/traps_misaligned.c
+++ b/arch/riscv/kernel/traps_misaligned.c
@@ -6,12 +6,16 @@
#include <linux/init.h>
#include <linux/mm.h>
#include <linux/module.h>
+#include <linux/perf_event.h>
#include <linux/irq.h>
#include <linux/stringify.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/csr.h>
+#include <asm/entry-common.h>
+#include <asm/hwprobe.h>
+#include <asm/cpufeature.h>
#define INSN_MATCH_LB 0x3
#define INSN_MASK_LB 0x707f
@@ -151,53 +155,134 @@
#define PRECISION_S 0
#define PRECISION_D 1
-#define DECLARE_UNPRIVILEGED_LOAD_FUNCTION(type, insn) \
-static inline type load_##type(const type *addr) \
-{ \
- type val; \
- asm (#insn " %0, %1" \
- : "=&r" (val) : "m" (*addr)); \
- return val; \
+#ifdef CONFIG_FPU
+
+#define FP_GET_RD(insn)		(((insn) >> 7) & 0x1F)
+
+extern void put_f32_reg(unsigned long fp_reg, unsigned long value);
+
+static int set_f32_rd(unsigned long insn, struct pt_regs *regs,
+ unsigned long val)
+{
+ unsigned long fp_reg = FP_GET_RD(insn);
+
+ put_f32_reg(fp_reg, val);
+ regs->status |= SR_FS_DIRTY;
+
+ return 0;
}
-#define DECLARE_UNPRIVILEGED_STORE_FUNCTION(type, insn) \
-static inline void store_##type(type *addr, type val) \
-{ \
- asm volatile (#insn " %0, %1\n" \
- : : "r" (val), "m" (*addr)); \
+extern void put_f64_reg(unsigned long fp_reg, unsigned long value);
+
+static int set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val)
+{
+ unsigned long fp_reg = FP_GET_RD(insn);
+ unsigned long value;
+
+#if __riscv_xlen == 32
+ value = (unsigned long) &val;
+#else
+ value = val;
+#endif
+ put_f64_reg(fp_reg, value);
+ regs->status |= SR_FS_DIRTY;
+
+ return 0;
}
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u8, lbu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u16, lhu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s8, lb)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s16, lh)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(s32, lw)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u8, sb)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u16, sh)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u32, sw)
-#if defined(CONFIG_64BIT)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lwu)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u64, ld)
-DECLARE_UNPRIVILEGED_STORE_FUNCTION(u64, sd)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, ld)
+#if __riscv_xlen == 32
+extern void get_f64_reg(unsigned long fp_reg, u64 *value);
+
+static u64 get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
+{
+ unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+ u64 val;
+
+ get_f64_reg(fp_reg, &val);
+ regs->status |= SR_FS_DIRTY;
+
+ return val;
+}
#else
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(u32, lw)
-DECLARE_UNPRIVILEGED_LOAD_FUNCTION(ulong, lw)
-static inline u64 load_u64(const u64 *addr)
+extern unsigned long get_f64_reg(unsigned long fp_reg);
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
{
- return load_u32((u32 *)addr)
- + ((u64)load_u32((u32 *)addr + 1) << 32);
+ unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+ unsigned long val;
+
+ val = get_f64_reg(fp_reg);
+ regs->status |= SR_FS_DIRTY;
+
+ return val;
}
-static inline void store_u64(u64 *addr, u64 val)
+#endif
+
+extern unsigned long get_f32_reg(unsigned long fp_reg);
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
{
- store_u32((u32 *)addr, val);
- store_u32((u32 *)addr + 1, val >> 32);
+ unsigned long fp_reg = (insn >> fp_reg_offset) & 0x1F;
+ unsigned long val;
+
+ val = get_f32_reg(fp_reg);
+ regs->status |= SR_FS_DIRTY;
+
+ return val;
}
+
+#else /* CONFIG_FPU */
+static void set_f32_rd(unsigned long insn, struct pt_regs *regs,
+ unsigned long val) {}
+
+static void set_f64_rd(unsigned long insn, struct pt_regs *regs, u64 val) {}
+
+static unsigned long get_f64_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
+static unsigned long get_f32_rs(unsigned long insn, u8 fp_reg_offset,
+ struct pt_regs *regs)
+{
+ return 0;
+}
+
#endif
-static inline ulong get_insn(ulong mepc)
+#define GET_F64_RS2(insn, regs) (get_f64_rs(insn, 20, regs))
+#define GET_F64_RS2C(insn, regs) (get_f64_rs(insn, 2, regs))
+#define GET_F64_RS2S(insn, regs) (get_f64_rs(RVC_RS2S(insn), 0, regs))
+
+#define GET_F32_RS2(insn, regs) (get_f32_rs(insn, 20, regs))
+#define GET_F32_RS2C(insn, regs) (get_f32_rs(insn, 2, regs))
+#define GET_F32_RS2S(insn, regs) (get_f32_rs(RVC_RS2S(insn), 0, regs))
+
+#ifdef CONFIG_RISCV_M_MODE
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+ u8 val;
+
+ asm volatile("lbu %0, %1" : "=&r" (val) : "m" (*addr));
+ *r_val = val;
+
+ return 0;
+}
+
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+ asm volatile ("sb %0, %1\n" : : "r" (val), "m" (*addr));
+
+ return 0;
+}
+
+static inline int get_insn(struct pt_regs *regs, ulong mepc, ulong *r_insn)
{
register ulong __mepc asm ("a2") = mepc;
ulong val, rvc_mask = 3, tmp;
@@ -226,23 +311,119 @@ static inline ulong get_insn(ulong mepc)
: [addr] "r" (__mepc), [rvc_mask] "r" (rvc_mask),
[xlen_minus_16] "i" (XLEN_MINUS_16));
- return val;
+ *r_insn = val;
+
+ return 0;
+}
+#else
+static inline int load_u8(struct pt_regs *regs, const u8 *addr, u8 *r_val)
+{
+ if (user_mode(regs)) {
+ return __get_user(*r_val, addr);
+ } else {
+ *r_val = *addr;
+ return 0;
+ }
+}
+
+static inline int store_u8(struct pt_regs *regs, u8 *addr, u8 val)
+{
+ if (user_mode(regs)) {
+ return __put_user(val, addr);
+ } else {
+ *addr = val;
+ return 0;
+ }
}
+#define __read_insn(regs, insn, insn_addr) \
+({ \
+ int __ret; \
+ \
+ if (user_mode(regs)) { \
+ __ret = __get_user(insn, insn_addr); \
+ } else { \
+ insn = *insn_addr; \
+ __ret = 0; \
+ } \
+ \
+ __ret; \
+})
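+
+/*
+ * Illustrative note: when epc & 0x2 is set the instruction may straddle a
+ * 32-bit boundary, so it is fetched as two 16-bit parcels. The low two bits
+ * of the first parcel encode the length: 0b11 means a 32-bit instruction
+ * (fetch the second parcel too), anything else is a compressed 16-bit
+ * instruction.
+ */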
+
+static inline int get_insn(struct pt_regs *regs, ulong epc, ulong *r_insn)
+{
+ ulong insn = 0;
+
+ if (epc & 0x2) {
+ ulong tmp = 0;
+ u16 __user *insn_addr = (u16 __user *)epc;
+
+ if (__read_insn(regs, insn, insn_addr))
+ return -EFAULT;
+		/*
+		 * __get_user() uses a regular "lw", which sign-extends the
+		 * loaded value; make sure to clear the higher-order bits in
+		 * case we OR it below with the upper 16-bit half.
+		 */
+ insn &= GENMASK(15, 0);
+ if ((insn & __INSN_LENGTH_MASK) != __INSN_LENGTH_32) {
+ *r_insn = insn;
+ return 0;
+ }
+ insn_addr++;
+ if (__read_insn(regs, tmp, insn_addr))
+ return -EFAULT;
+ *r_insn = (tmp << 16) | insn;
+
+ return 0;
+ } else {
+ u32 __user *insn_addr = (u32 __user *)epc;
+
+ if (__read_insn(regs, insn, insn_addr))
+ return -EFAULT;
+ if ((insn & __INSN_LENGTH_MASK) == __INSN_LENGTH_32) {
+ *r_insn = insn;
+ return 0;
+ }
+ insn &= GENMASK(15, 0);
+ *r_insn = insn;
+
+ return 0;
+ }
+}
+#endif
+
union reg_data {
u8 data_bytes[8];
ulong data_ulong;
u64 data_u64;
};
+static bool unaligned_ctl __read_mostly;
+
+/* sysctl hooks */
+int unaligned_enabled __read_mostly = 1; /* Enabled by default */
+
int handle_misaligned_load(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
- unsigned long insn = get_insn(epc);
- unsigned long addr = csr_read(mtval);
+ unsigned long insn;
+ unsigned long addr = regs->badaddr;
int i, fp = 0, shift = 0, len = 0;
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+
+ *this_cpu_ptr(&misaligned_access_speed) = RISCV_HWPROBE_MISALIGNED_EMULATED;
+
+ if (!unaligned_enabled)
+ return -1;
+
+ if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ return -1;
+
+ if (get_insn(regs, epc, &insn))
+ return -1;
+
regs->epc = 0;
if ((insn & INSN_MASK_LW) == INSN_MATCH_LW) {
@@ -305,13 +486,21 @@ int handle_misaligned_load(struct pt_regs *regs)
return -1;
}
+ if (!IS_ENABLED(CONFIG_FPU) && fp)
+ return -EOPNOTSUPP;
+
val.data_u64 = 0;
- for (i = 0; i < len; i++)
- val.data_bytes[i] = load_u8((void *)(addr + i));
+ for (i = 0; i < len; i++) {
+ if (load_u8(regs, (void *)(addr + i), &val.data_bytes[i]))
+ return -1;
+ }
- if (fp)
- return -1;
- SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ if (!fp)
+ SET_RD(insn, regs, val.data_ulong << shift >> shift);
+ else if (len == 8)
+ set_f64_rd(insn, regs, val.data_u64);
+ else
+ set_f32_rd(insn, regs, val.data_ulong);
regs->epc = epc + INSN_LEN(insn);
@@ -322,9 +511,20 @@ int handle_misaligned_store(struct pt_regs *regs)
{
union reg_data val;
unsigned long epc = regs->epc;
- unsigned long insn = get_insn(epc);
- unsigned long addr = csr_read(mtval);
- int i, len = 0;
+ unsigned long insn;
+ unsigned long addr = regs->badaddr;
+ int i, len = 0, fp = 0;
+
+ perf_sw_event(PERF_COUNT_SW_ALIGNMENT_FAULTS, 1, regs, addr);
+
+ if (!unaligned_enabled)
+ return -1;
+
+ if (user_mode(regs) && (current->thread.align_ctl & PR_UNALIGN_SIGBUS))
+ return -1;
+
+ if (get_insn(regs, epc, &insn))
+ return -1;
regs->epc = 0;
@@ -336,6 +536,14 @@ int handle_misaligned_store(struct pt_regs *regs)
} else if ((insn & INSN_MASK_SD) == INSN_MATCH_SD) {
len = 8;
#endif
+ } else if ((insn & INSN_MASK_FSD) == INSN_MATCH_FSD) {
+ fp = 1;
+ len = 8;
+ val.data_u64 = GET_F64_RS2(insn, regs);
+ } else if ((insn & INSN_MASK_FSW) == INSN_MATCH_FSW) {
+ fp = 1;
+ len = 4;
+ val.data_ulong = GET_F32_RS2(insn, regs);
} else if ((insn & INSN_MASK_SH) == INSN_MATCH_SH) {
len = 2;
#if defined(CONFIG_64BIT)
@@ -352,15 +560,88 @@ int handle_misaligned_store(struct pt_regs *regs)
} else if ((insn & INSN_MASK_C_SWSP) == INSN_MATCH_C_SWSP) {
len = 4;
val.data_ulong = GET_RS2C(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSD) == INSN_MATCH_C_FSD) {
+ fp = 1;
+ len = 8;
+ val.data_u64 = GET_F64_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSDSP) == INSN_MATCH_C_FSDSP) {
+ fp = 1;
+ len = 8;
+ val.data_u64 = GET_F64_RS2C(insn, regs);
+#if !defined(CONFIG_64BIT)
+ } else if ((insn & INSN_MASK_C_FSW) == INSN_MATCH_C_FSW) {
+ fp = 1;
+ len = 4;
+ val.data_ulong = GET_F32_RS2S(insn, regs);
+ } else if ((insn & INSN_MASK_C_FSWSP) == INSN_MATCH_C_FSWSP) {
+ fp = 1;
+ len = 4;
+ val.data_ulong = GET_F32_RS2C(insn, regs);
+#endif
} else {
regs->epc = epc;
return -1;
}
- for (i = 0; i < len; i++)
- store_u8((void *)(addr + i), val.data_bytes[i]);
+ if (!IS_ENABLED(CONFIG_FPU) && fp)
+ return -EOPNOTSUPP;
+
+ for (i = 0; i < len; i++) {
+ if (store_u8(regs, (void *)(addr + i), val.data_bytes[i]))
+ return -1;
+ }
regs->epc = epc + INSN_LEN(insn);
return 0;
}
+
+bool check_unaligned_access_emulated(int cpu)
+{
+ long *mas_ptr = per_cpu_ptr(&misaligned_access_speed, cpu);
+ unsigned long tmp_var, tmp_val;
+ bool misaligned_emu_detected;
+
+ *mas_ptr = RISCV_HWPROBE_MISALIGNED_UNKNOWN;
+
+ __asm__ __volatile__ (
+ " "REG_L" %[tmp], 1(%[ptr])\n"
+ : [tmp] "=r" (tmp_val) : [ptr] "r" (&tmp_var) : "memory");
+
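+	/*
+	 * For illustration: the misaligned load above traps when the hardware
+	 * cannot handle it, and handle_misaligned_load() then stamps this
+	 * CPU's speed as RISCV_HWPROBE_MISALIGNED_EMULATED, which the
+	 * read-back below detects.
+	 */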
+ misaligned_emu_detected = (*mas_ptr == RISCV_HWPROBE_MISALIGNED_EMULATED);
+ /*
+	 * If unaligned_ctl is already set, we detected at boot time that all
+	 * CPUs use emulated misaligned accesses. If that changed when
+	 * hotplugging a new CPU, this is something we don't handle.
+ */
+ if (unlikely(unaligned_ctl && !misaligned_emu_detected)) {
+		pr_crit("CPU misaligned accesses are non-homogeneous (expected all emulated)\n");
+ while (true)
+ cpu_relax();
+ }
+
+ return misaligned_emu_detected;
+}
+
+void unaligned_emulation_finish(void)
+{
+ int cpu;
+
+ /*
+ * We can only support PR_UNALIGN controls if all CPUs have misaligned
+ * accesses emulated since tasks requesting such control can run on any
+ * CPU.
+ */
+ for_each_present_cpu(cpu) {
+ if (per_cpu(misaligned_access_speed, cpu) !=
+ RISCV_HWPROBE_MISALIGNED_EMULATED) {
+ return;
+ }
+ }
+ unaligned_ctl = true;
+}
+
+bool unaligned_ctl_available(void)
+{
+ return unaligned_ctl;
+}
diff --git a/arch/riscv/kernel/vdso/Makefile b/arch/riscv/kernel/vdso/Makefile
index 6b1dba11bf..9b517fe1b8 100644
--- a/arch/riscv/kernel/vdso/Makefile
+++ b/arch/riscv/kernel/vdso/Makefile
@@ -36,7 +36,7 @@ CPPFLAGS_vdso.lds += -DHAS_VGETTIMEOFDAY
endif
# Disable -pg to prevent insert call site
-CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE)
+CFLAGS_REMOVE_vgettimeofday.o = $(CC_FLAGS_FTRACE) $(CC_FLAGS_SCS)
# Disable profiling and instrumentation for VDSO code
GCOV_PROFILE := n
@@ -73,13 +73,3 @@ quiet_cmd_vdsold = VDSOLD $@
cmd_vdsold = $(LD) $(ld_flags) -T $(filter-out FORCE,$^) -o $@.tmp && \
$(OBJCOPY) $(patsubst %, -G __vdso_%, $(vdso-syms)) $@.tmp $@ && \
rm $@.tmp
-
-# install commands for the unstripped file
-quiet_cmd_vdso_install = INSTALL $@
- cmd_vdso_install = cp $(obj)/$@.dbg $(MODLIB)/vdso/$@
-
-vdso.so: $(obj)/vdso.so.dbg
- @mkdir -p $(MODLIB)/vdso
- $(call cmd,vdso_install)
-
-vdso_install: vdso.so
diff --git a/arch/riscv/kernel/vdso/flush_icache.S b/arch/riscv/kernel/vdso/flush_icache.S
index 82f97d67c2..8f884227e8 100644
--- a/arch/riscv/kernel/vdso/flush_icache.S
+++ b/arch/riscv/kernel/vdso/flush_icache.S
@@ -8,7 +8,7 @@
.text
/* int __vdso_flush_icache(void *start, void *end, unsigned long flags); */
-ENTRY(__vdso_flush_icache)
+SYM_FUNC_START(__vdso_flush_icache)
.cfi_startproc
#ifdef CONFIG_SMP
li a7, __NR_riscv_flush_icache
@@ -19,4 +19,4 @@ ENTRY(__vdso_flush_icache)
#endif
ret
.cfi_endproc
-ENDPROC(__vdso_flush_icache)
+SYM_FUNC_END(__vdso_flush_icache)
diff --git a/arch/riscv/kernel/vdso/getcpu.S b/arch/riscv/kernel/vdso/getcpu.S
index bb0c05e2ff..9c1bd53190 100644
--- a/arch/riscv/kernel/vdso/getcpu.S
+++ b/arch/riscv/kernel/vdso/getcpu.S
@@ -8,11 +8,11 @@
.text
/* int __vdso_getcpu(unsigned *cpu, unsigned *node, void *unused); */
-ENTRY(__vdso_getcpu)
+SYM_FUNC_START(__vdso_getcpu)
.cfi_startproc
/* For now, just do the syscall. */
li a7, __NR_getcpu
ecall
ret
.cfi_endproc
-ENDPROC(__vdso_getcpu)
+SYM_FUNC_END(__vdso_getcpu)
diff --git a/arch/riscv/kernel/vdso/rt_sigreturn.S b/arch/riscv/kernel/vdso/rt_sigreturn.S
index 10438c7c62..3dc022aa89 100644
--- a/arch/riscv/kernel/vdso/rt_sigreturn.S
+++ b/arch/riscv/kernel/vdso/rt_sigreturn.S
@@ -7,10 +7,10 @@
#include <asm/unistd.h>
.text
-ENTRY(__vdso_rt_sigreturn)
+SYM_FUNC_START(__vdso_rt_sigreturn)
.cfi_startproc
.cfi_signal_frame
li a7, __NR_rt_sigreturn
ecall
.cfi_endproc
-ENDPROC(__vdso_rt_sigreturn)
+SYM_FUNC_END(__vdso_rt_sigreturn)
diff --git a/arch/riscv/kernel/vdso/sys_hwprobe.S b/arch/riscv/kernel/vdso/sys_hwprobe.S
index 4e704146c7..77e57f8305 100644
--- a/arch/riscv/kernel/vdso/sys_hwprobe.S
+++ b/arch/riscv/kernel/vdso/sys_hwprobe.S
@@ -5,11 +5,11 @@
#include <asm/unistd.h>
.text
-ENTRY(riscv_hwprobe)
+SYM_FUNC_START(riscv_hwprobe)
.cfi_startproc
li a7, __NR_riscv_hwprobe
ecall
ret
.cfi_endproc
-ENDPROC(riscv_hwprobe)
+SYM_FUNC_END(riscv_hwprobe)
diff --git a/arch/riscv/kernel/vdso/vdso.lds.S b/arch/riscv/kernel/vdso/vdso.lds.S
index 82ce64900f..cbe2a17933 100644
--- a/arch/riscv/kernel/vdso/vdso.lds.S
+++ b/arch/riscv/kernel/vdso/vdso.lds.S
@@ -23,35 +23,31 @@ SECTIONS
.gnu.version_d : { *(.gnu.version_d) }
.gnu.version_r : { *(.gnu.version_r) }
- .note : { *(.note.*) } :text :note
.dynamic : { *(.dynamic) } :text :dynamic
+ .rodata : {
+ *(.rodata .rodata.* .gnu.linkonce.r.*)
+ *(.got.plt) *(.got)
+ *(.data .data.* .gnu.linkonce.d.*)
+ *(.dynbss)
+ *(.bss .bss.* .gnu.linkonce.b.*)
+ }
+
+ .note : { *(.note.*) } :text :note
+
.eh_frame_hdr : { *(.eh_frame_hdr) } :text :eh_frame_hdr
.eh_frame : { KEEP (*(.eh_frame)) } :text
- .rodata : { *(.rodata .rodata.* .gnu.linkonce.r.*) }
-
/*
- * This linker script is used both with -r and with -shared.
- * For the layouts to match, we need to skip more than enough
- * space for the dynamic symbol table, etc. If this amount is
- * insufficient, ld -shared will error; simply increase it here.
+ * Text is well-separated from actual data: there's plenty of
+ * stuff that isn't used at runtime in between.
*/
- . = 0x800;
+ . = ALIGN(16);
.text : { *(.text .text.*) } :text
. = ALIGN(4);
.alternative : {
- __alt_start = .;
*(.alternative)
- __alt_end = .;
- }
-
- .data : {
- *(.got.plt) *(.got)
- *(.data .data.* .gnu.linkonce.d.*)
- *(.dynbss)
- *(.bss .bss.* .gnu.linkonce.b.*)
}
}
diff --git a/arch/riscv/kernel/vector.c b/arch/riscv/kernel/vector.c
index 8d92fb6c52..578b629248 100644
--- a/arch/riscv/kernel/vector.c
+++ b/arch/riscv/kernel/vector.c
@@ -255,7 +255,6 @@ static struct ctl_table riscv_v_default_vstate_table[] = {
.mode = 0644,
.proc_handler = proc_dobool,
},
- { }
};
static int __init riscv_v_sysctl_init(void)