author    Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>  2024-08-07 13:17:46 +0000
commit    7f3a4257159dea8e7ef66d1a539dc6df708b8ed3
tree      bcc69b5f4609f348fac49e2f59e210b29eaea783 /arch/riscv/net
parent    Adding upstream version 6.9.12.
Adding upstream version 6.10.3. (tag: upstream/6.10.3)
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'arch/riscv/net')
-rw-r--r--  arch/riscv/net/bpf_jit.h        |   6
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c | 271
-rw-r--r--  arch/riscv/net/bpf_jit_core.c   |  15
3 files changed, 270 insertions(+), 22 deletions(-)
diff --git a/arch/riscv/net/bpf_jit.h b/arch/riscv/net/bpf_jit.h
index f4b6b3b9ed..fdbf88ca8b 100644
--- a/arch/riscv/net/bpf_jit.h
+++ b/arch/riscv/net/bpf_jit.h
@@ -81,6 +81,8 @@ struct rv_jit_context {
int nexentries;
unsigned long flags;
int stack_size;
+ u64 arena_vm_start;
+ u64 user_vm_start;
};
/* Convert from ninsns to bytes. */
@@ -606,7 +608,7 @@ static inline u32 rv_nop(void)
return rv_i_insn(0, 0, 0, 0, 0x13);
}
-/* RVC instrutions. */
+/* RVC instructions. */
static inline u16 rvc_addi4spn(u8 rd, u32 imm10)
{
@@ -735,7 +737,7 @@ static inline u16 rvc_swsp(u32 imm8, u8 rs2)
return rv_css_insn(0x6, imm, rs2, 0x2);
}
-/* RVZBB instrutions. */
+/* RVZBB instructions. */
static inline u32 rvzbb_sextb(u8 rd, u8 rs1)
{
return rv_i_insn(0x604, rs1, 1, rd, 0x13);
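The two new context fields back the BPF arena: arena_vm_start is the kernel-side base of the arena mapping, user_vm_start its user-space base. A userspace model of the cast_user sequence that bpf_jit_comp64.c emits for it below (a sketch, assuming the upper 32 bits of user_vm_start identify the user mapping; the JIT does this in registers):

#include <stdint.h>
#include <stdio.h>

/* Zero-extend the low 32 bits and, if they are non-zero, splice in
 * the upper half of user_vm_start; NULL stays NULL. */
static uint64_t cast_user(uint64_t ptr, uint64_t user_vm_start)
{
	uint64_t lo = (uint32_t)ptr;

	return lo ? (((user_vm_start >> 32) << 32) | lo) : 0;
}

int main(void)
{
	uint64_t uvs = 0x7f0000000000ULL;	/* assumed user base */

	printf("%#llx\n", (unsigned long long)cast_user(0x1234, uvs));
	printf("%#llx\n", (unsigned long long)cast_user(0, uvs));
	return 0;
}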
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index fb5d195004..212b015e09 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -12,12 +12,16 @@
#include <linux/stop_machine.h>
#include <asm/patch.h>
#include <asm/cfi.h>
+#include <asm/percpu.h>
#include "bpf_jit.h"
#define RV_FENTRY_NINSNS 2
+/* imm that allows emit_imm to emit max count insns */
+#define RV_MAX_COUNT_IMM 0x7FFF7FF7FF7FF7FF
#define RV_REG_TCC RV_REG_A6
#define RV_REG_TCC_SAVED RV_REG_S6 /* Store A6 in S6 if program do calls */
+#define RV_REG_ARENA RV_REG_S7 /* For storing arena_vm_start */
static const int regmap[] = {
[BPF_REG_0] = RV_REG_A5,
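RV_MAX_COUNT_IMM stands in for addresses that are unknown during the sizing pass: its bit pattern drives emit_imm() down its longest lui/addi/slli chain, so the measured program can only shrink, never grow, once real addresses are patched in. A counting model of that recursion (a sketch mirroring emit_imm()'s split, using __builtin_ctzll where the kernel uses __ffs; not the kernel code itself):

#include <stdint.h>
#include <stdio.h>

/* Instructions needed to materialize VAL on RV64: lui+addiw for
 * 32-bit values, otherwise recurse on the upper part plus slli
 * (+ addi if low 12 bits are set). */
static int imm_ninsns(int64_t val)
{
	int64_t upper = (val + (1 << 11)) >> 12;
	int64_t lower = val & 0xfff;

	if (val >= INT32_MIN && val <= INT32_MAX)
		return upper ? 2 : 1;

	return imm_ninsns(upper >> __builtin_ctzll(upper))
	       + 1 + (lower != 0);
}

int main(void)
{
	printf("worst case: %d insns\n", imm_ninsns(0x7FFF7FF7FF7FF7FFLL));
	printf("small imm:  %d insns\n", imm_ninsns(42));
	return 0;
}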
@@ -255,6 +259,10 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
emit_ld(RV_REG_S6, store_offset, RV_REG_SP, ctx);
store_offset -= 8;
}
+ if (ctx->arena_vm_start) {
+ emit_ld(RV_REG_ARENA, store_offset, RV_REG_SP, ctx);
+ store_offset -= 8;
+ }
emit_addi(RV_REG_SP, RV_REG_SP, stack_adjust, ctx);
/* Set return value. */
@@ -548,6 +556,7 @@ static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
#define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
#define BPF_FIXUP_REG_MASK GENMASK(31, 27)
+#define REG_DONT_CLEAR_MARKER 0 /* RV_REG_ZERO unused in pt_regmap */
bool ex_handler_bpf(const struct exception_table_entry *ex,
struct pt_regs *regs)
@@ -555,7 +564,8 @@ bool ex_handler_bpf(const struct exception_table_entry *ex,
off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
int regs_offset = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
- *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
+ if (regs_offset != REG_DONT_CLEAR_MARKER)
+ *(unsigned long *)((void *)regs + pt_regmap[regs_offset]) = 0;
regs->epc = (unsigned long)&ex->fixup - offset;
return true;
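For faulting arena loads the destination register is cleared; for the new arena stores there is nothing to clear, so register 0 (the RISC-V zero register, which never appears in pt_regmap) doubles as a "skip the clear" marker. A model of the 32-bit fixup packing, using the masks from this patch (sketch only):

#include <stdint.h>
#include <stdio.h>

#define FIXUP_OFFSET_MASK 0x07ffffffu	/* GENMASK(26, 0) */
#define FIXUP_REG_SHIFT   27		/* reg lives in bits 31:27 */

/* Pack offset and register-to-clear into one fixup word;
 * reg == 0 means "don't clear anything". */
static uint32_t pack_fixup(uint32_t offset, uint32_t reg)
{
	return (offset & FIXUP_OFFSET_MASK) | (reg << FIXUP_REG_SHIFT);
}

int main(void)
{
	uint32_t fx = pack_fixup(0x1000, 0);	/* arena store: no clear */

	printf("offset=%#x reg=%u\n", fx & FIXUP_OFFSET_MASK,
	       fx >> FIXUP_REG_SHIFT);
	return 0;
}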
@@ -572,7 +582,8 @@ static int add_exception_handler(const struct bpf_insn *insn,
off_t fixup_offset;
if (!ctx->insns || !ctx->ro_insns || !ctx->prog->aux->extable ||
- (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX))
+ (BPF_MODE(insn->code) != BPF_PROBE_MEM && BPF_MODE(insn->code) != BPF_PROBE_MEMSX &&
+ BPF_MODE(insn->code) != BPF_PROBE_MEM32))
return 0;
if (WARN_ON_ONCE(ctx->nexentries >= ctx->prog->aux->num_exentries))
@@ -906,7 +917,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
orig_call += RV_FENTRY_NINSNS * 4;
if (flags & BPF_TRAMP_F_CALL_ORIG) {
- emit_imm(RV_REG_A0, (const s64)im, ctx);
+ emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
if (ret)
return ret;
@@ -967,7 +978,7 @@ static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
if (flags & BPF_TRAMP_F_CALL_ORIG) {
im->ip_epilogue = ctx->insns + ctx->ninsns;
- emit_imm(RV_REG_A0, (const s64)im, ctx);
+ emit_imm(RV_REG_A0, ctx->insns ? (const s64)im : RV_MAX_COUNT_IMM, ctx);
ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
if (ret)
goto out;
@@ -1036,6 +1047,7 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
{
int ret;
struct rv_jit_context ctx;
+ u32 size = image_end - image;
ctx.ninsns = 0;
/*
@@ -1049,11 +1061,16 @@ int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
ctx.ro_insns = image;
ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
if (ret < 0)
- return ret;
+ goto out;
- bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
+ if (WARN_ON(size < ninsns_rvoff(ctx.ninsns))) {
+ ret = -E2BIG;
+ goto out;
+ }
- return ninsns_rvoff(ret);
+ bpf_flush_icache(image, image_end);
+out:
+ return ret < 0 ? ret : size;
}
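The trampoline is now generated against a caller-provided image window: a negative ret from the builder propagates, overflowing the window returns -E2BIG, and success returns the window size. A toy model of the measure-then-emit contract this relies on (a sketch, not kernel code): pass 1 runs with insns == NULL and only counts, pass 2 writes and must fit, because unknown addresses were measured at their worst-case encoding (RV_MAX_COUNT_IMM above).

#include <stdint.h>

struct jit_ctx {
	uint32_t *insns;	/* NULL during the sizing pass */
	int ninsns;
};

static void emit(struct jit_ctx *ctx, uint32_t insn)
{
	if (ctx->insns)		/* write only in the final pass */
		ctx->insns[ctx->ninsns] = insn;
	ctx->ninsns++;		/* always count */
}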
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
@@ -1073,6 +1090,33 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
/* dst = src */
case BPF_ALU | BPF_MOV | BPF_X:
case BPF_ALU64 | BPF_MOV | BPF_X:
+ if (insn_is_cast_user(insn)) {
+ emit_mv(RV_REG_T1, rs, ctx);
+ emit_zextw(RV_REG_T1, RV_REG_T1, ctx);
+ emit_imm(rd, (ctx->user_vm_start >> 32) << 32, ctx);
+ emit(rv_beq(RV_REG_T1, RV_REG_ZERO, 4), ctx);
+ emit_or(RV_REG_T1, rd, RV_REG_T1, ctx);
+ emit_mv(rd, RV_REG_T1, ctx);
+ break;
+ } else if (insn_is_mov_percpu_addr(insn)) {
+ if (rd != rs)
+ emit_mv(rd, rs, ctx);
+#ifdef CONFIG_SMP
+ /* Load current CPU number in T1 */
+ emit_ld(RV_REG_T1, offsetof(struct thread_info, cpu),
+ RV_REG_TP, ctx);
+ /* << 3 because offsets are 8 bytes */
+ emit_slli(RV_REG_T1, RV_REG_T1, 3, ctx);
+ /* Load address of __per_cpu_offset array in T2 */
+ emit_addr(RV_REG_T2, (u64)&__per_cpu_offset, extra_pass, ctx);
+ /* Add offset of current CPU to __per_cpu_offset */
+ emit_add(RV_REG_T1, RV_REG_T2, RV_REG_T1, ctx);
+ /* Load __per_cpu_offset[cpu] in T1 */
+ emit_ld(RV_REG_T1, 0, RV_REG_T1, ctx);
+ /* Add the offset to Rd */
+ emit_add(rd, rd, RV_REG_T1, ctx);
+#endif
+ }
if (imm == 1) {
/* Special mov32 for zext */
emit_zextw(rd, rd, ctx);
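The mov_percpu_addr case turns a per-CPU base pointer into this CPU's instance by adding __per_cpu_offset[cpu], which is exactly what the emitted ld/slli/add sequence computes. A userspace model with a stand-in offset table (sketch; the kernel's table is relocated per CPU at boot):

#include <stdint.h>

#define NR_CPUS 4

/* Stand-in for the kernel's __per_cpu_offset[] array. */
static uintptr_t per_cpu_offset[NR_CPUS];

/* What the emitted sequence computes: rd += __per_cpu_offset[cpu].
 * The JIT reads cpu from thread_info and indexes with cpu << 3
 * because each table entry is 8 bytes. */
static void *this_cpu_ptr_model(void *base, int cpu)
{
	return (char *)base + per_cpu_offset[cpu];
}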
@@ -1457,6 +1501,22 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool fixed_addr;
u64 addr;
+ /* Inline calls to bpf_get_smp_processor_id()
+ *
+ * RV_REG_TP holds the address of the current CPU's task_struct and thread_info is
+ * at offset 0 in task_struct.
+ * Load cpu from thread_info:
+ * Set R0 to ((struct thread_info *)(RV_REG_TP))->cpu
+ *
+ * This replicates the implementation of raw_smp_processor_id() on RISCV
+ */
+ if (insn->src_reg == 0 && insn->imm == BPF_FUNC_get_smp_processor_id) {
+ /* Load current CPU number in R0 */
+ emit_ld(bpf_to_rv_reg(BPF_REG_0, ctx), offsetof(struct thread_info, cpu),
+ RV_REG_TP, ctx);
+ break;
+ }
+
mark_call(ctx);
ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
&addr, &fixed_addr);
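The inlined helper above is a single load because, with thread_info embedded at offset 0 of task_struct and tp holding the current task, the CPU number sits at tp + offsetof(struct thread_info, cpu). A minimal C model of that layout (sketch; real struct definitions differ):

struct thread_info { int cpu; };
struct task_struct { struct thread_info thread_info; };

/* One load from offset 0: what the emitted emit_ld() performs. */
static int smp_processor_id_model(const struct task_struct *tp)
{
	return tp->thread_info.cpu;
}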
@@ -1539,6 +1599,11 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
case BPF_LDX | BPF_PROBE_MEMSX | BPF_B:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_H:
case BPF_LDX | BPF_PROBE_MEMSX | BPF_W:
+ /* LDX | PROBE_MEM32: dst = *(unsigned size *)(src + RV_REG_ARENA + off) */
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_LDX | BPF_PROBE_MEM32 | BPF_DW:
{
int insn_len, insns_start;
bool sign_ext;
@@ -1546,6 +1611,11 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
sign_ext = BPF_MODE(insn->code) == BPF_MEMSX ||
BPF_MODE(insn->code) == BPF_PROBE_MEMSX;
+ if (BPF_MODE(insn->code) == BPF_PROBE_MEM32) {
+ emit_add(RV_REG_T2, rs, RV_REG_ARENA, ctx);
+ rs = RV_REG_T2;
+ }
+
switch (BPF_SIZE(code)) {
case BPF_B:
if (is_12b_int(off)) {
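For PROBE_MEM32 the program works with 32-bit arena pointers; before the real load the JIT rebases them onto the kernel mapping kept in RV_REG_ARENA, and any fault is routed through the exception table entry added below. The effective address, as a sketch of the assumed semantics:

#include <stdint.h>

/* Kernel-side address for an arena access: arena base plus the
 * 32-bit arena pointer plus the signed 16-bit BPF offset. */
static inline uint64_t arena_kern_addr(uint64_t arena_vm_start,
				       uint32_t ptr32, int16_t off)
{
	return arena_vm_start + ptr32 + off;
}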
@@ -1682,6 +1752,86 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
break;
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_ST | BPF_PROBE_MEM32 | BPF_DW:
+ {
+ int insn_len, insns_start;
+
+ emit_add(RV_REG_T3, rd, RV_REG_ARENA, ctx);
+ rd = RV_REG_T3;
+
+ /* Load imm to a register then store it */
+ emit_imm(RV_REG_T1, imm, ctx);
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sb(rd, off, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sb(RV_REG_T2, 0, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_H:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sh(rd, off, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sh(RV_REG_T2, 0, RV_REG_T1), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_W:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sw(rd, off, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sw(RV_REG_T2, 0, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_DW:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sd(rd, off, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T2, off, ctx);
+ emit_add(RV_REG_T2, RV_REG_T2, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sd(RV_REG_T2, 0, RV_REG_T1, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
+ insn_len);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
/* STX: *(size *)(dst + off) = src */
case BPF_STX | BPF_MEM | BPF_B:
if (is_12b_int(off)) {
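Each size case above picks the short store form when the offset fits the instruction's immediate and otherwise materializes the offset into a temporary first. The test mirrors is_12b_int() from bpf_jit.h: RISC-V load/store immediates are signed 12-bit values.

#include <stdbool.h>

/* Offset fits the signed 12-bit immediate of RV loads/stores. */
static inline bool is_12b_int(long val)
{
	return -(1L << 11) <= val && val < (1L << 11);
}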
@@ -1728,6 +1878,84 @@ int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
emit_atomic(rd, rs, off, imm,
BPF_SIZE(code) == BPF_DW, ctx);
break;
+
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_B:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_H:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_W:
+ case BPF_STX | BPF_PROBE_MEM32 | BPF_DW:
+ {
+ int insn_len, insns_start;
+
+ emit_add(RV_REG_T2, rd, RV_REG_ARENA, ctx);
+ rd = RV_REG_T2;
+
+ switch (BPF_SIZE(code)) {
+ case BPF_B:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sb(rd, off, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sb(RV_REG_T1, 0, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_H:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit(rv_sh(rd, off, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit(rv_sh(RV_REG_T1, 0, rs), ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_W:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sw(rd, off, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sw(RV_REG_T1, 0, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ case BPF_DW:
+ if (is_12b_int(off)) {
+ insns_start = ctx->ninsns;
+ emit_sd(rd, off, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ emit_imm(RV_REG_T1, off, ctx);
+ emit_add(RV_REG_T1, RV_REG_T1, rd, ctx);
+ insns_start = ctx->ninsns;
+ emit_sd(RV_REG_T1, 0, rs, ctx);
+ insn_len = ctx->ninsns - insns_start;
+ break;
+ }
+
+ ret = add_exception_handler(insn, ctx, REG_DONT_CLEAR_MARKER,
+ insn_len);
+ if (ret)
+ return ret;
+
+ break;
+ }
+
default:
pr_err("bpf-jit: unknown opcode %02x\n", code);
return -EINVAL;
@@ -1759,6 +1987,8 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
stack_adjust += 8;
if (seen_reg(RV_REG_S6, ctx))
stack_adjust += 8;
+ if (ctx->arena_vm_start)
+ stack_adjust += 8;
stack_adjust = round_up(stack_adjust, 16);
stack_adjust += bpf_stack_adjust;
@@ -1810,6 +2040,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
emit_sd(RV_REG_SP, store_offset, RV_REG_S6, ctx);
store_offset -= 8;
}
+ if (ctx->arena_vm_start) {
+ emit_sd(RV_REG_SP, store_offset, RV_REG_ARENA, ctx);
+ store_offset -= 8;
+ }
emit_addi(RV_REG_FP, RV_REG_SP, stack_adjust, ctx);
@@ -1823,6 +2057,9 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx, bool is_subprog)
emit_mv(RV_REG_TCC_SAVED, RV_REG_TCC, ctx);
ctx->stack_size = stack_adjust;
+
+ if (ctx->arena_vm_start)
+ emit_imm(RV_REG_ARENA, ctx->arena_vm_start, ctx);
}
void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
@@ -1839,3 +2076,23 @@ bool bpf_jit_supports_ptr_xchg(void)
{
return true;
}
+
+bool bpf_jit_supports_arena(void)
+{
+ return true;
+}
+
+bool bpf_jit_supports_percpu_insn(void)
+{
+ return true;
+}
+
+bool bpf_jit_inlines_helper_call(s32 imm)
+{
+ switch (imm) {
+ case BPF_FUNC_get_smp_processor_id:
+ return true;
+ default:
+ return false;
+ }
+}
diff --git a/arch/riscv/net/bpf_jit_core.c b/arch/riscv/net/bpf_jit_core.c
index 6b3acac30c..0a96abdaca 100644
--- a/arch/riscv/net/bpf_jit_core.c
+++ b/arch/riscv/net/bpf_jit_core.c
@@ -80,6 +80,8 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
goto skip_init_ctx;
}
+ ctx->arena_vm_start = bpf_arena_get_kern_vm_start(prog->aux->arena);
+ ctx->user_vm_start = bpf_arena_get_user_vm_start(prog->aux->arena);
ctx->prog = prog;
ctx->offset = kcalloc(prog->len, sizeof(int), GFP_KERNEL);
if (!ctx->offset) {
@@ -219,19 +221,6 @@ u64 bpf_jit_alloc_exec_limit(void)
return BPF_JIT_REGION_SIZE;
}
-void *bpf_jit_alloc_exec(unsigned long size)
-{
- return __vmalloc_node_range(size, PAGE_SIZE, BPF_JIT_REGION_START,
- BPF_JIT_REGION_END, GFP_KERNEL,
- PAGE_KERNEL, 0, NUMA_NO_NODE,
- __builtin_return_address(0));
-}
-
-void bpf_jit_free_exec(void *addr)
-{
- return vfree(addr);
-}
-
void *bpf_arch_text_copy(void *dst, void *src, size_t len)
{
int ret;