Diffstat (limited to 'arch/riscv/net/bpf_jit_comp64.c')
-rw-r--r--  arch/riscv/net/bpf_jit_comp64.c  470
1 file changed, 450 insertions, 20 deletions
diff --git a/arch/riscv/net/bpf_jit_comp64.c b/arch/riscv/net/bpf_jit_comp64.c
index 00df3a8f92ac..c648864c8cd1 100644
--- a/arch/riscv/net/bpf_jit_comp64.c
+++ b/arch/riscv/net/bpf_jit_comp64.c
@@ -8,6 +8,9 @@
#include <linux/bitfield.h>
#include <linux/bpf.h>
#include <linux/filter.h>
+#include <linux/memory.h>
+#include <linux/stop_machine.h>
+#include <asm/patch.h>
#include "bpf_jit.h"
#define RV_REG_TCC RV_REG_A6
@@ -136,6 +139,25 @@ static bool in_auipc_jalr_range(s64 val)
val < ((1L << 31) - (1L << 11));
}
+/* Emit fixed-length instructions for address */
+static int emit_addr(u8 rd, u64 addr, bool extra_pass, struct rv_jit_context *ctx)
+{
+ u64 ip = (u64)(ctx->insns + ctx->ninsns);
+ s64 off = addr - ip;
+ s64 upper = (off + (1 << 11)) >> 12;
+ s64 lower = off & 0xfff;
+
+ if (extra_pass && !in_auipc_jalr_range(off)) {
+ pr_err("bpf-jit: target offset 0x%llx is out of range\n", off);
+ return -ERANGE;
+ }
+
+ emit(rv_auipc(rd, upper), ctx);
+ emit(rv_addi(rd, rd, lower), ctx);
+ return 0;
+}
+
+/* Emit variable-length instructions for 32-bit and 64-bit imm */
static void emit_imm(u8 rd, s64 val, struct rv_jit_context *ctx)
{
/* Note that the immediate from the add is sign-extended,
@@ -219,7 +241,7 @@ static void __build_epilogue(bool is_tail_call, struct rv_jit_context *ctx)
if (!is_tail_call)
emit_mv(RV_REG_A0, RV_REG_A5, ctx);
emit_jalr(RV_REG_ZERO, is_tail_call ? RV_REG_T3 : RV_REG_RA,
- is_tail_call ? 4 : 0, /* skip TCC init */
+ is_tail_call ? 20 : 0, /* skip reserved nops and TCC init */
ctx);
}
@@ -409,12 +431,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
*rd = RV_REG_T2;
}
-static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
struct rv_jit_context *ctx)
{
s64 upper, lower;
- if (rvoff && is_21b_int(rvoff) && !force_jalr) {
+ if (rvoff && fixed_addr && is_21b_int(rvoff)) {
emit(rv_jal(rd, rvoff >> 1), ctx);
return 0;
} else if (in_auipc_jalr_range(rvoff)) {
@@ -435,24 +457,17 @@ static bool is_signed_bpf_cond(u8 cond)
cond == BPF_JSGE || cond == BPF_JSLE;
}
-static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
+static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
{
s64 off = 0;
u64 ip;
- u8 rd;
- int ret;
if (addr && ctx->insns) {
ip = (u64)(long)(ctx->insns + ctx->ninsns);
off = addr - ip;
}
- ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
- if (ret)
- return ret;
- rd = bpf_to_rv_reg(BPF_REG_0, ctx);
- emit_mv(rd, RV_REG_A0, ctx);
- return 0;
+ return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
}
static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
@@ -603,6 +618,401 @@ static int add_exception_handler(const struct bpf_insn *insn,
return 0;
}
+static int gen_call_or_nops(void *target, void *ip, u32 *insns)
+{
+ s64 rvoff;
+ int i, ret;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = (u16 *)insns;
+
+ if (!target) {
+ for (i = 0; i < 4; i++)
+ emit(rv_nop(), &ctx);
+ return 0;
+ }
+
+ rvoff = (s64)(target - (ip + 4));
+ emit(rv_sd(RV_REG_SP, -8, RV_REG_RA), &ctx);
+ ret = emit_jump_and_link(RV_REG_RA, rvoff, false, &ctx);
+ if (ret)
+ return ret;
+ emit(rv_ld(RV_REG_RA, -8, RV_REG_SP), &ctx);
+
+ return 0;
+}
+
+static int gen_jump_or_nops(void *target, void *ip, u32 *insns)
+{
+ s64 rvoff;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = (u16 *)insns;
+
+ if (!target) {
+ emit(rv_nop(), &ctx);
+ emit(rv_nop(), &ctx);
+ return 0;
+ }
+
+ rvoff = (s64)(target - ip);
+ return emit_jump_and_link(RV_REG_ZERO, rvoff, false, &ctx);
+}
+
+int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
+ void *old_addr, void *new_addr)
+{
+ u32 old_insns[4], new_insns[4];
+ bool is_call = poke_type == BPF_MOD_CALL;
+ int (*gen_insns)(void *target, void *ip, u32 *insns);
+ int ninsns = is_call ? 4 : 2;
+ int ret;
+
+ if (!is_bpf_text_address((unsigned long)ip))
+ return -ENOTSUPP;
+
+ gen_insns = is_call ? gen_call_or_nops : gen_jump_or_nops;
+
+ ret = gen_insns(old_addr, ip, old_insns);
+ if (ret)
+ return ret;
+
+ if (memcmp(ip, old_insns, ninsns * 4))
+ return -EFAULT;
+
+ ret = gen_insns(new_addr, ip, new_insns);
+ if (ret)
+ return ret;
+
+ cpus_read_lock();
+ mutex_lock(&text_mutex);
+ if (memcmp(ip, new_insns, ninsns * 4))
+ ret = patch_text(ip, new_insns, ninsns);
+ mutex_unlock(&text_mutex);
+ cpus_read_unlock();
+
+ return ret;
+}
+
+static void store_args(int nregs, int args_off, struct rv_jit_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < nregs; i++) {
+ emit_sd(RV_REG_FP, -args_off, RV_REG_A0 + i, ctx);
+ args_off -= 8;
+ }
+}
+
+static void restore_args(int nregs, int args_off, struct rv_jit_context *ctx)
+{
+ int i;
+
+ for (i = 0; i < nregs; i++) {
+ emit_ld(RV_REG_A0 + i, -args_off, RV_REG_FP, ctx);
+ args_off -= 8;
+ }
+}
+
+static int invoke_bpf_prog(struct bpf_tramp_link *l, int args_off, int retval_off,
+ int run_ctx_off, bool save_ret, struct rv_jit_context *ctx)
+{
+ int ret, branch_off;
+ struct bpf_prog *p = l->link.prog;
+ int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
+
+ if (l->cookie) {
+ emit_imm(RV_REG_T1, l->cookie, ctx);
+ emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_T1, ctx);
+ } else {
+ emit_sd(RV_REG_FP, -run_ctx_off + cookie_off, RV_REG_ZERO, ctx);
+ }
+
+ /* arg1: prog */
+ emit_imm(RV_REG_A0, (const s64)p, ctx);
+ /* arg2: &run_ctx */
+ emit_addi(RV_REG_A1, RV_REG_FP, -run_ctx_off, ctx);
+ ret = emit_call((const u64)bpf_trampoline_enter(p), true, ctx);
+ if (ret)
+ return ret;
+
+ /* if (__bpf_prog_enter(prog) == 0)
+ * goto skip_exec_of_prog;
+ */
+ branch_off = ctx->ninsns;
+ /* nop reserved for conditional jump */
+ emit(rv_nop(), ctx);
+
+ /* store prog start time */
+ emit_mv(RV_REG_S1, RV_REG_A0, ctx);
+
+ /* arg1: &args_off */
+ emit_addi(RV_REG_A0, RV_REG_FP, -args_off, ctx);
+ if (!p->jited)
+ /* arg2: progs[i]->insnsi for interpreter */
+ emit_imm(RV_REG_A1, (const s64)p->insnsi, ctx);
+ ret = emit_call((const u64)p->bpf_func, true, ctx);
+ if (ret)
+ return ret;
+
+ if (save_ret)
+ emit_sd(RV_REG_FP, -retval_off, regmap[BPF_REG_0], ctx);
+
+ /* update branch with beqz */
+ if (ctx->insns) {
+ int offset = ninsns_rvoff(ctx->ninsns - branch_off);
+ u32 insn = rv_beq(RV_REG_A0, RV_REG_ZERO, offset >> 1);
+ *(u32 *)(ctx->insns + branch_off) = insn;
+ }
+
+ /* arg1: prog */
+ emit_imm(RV_REG_A0, (const s64)p, ctx);
+ /* arg2: prog start time */
+ emit_mv(RV_REG_A1, RV_REG_S1, ctx);
+ /* arg3: &run_ctx */
+ emit_addi(RV_REG_A2, RV_REG_FP, -run_ctx_off, ctx);
+ ret = emit_call((const u64)bpf_trampoline_exit(p), true, ctx);
+
+ return ret;
+}
+
+static int __arch_prepare_bpf_trampoline(struct bpf_tramp_image *im,
+ const struct btf_func_model *m,
+ struct bpf_tramp_links *tlinks,
+ void *func_addr, u32 flags,
+ struct rv_jit_context *ctx)
+{
+ int i, ret, offset;
+ int *branches_off = NULL;
+ int stack_size = 0, nregs = m->nr_args;
+ int retaddr_off, fp_off, retval_off, args_off;
+ int nregs_off, ip_off, run_ctx_off, sreg_off;
+ struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
+ struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
+ struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
+ void *orig_call = func_addr;
+ bool save_ret;
+ u32 insn;
+
+ /* Generated trampoline stack layout:
+ *
+ * FP - 8 [ RA of parent func ] return address of parent
+ * function
+ * FP - retaddr_off [ RA of traced func ] return address of traced
+ * function
+ * FP - fp_off [ FP of parent func ]
+ *
+ * FP - retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
+ * BPF_TRAMP_F_RET_FENTRY_RET
+ * [ argN ]
+ * [ ... ]
+ * FP - args_off [ arg1 ]
+ *
+ * FP - nregs_off [ regs count ]
+ *
+ * FP - ip_off [ traced func ] BPF_TRAMP_F_IP_ARG
+ *
+ * FP - run_ctx_off [ bpf_tramp_run_ctx ]
+ *
+ * FP - sreg_off [ callee saved reg ]
+ *
+ * [ pads ] pads for 16-byte alignment
+ */
+
+ if (flags & (BPF_TRAMP_F_ORIG_STACK | BPF_TRAMP_F_SHARE_IPMODIFY))
+ return -ENOTSUPP;
+
+ /* extra registers for struct arguments */
+ for (i = 0; i < m->nr_args; i++)
+ if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
+ nregs += round_up(m->arg_size[i], 8) / 8 - 1;
+
+ /* 8 arguments passed by registers */
+ if (nregs > 8)
+ return -ENOTSUPP;
+
+ /* room for parent function return address */
+ stack_size += 8;
+
+ stack_size += 8;
+ retaddr_off = stack_size;
+
+ stack_size += 8;
+ fp_off = stack_size;
+
+ save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
+ if (save_ret) {
+ stack_size += 8;
+ retval_off = stack_size;
+ }
+
+ stack_size += nregs * 8;
+ args_off = stack_size;
+
+ stack_size += 8;
+ nregs_off = stack_size;
+
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ stack_size += 8;
+ ip_off = stack_size;
+ }
+
+ stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
+ run_ctx_off = stack_size;
+
+ stack_size += 8;
+ sreg_off = stack_size;
+
+ stack_size = round_up(stack_size, 16);
+
+ emit_addi(RV_REG_SP, RV_REG_SP, -stack_size, ctx);
+
+ emit_sd(RV_REG_SP, stack_size - retaddr_off, RV_REG_RA, ctx);
+ emit_sd(RV_REG_SP, stack_size - fp_off, RV_REG_FP, ctx);
+
+ emit_addi(RV_REG_FP, RV_REG_SP, stack_size, ctx);
+
+ /* callee saved register S1 to pass start time */
+ emit_sd(RV_REG_FP, -sreg_off, RV_REG_S1, ctx);
+
+ /* store ip address of the traced function */
+ if (flags & BPF_TRAMP_F_IP_ARG) {
+ emit_imm(RV_REG_T1, (const s64)func_addr, ctx);
+ emit_sd(RV_REG_FP, -ip_off, RV_REG_T1, ctx);
+ }
+
+ emit_li(RV_REG_T1, nregs, ctx);
+ emit_sd(RV_REG_FP, -nregs_off, RV_REG_T1, ctx);
+
+ store_args(nregs, args_off, ctx);
+
+ /* skip to actual body of traced function */
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ orig_call += 16;
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ emit_imm(RV_REG_A0, (const s64)im, ctx);
+ ret = emit_call((const u64)__bpf_tramp_enter, true, ctx);
+ if (ret)
+ return ret;
+ }
+
+ for (i = 0; i < fentry->nr_links; i++) {
+ ret = invoke_bpf_prog(fentry->links[i], args_off, retval_off, run_ctx_off,
+ flags & BPF_TRAMP_F_RET_FENTRY_RET, ctx);
+ if (ret)
+ return ret;
+ }
+
+ if (fmod_ret->nr_links) {
+ branches_off = kcalloc(fmod_ret->nr_links, sizeof(int), GFP_KERNEL);
+ if (!branches_off)
+ return -ENOMEM;
+
+ /* cleanup to avoid garbage return value confusion */
+ emit_sd(RV_REG_FP, -retval_off, RV_REG_ZERO, ctx);
+ for (i = 0; i < fmod_ret->nr_links; i++) {
+ ret = invoke_bpf_prog(fmod_ret->links[i], args_off, retval_off,
+ run_ctx_off, true, ctx);
+ if (ret)
+ goto out;
+ emit_ld(RV_REG_T1, -retval_off, RV_REG_FP, ctx);
+ branches_off[i] = ctx->ninsns;
+ /* nop reserved for conditional jump */
+ emit(rv_nop(), ctx);
+ }
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ restore_args(nregs, args_off, ctx);
+ ret = emit_call((const u64)orig_call, true, ctx);
+ if (ret)
+ goto out;
+ emit_sd(RV_REG_FP, -retval_off, RV_REG_A0, ctx);
+ im->ip_after_call = ctx->insns + ctx->ninsns;
+ /* 2 nops reserved for auipc+jalr pair */
+ emit(rv_nop(), ctx);
+ emit(rv_nop(), ctx);
+ }
+
+ /* update branches saved in invoke_bpf_mod_ret with bnez */
+ for (i = 0; ctx->insns && i < fmod_ret->nr_links; i++) {
+ offset = ninsns_rvoff(ctx->ninsns - branches_off[i]);
+ insn = rv_bne(RV_REG_T1, RV_REG_ZERO, offset >> 1);
+ *(u32 *)(ctx->insns + branches_off[i]) = insn;
+ }
+
+ for (i = 0; i < fexit->nr_links; i++) {
+ ret = invoke_bpf_prog(fexit->links[i], args_off, retval_off,
+ run_ctx_off, false, ctx);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_CALL_ORIG) {
+ im->ip_epilogue = ctx->insns + ctx->ninsns;
+ emit_imm(RV_REG_A0, (const s64)im, ctx);
+ ret = emit_call((const u64)__bpf_tramp_exit, true, ctx);
+ if (ret)
+ goto out;
+ }
+
+ if (flags & BPF_TRAMP_F_RESTORE_REGS)
+ restore_args(nregs, args_off, ctx);
+
+ if (save_ret)
+ emit_ld(RV_REG_A0, -retval_off, RV_REG_FP, ctx);
+
+ emit_ld(RV_REG_S1, -sreg_off, RV_REG_FP, ctx);
+
+ if (flags & BPF_TRAMP_F_SKIP_FRAME)
+ /* return address of parent function */
+ emit_ld(RV_REG_RA, stack_size - 8, RV_REG_SP, ctx);
+ else
+ /* return address of traced function */
+ emit_ld(RV_REG_RA, stack_size - retaddr_off, RV_REG_SP, ctx);
+
+ emit_ld(RV_REG_FP, stack_size - fp_off, RV_REG_SP, ctx);
+ emit_addi(RV_REG_SP, RV_REG_SP, stack_size, ctx);
+
+ emit_jalr(RV_REG_ZERO, RV_REG_RA, 0, ctx);
+
+ ret = ctx->ninsns;
+out:
+ kfree(branches_off);
+ return ret;
+}
+
+int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
+ void *image_end, const struct btf_func_model *m,
+ u32 flags, struct bpf_tramp_links *tlinks,
+ void *func_addr)
+{
+ int ret;
+ struct rv_jit_context ctx;
+
+ ctx.ninsns = 0;
+ ctx.insns = NULL;
+ ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+ if (ret < 0)
+ return ret;
+
+ if (ninsns_rvoff(ret) > (long)image_end - (long)image)
+ return -EFBIG;
+
+ ctx.ninsns = 0;
+ ctx.insns = image;
+ ret = __arch_prepare_bpf_trampoline(im, m, tlinks, func_addr, flags, &ctx);
+ if (ret < 0)
+ return ret;
+
+ bpf_flush_icache(ctx.insns, ctx.insns + ctx.ninsns);
+
+ return ninsns_rvoff(ret);
+}
+
int bpf_jit_emit_insn(const struct bpf_insn *insn, struct rv_jit_context *ctx,
bool extra_pass)
{
@@ -894,7 +1304,7 @@ out_be:
/* JUMP off */
case BPF_JMP | BPF_JA:
rvoff = rv_offset(i, off, ctx);
- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
@@ -1013,17 +1423,20 @@ out_be:
/* function call */
case BPF_JMP | BPF_CALL:
{
- bool fixed;
+ bool fixed_addr;
u64 addr;
mark_call(ctx);
- ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
- &fixed);
+ ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+ &addr, &fixed_addr);
if (ret < 0)
return ret;
- ret = emit_call(fixed, addr, ctx);
+
+ ret = emit_call(addr, fixed_addr, ctx);
if (ret)
return ret;
+
+ emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
break;
}
/* tail call */
@@ -1038,7 +1451,7 @@ out_be:
break;
rvoff = epilogue_offset(ctx);
- ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+ ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
if (ret)
return ret;
break;
@@ -1050,7 +1463,15 @@ out_be:
u64 imm64;
imm64 = (u64)insn1.imm << 32 | (u32)imm;
- emit_imm(rd, imm64, ctx);
+ if (bpf_pseudo_func(insn)) {
+ /* fixed-length insns for extra jit pass */
+ ret = emit_addr(rd, imm64, extra_pass, ctx);
+ if (ret)
+ return ret;
+ } else {
+ emit_imm(rd, imm64, ctx);
+ }
+
return 1;
}
@@ -1243,7 +1664,7 @@ out_be:
void bpf_jit_build_prologue(struct rv_jit_context *ctx)
{
- int stack_adjust = 0, store_offset, bpf_stack_adjust;
+ int i, stack_adjust = 0, store_offset, bpf_stack_adjust;
bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);
if (bpf_stack_adjust)
@@ -1270,6 +1691,10 @@ void bpf_jit_build_prologue(struct rv_jit_context *ctx)
store_offset = stack_adjust - 8;
+ /* reserve 4 nop insns */
+ for (i = 0; i < 4; i++)
+ emit(rv_nop(), ctx);
+
/* First instruction is always setting the tail-call-counter
* (TCC) register. This instruction is skipped for tail calls.
* Force using a 4-byte (non-compressed) instruction.
@@ -1327,3 +1752,8 @@ void bpf_jit_build_epilogue(struct rv_jit_context *ctx)
{
__build_epilogue(false, ctx);
}
+
+bool bpf_jit_supports_kfunc_call(void)
+{
+ return true;
+}
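
A worked sketch (editorial, not part of the patch) of the offset arithmetic behind the reserved nops: bpf_jit_build_prologue() now emits 4 nops that bpf_arch_text_poke() can overwrite with the 4-instruction sequence produced by gen_call_or_nops(), and the TCC init that follows is forced to a non-compressed 4-byte instruction, so tail calls must enter 20 bytes past the prologue start, which is why __build_epilogue() now skips 20 instead of 4. The macro names below are illustrative and do not exist in the kernel source:

/*
 * Illustrative only: byte layout of the patchable prologue head.
 *
 *   reserved region (4 insns, 16 bytes):
 *     unpatched: nop; nop; nop; nop
 *     patched:   sd ra,-8(sp); auipc t1,hi; jalr ra,lo(t1); ld ra,-8(sp)
 *   followed by the 4-byte TCC init; tail calls skip both.
 */
#define RV_INSN_BYTES		4	/* non-compressed instruction size */
#define RV_FENTRY_NOP_INSNS	4	/* nops reserved in the prologue */
#define RV_TCC_INIT_INSNS	1	/* TCC init, forced to 4 bytes */

#define RV_TAIL_CALL_SKIP \
	((RV_FENTRY_NOP_INSNS + RV_TCC_INIT_INSNS) * RV_INSN_BYTES)	/* = 20 */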