riscv, bpf: Factor out emit_call for kernel and bpf context
author Pu Lehui <pulehui@huawei.com>
Wed, 15 Feb 2023 13:52:03 +0000 (21:52 +0800)
committer Daniel Borkmann <daniel@iogearbox.net>
Fri, 17 Feb 2023 20:45:30 +0000 (21:45 +0100)
The current emit_call function is not suitable for kernel function calls, as
it stores the return value to the bpf R0 register. Separate that move out so
emit_call can be shared by kernel and bpf context. Meanwhile, simplify the
judgment logic: a fixed function address can use either jal or auipc+jalr,
while an unfixed one can only use auipc+jalr.
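
For reference, the reachability checks behind that judgment can be exercised
with the standalone sketch below. is_21b_int() and in_auipc_jalr_range()
mirror the helpers used by the JIT (see the diff); main() and the sample
offsets are only illustrative, not kernel code:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* jal encodes a 21-bit signed, 2-byte-aligned PC-relative offset. */
static bool is_21b_int(int64_t val)
{
	return -(1LL << 20) <= val && val < (1LL << 20);
}

/* auipc+jalr reaches [-2^31 - 2^11, 2^31 - 2^11) around PC, because
 * jalr sign-extends its low 12 bits.
 */
static bool in_auipc_jalr_range(int64_t val)
{
	return (-(1LL << 31) - (1 << 11)) <= val &&
	       val < ((1LL << 31) - (1 << 11));
}

int main(void)
{
	int64_t offs[] = { 0x7fffe, 0x100000, 0x7ffff000, 1LL << 31 };
	unsigned int i;

	for (i = 0; i < sizeof(offs) / sizeof(offs[0]); i++)
		printf("off=%#llx: jal=%d auipc+jalr=%d\n",
		       (unsigned long long)offs[i],
		       is_21b_int(offs[i]),
		       in_auipc_jalr_range(offs[i]));
	return 0;
}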

Signed-off-by: Pu Lehui <pulehui@huawei.com>
Signed-off-by: Daniel Borkmann <daniel@iogearbox.net>
Tested-by: Björn Töpel <bjorn@rivosinc.com>
Acked-by: Björn Töpel <bjorn@rivosinc.com>
Link: https://lore.kernel.org/bpf/20230215135205.1411105-3-pulehui@huaweicloud.com
arch/riscv/net/bpf_jit_comp64.c

index f2417ac..69ebab8 100644
@@ -428,12 +428,12 @@ static void emit_sext_32_rd(u8 *rd, struct rv_jit_context *ctx)
        *rd = RV_REG_T2;
 }
 
-static int emit_jump_and_link(u8 rd, s64 rvoff, bool force_jalr,
+static int emit_jump_and_link(u8 rd, s64 rvoff, bool fixed_addr,
                              struct rv_jit_context *ctx)
 {
        s64 upper, lower;
 
-       if (rvoff && is_21b_int(rvoff) && !force_jalr) {
+       if (rvoff && fixed_addr && is_21b_int(rvoff)) {
                emit(rv_jal(rd, rvoff >> 1), ctx);
                return 0;
        } else if (in_auipc_jalr_range(rvoff)) {
@@ -454,24 +454,17 @@ static bool is_signed_bpf_cond(u8 cond)
                cond == BPF_JSGE || cond == BPF_JSLE;
 }
 
-static int emit_call(bool fixed, u64 addr, struct rv_jit_context *ctx)
+static int emit_call(u64 addr, bool fixed_addr, struct rv_jit_context *ctx)
 {
        s64 off = 0;
        u64 ip;
-       u8 rd;
-       int ret;
 
        if (addr && ctx->insns) {
                ip = (u64)(long)(ctx->insns + ctx->ninsns);
                off = addr - ip;
        }
 
-       ret = emit_jump_and_link(RV_REG_RA, off, !fixed, ctx);
-       if (ret)
-               return ret;
-       rd = bpf_to_rv_reg(BPF_REG_0, ctx);
-       emit_mv(rd, RV_REG_A0, ctx);
-       return 0;
+       return emit_jump_and_link(RV_REG_RA, off, fixed_addr, ctx);
 }
 
 static void emit_atomic(u8 rd, u8 rs, s16 off, s32 imm, bool is64,
@@ -913,7 +906,7 @@ out_be:
        /* JUMP off */
        case BPF_JMP | BPF_JA:
                rvoff = rv_offset(i, off, ctx);
-               ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+               ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
                if (ret)
                        return ret;
                break;
@@ -1032,17 +1025,20 @@ out_be:
        /* function call */
        case BPF_JMP | BPF_CALL:
        {
-               bool fixed;
+               bool fixed_addr;
                u64 addr;
 
                mark_call(ctx);
-               ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass, &addr,
-                                           &fixed);
+               ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
+                                           &addr, &fixed_addr);
                if (ret < 0)
                        return ret;
-               ret = emit_call(fixed, addr, ctx);
+
+               ret = emit_call(addr, fixed_addr, ctx);
                if (ret)
                        return ret;
+
+               emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);
                break;
        }
        /* tail call */
@@ -1057,7 +1053,7 @@ out_be:
                        break;
 
                rvoff = epilogue_offset(ctx);
-               ret = emit_jump_and_link(RV_REG_ZERO, rvoff, false, ctx);
+               ret = emit_jump_and_link(RV_REG_ZERO, rvoff, true, ctx);
                if (ret)
                        return ret;
                break;
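
With emit_call() factored out, a BPF-context caller follows it with an
explicit move of a0 into the register mapped to BPF_REG_0, exactly as the
BPF_JMP | BPF_CALL hunk above now does, while a kernel-context caller can
consume the return value straight from a0. A rough usage sketch of the two
patterns follows; __some_kernel_helper is purely hypothetical:

	/* BPF context: map the result back into BPF R0. */
	ret = emit_call(addr, fixed_addr, ctx);
	if (ret)
		return ret;
	emit_mv(bpf_to_rv_reg(BPF_REG_0, ctx), RV_REG_A0, ctx);

	/* Kernel context (hypothetical callee, fixed address):
	 * the result stays in a0 and no move into R0 is emitted.
	 */
	ret = emit_call((u64)__some_kernel_helper, true, ctx);
	if (ret)
		return ret;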