// SPDX-License-Identifier: GPL-2.0-only
/*
 * BPF JIT compiler for LoongArch
 *
 * Copyright (C) 2022 Loongson Technology Corporation Limited
 */
#include "bpf_jit.h"

#define REG_TCC		LOONGARCH_GPR_A6
#define TCC_SAVED	LOONGARCH_GPR_S5
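
/*
 * The tail-call counter (TCC) normally lives in $a6 (REG_TCC); when a
 * program mixes bpf-to-bpf calls with tail calls, the prologue copies it
 * into the callee-saved $s5 (TCC_SAVED) so it survives across calls
 * (see build_prologue()).
 */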
#define SAVE_RA		BIT(0)
#define SAVE_TCC	BIT(1)

static const int regmap[] = {
	/* return value from in-kernel function, and exit value for eBPF program */
	[BPF_REG_0] = LOONGARCH_GPR_A5,
	/* arguments from eBPF program to in-kernel function */
	[BPF_REG_1] = LOONGARCH_GPR_A0,
	[BPF_REG_2] = LOONGARCH_GPR_A1,
	[BPF_REG_3] = LOONGARCH_GPR_A2,
	[BPF_REG_4] = LOONGARCH_GPR_A3,
	[BPF_REG_5] = LOONGARCH_GPR_A4,
	/* callee-saved registers that in-kernel function will preserve */
	[BPF_REG_6] = LOONGARCH_GPR_S0,
	[BPF_REG_7] = LOONGARCH_GPR_S1,
	[BPF_REG_8] = LOONGARCH_GPR_S2,
	[BPF_REG_9] = LOONGARCH_GPR_S3,
	/* read-only frame pointer to access stack */
	[BPF_REG_FP] = LOONGARCH_GPR_S4,
	/* temporary register for blinding constants */
	[BPF_REG_AX] = LOONGARCH_GPR_T0,
};

static void mark_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_RA;
}

static void mark_tail_call(struct jit_ctx *ctx)
{
	ctx->flags |= SAVE_TCC;
}

static bool seen_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_RA);
}

static bool seen_tail_call(struct jit_ctx *ctx)
{
	return (ctx->flags & SAVE_TCC);
}

static u8 tail_call_reg(struct jit_ctx *ctx)
{
	if (seen_call(ctx))
		return TCC_SAVED;

	return REG_TCC;
}

/*
 * eBPF prog stack layout:
 *
 * original $sp ------------> +-------------------------+ <--LOONGARCH_GPR_FP
 *                            |           $ra           |
 *                            +-------------------------+
 *                            |           $fp           |
 *                            +-------------------------+
 *                            |           $s0           |
 *                            +-------------------------+
 *                            |           $s1           |
 *                            +-------------------------+
 *                            |           $s2           |
 *                            +-------------------------+
 *                            |           $s3           |
 *                            +-------------------------+
 *                            |           $s4           |
 *                            +-------------------------+
 *                            |           $s5           |
 *                            +-------------------------+ <--BPF_REG_FP
 *                            |  prog->aux->stack_depth |
 *                            |        (optional)       |
 * current $sp -------------> +-------------------------+
 */
static void build_prologue(struct jit_ctx *ctx)
{
	int stack_adjust = 0, store_offset, bpf_stack_adjust;

	bpf_stack_adjust = round_up(ctx->prog->aux->stack_depth, 16);

	/* To store ra, fp, s0, s1, s2, s3, s4 and s5. */
	stack_adjust += sizeof(long) * 8;

	stack_adjust = round_up(stack_adjust, 16);
	stack_adjust += bpf_stack_adjust;
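
	/*
	 * The frame thus holds the eight saved registers (kept 16-byte
	 * aligned) plus the 16-byte-aligned stack area the verifier reports
	 * in prog->aux->stack_depth, matching the layout diagram above.
	 */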

	/*
	 * First instruction initializes the tail call count (TCC).
	 * On tail call we skip this instruction, and the TCC is
	 * passed in REG_TCC from the caller.
	 */
	emit_insn(ctx, addid, REG_TCC, LOONGARCH_GPR_ZERO, MAX_TAIL_CALL_CNT);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, -stack_adjust);

	store_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, store_offset);

	store_offset -= sizeof(long);
	emit_insn(ctx, std, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, store_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, stack_adjust);

	if (bpf_stack_adjust)
		emit_insn(ctx, addid, regmap[BPF_REG_FP], LOONGARCH_GPR_SP, bpf_stack_adjust);
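
	/*
	 * BPF_REG_FP ($s4) now points just below the register save area,
	 * i.e. at the top of the program's own stack region, as shown in
	 * the layout diagram above.
	 */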

	/*
	 * Program contains calls and tail calls, so REG_TCC needs
	 * to be saved across calls.
	 */
	if (seen_tail_call(ctx) && seen_call(ctx))
		move_reg(ctx, TCC_SAVED, REG_TCC);

	ctx->stack_size = stack_adjust;
}

static void __build_epilogue(struct jit_ctx *ctx, bool is_tail_call)
{
	int stack_adjust = ctx->stack_size;
	int load_offset;

	load_offset = stack_adjust - sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_RA, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_FP, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S0, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S1, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S2, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S3, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S4, LOONGARCH_GPR_SP, load_offset);

	load_offset -= sizeof(long);
	emit_insn(ctx, ldd, LOONGARCH_GPR_S5, LOONGARCH_GPR_SP, load_offset);

	emit_insn(ctx, addid, LOONGARCH_GPR_SP, LOONGARCH_GPR_SP, stack_adjust);
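
	/*
	 * A normal exit restores $a0 and returns through $ra; a tail call
	 * instead jumps through $t3 into the target program, skipping its
	 * TCC-initializing first instruction.
	 */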
	if (!is_tail_call) {
		/* Set return value */
		move_reg(ctx, LOONGARCH_GPR_A0, regmap[BPF_REG_0]);
		/* Return to the caller */
		emit_insn(ctx, jirl, LOONGARCH_GPR_RA, LOONGARCH_GPR_ZERO, 0);
	} else {
		/*
		 * Call the next bpf prog and skip the first instruction
		 * of TCC initialization.
		 */
		emit_insn(ctx, jirl, LOONGARCH_GPR_T3, LOONGARCH_GPR_ZERO, 1);
	}
}

static void build_epilogue(struct jit_ctx *ctx)
{
	__build_epilogue(ctx, false);
}

bool bpf_jit_supports_kfunc_call(void)
{
	return true;
}

/* initialized on the first pass of build_body() */
static int out_offset = -1;
static int emit_bpf_tail_call(struct jit_ctx *ctx)
{
	int off;
	u8 tcc = tail_call_reg(ctx);
	u8 a1 = LOONGARCH_GPR_A1;
	u8 a2 = LOONGARCH_GPR_A2;
	u8 t1 = LOONGARCH_GPR_T1;
	u8 t2 = LOONGARCH_GPR_T2;
	u8 t3 = LOONGARCH_GPR_T3;
	const int idx0 = ctx->idx;

#define cur_offset (ctx->idx - idx0)
#define jmp_offset (out_offset - (cur_offset))
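
	/*
	 * All three bailout checks below branch to the same "out" point
	 * right after this sequence.  out_offset records that distance (in
	 * instructions) on the first pass and is cross-checked on later
	 * passes, so every pass must emit exactly the same number of
	 * instructions here.
	 */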

	/*
	 * a0: &ctx
	 * a1: &array
	 * a2: index
	 *
	 * if (index >= array->map.max_entries)
	 *	 goto out;
	 */
	off = offsetof(struct bpf_array, map.max_entries);
	emit_insn(ctx, ldwu, t1, a1, off);
	/* bgeu $a2, $t1, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JGE, a2, t1, jmp_offset) < 0)
		goto toofar;

	/*
	 * if (--TCC < 0)
	 *	 goto out;
	 */
	emit_insn(ctx, addid, REG_TCC, tcc, -1);
	if (emit_tailcall_jmp(ctx, BPF_JSLT, REG_TCC, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/*
	 * prog = array->ptrs[index];
	 * if (!prog)
	 *	 goto out;
	 */
	emit_insn(ctx, alsld, t2, a2, a1, 2);
	off = offsetof(struct bpf_array, ptrs);
	emit_insn(ctx, ldd, t2, t2, off);
	/* beq $t2, $zero, jmp_offset */
	if (emit_tailcall_jmp(ctx, BPF_JEQ, t2, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
		goto toofar;

	/* goto *(prog->bpf_func + 4); */
	off = offsetof(struct bpf_prog, bpf_func);
	emit_insn(ctx, ldd, t3, t2, off);
	__build_epilogue(ctx, true);

	/* out: */
	if (out_offset == -1)
		out_offset = cur_offset;
	if (cur_offset != out_offset) {
		pr_err_once("tail_call out_offset = %d, expected %d!\n",
			    cur_offset, out_offset);
		return -1;
	}

	return 0;

toofar:
	pr_info_once("tail_call: jump too far\n");
	return -1;
#undef cur_offset
#undef jmp_offset
}

static void emit_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
{
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 t3 = LOONGARCH_GPR_T3;
	const u8 r0 = regmap[BPF_REG_0];
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const bool isdw = BPF_SIZE(insn->code) == BPF_DW;

	move_imm(ctx, t1, off, false);
	emit_insn(ctx, addd, t1, dst, t1);
	move_reg(ctx, t3, src);
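
	/*
	 * t1 holds the target address (dst + off); t3 keeps a copy of src
	 * so that the fetch variants below can return the old memory value
	 * in the src register.
	 */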
	switch (imm) {
	/* lock *(size *)(dst + off) <op>= src */
	case BPF_ADD:
		if (isdw)
			emit_insn(ctx, amaddd, t2, t1, src);
		else
			emit_insn(ctx, amaddw, t2, t1, src);
		break;
	case BPF_AND:
		if (isdw)
			emit_insn(ctx, amandd, t2, t1, src);
		else
			emit_insn(ctx, amandw, t2, t1, src);
		break;
	case BPF_OR:
		if (isdw)
			emit_insn(ctx, amord, t2, t1, src);
		else
			emit_insn(ctx, amorw, t2, t1, src);
		break;
	case BPF_XOR:
		if (isdw)
			emit_insn(ctx, amxord, t2, t1, src);
		else
			emit_insn(ctx, amxorw, t2, t1, src);
		break;
	/* src = atomic_fetch_<op>(dst + off, src) */
	case BPF_ADD | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amaddd, src, t1, t3);
		} else {
			emit_insn(ctx, amaddw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_AND | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amandd, src, t1, t3);
		} else {
			emit_insn(ctx, amandw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_OR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amord, src, t1, t3);
		} else {
			emit_insn(ctx, amorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	case BPF_XOR | BPF_FETCH:
		if (isdw) {
			emit_insn(ctx, amxord, src, t1, t3);
		} else {
			emit_insn(ctx, amxorw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
	/* src = atomic_xchg(dst + off, src); */
	case BPF_XCHG:
		if (isdw) {
			emit_insn(ctx, amswapd, src, t1, t3);
		} else {
			emit_insn(ctx, amswapw, src, t1, t3);
			emit_zext_32(ctx, src, true);
		}
		break;
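	/*
	 * BPF_CMPXCHG below is emulated with an LL/SC loop: load-linked the
	 * old value, compare it against r0, conditionally store src, and
	 * retry from the load if the store-conditional fails.  The branch
	 * offsets are in units of instructions.
	 */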
	/* r0 = atomic_cmpxchg(dst + off, r0, src); */
	case BPF_CMPXCHG:
		move_reg(ctx, t2, r0);
		if (isdw) {
			emit_insn(ctx, lld, r0, t1, 0);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scd, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -4);
		} else {
			emit_insn(ctx, llw, r0, t1, 0);
			emit_zext_32(ctx, t2, true);
			emit_zext_32(ctx, r0, true);
			emit_insn(ctx, bne, t2, r0, 4);
			move_reg(ctx, t3, src);
			emit_insn(ctx, scw, t3, t1, 0);
			emit_insn(ctx, beq, t3, LOONGARCH_GPR_ZERO, -6);
			emit_zext_32(ctx, r0, true);
		}
		break;
	}
}

static bool is_signed_bpf_cond(u8 cond)
{
	return cond == BPF_JSGT || cond == BPF_JSLT ||
	       cond == BPF_JSGE || cond == BPF_JSLE;
}

#define BPF_FIXUP_REG_MASK	GENMASK(31, 27)
#define BPF_FIXUP_OFFSET_MASK	GENMASK(26, 0)
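
/*
 * A BPF extable fixup word packs the destination register to clear into
 * bits 31-27 and a positive offset (used to recompute the resume address,
 * see add_exception_handler() below) into bits 26-0.
 */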

bool ex_handler_bpf(const struct exception_table_entry *ex,
		    struct pt_regs *regs)
{
	int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
	off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);

	regs->regs[dst_reg] = 0;
	regs->csr_era = (unsigned long)&ex->fixup - offset;

	return true;
}

/* For accesses to BTF pointers, add an entry to the exception table */
static int add_exception_handler(const struct bpf_insn *insn,
				 struct jit_ctx *ctx,
				 int dst_reg)
{
	unsigned long pc;
	off_t offset;
	struct exception_table_entry *ex;

	if (!ctx->image || !ctx->prog->aux->extable || BPF_MODE(insn->code) != BPF_PROBE_MEM)
		return 0;

	if (WARN_ON_ONCE(ctx->num_exentries >= ctx->prog->aux->num_exentries))
		return -EINVAL;

	ex = &ctx->prog->aux->extable[ctx->num_exentries];
	pc = (unsigned long)&ctx->image[ctx->idx - 1];

	offset = pc - (long)&ex->insn;
	if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
		return -ERANGE;

	ex->insn = offset;

	/*
	 * Since the extable follows the program, the fixup offset is always
	 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
	 * to keep things simple, and put the destination register in the upper
	 * bits. We don't need to worry about buildtime or runtime sort
	 * modifying the upper bits because the table is already sorted, and
	 * isn't part of the main exception table.
	 */
	offset = (long)&ex->fixup - (pc + LOONGARCH_INSN_SIZE);
	if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
		return -ERANGE;

	ex->type = EX_TYPE_BPF;
	ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) | FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);

	ctx->num_exentries++;

	return 0;
}

static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx, bool extra_pass)
{
	u8 tm = -1;
	u64 func_addr;
	bool func_addr_fixed;
	int i = insn - ctx->prog->insnsi;
	int ret, jmp_offset;
	const u8 code = insn->code;
	const u8 cond = BPF_OP(code);
	const u8 t1 = LOONGARCH_GPR_T1;
	const u8 t2 = LOONGARCH_GPR_T2;
	const u8 src = regmap[insn->src_reg];
	const u8 dst = regmap[insn->dst_reg];
	const s16 off = insn->off;
	const s32 imm = insn->imm;
	const u64 imm64 = (u64)(insn + 1)->imm << 32 | (u32)insn->imm;
	const bool is32 = BPF_CLASS(insn->code) == BPF_ALU || BPF_CLASS(insn->code) == BPF_JMP32;
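
	/*
	 * is32 selects BPF ALU32/JMP32 semantics: 32-bit results are
	 * zero-extended to 64 bits via emit_zext_32() after each operation.
	 */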
	switch (code) {
	/* dst = src */
	case BPF_ALU | BPF_MOV | BPF_X:
	case BPF_ALU64 | BPF_MOV | BPF_X:
		move_reg(ctx, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = imm */
	case BPF_ALU | BPF_MOV | BPF_K:
	case BPF_ALU64 | BPF_MOV | BPF_K:
		move_imm(ctx, dst, imm, is32);
		break;

	/* dst = dst + src */
	case BPF_ALU | BPF_ADD | BPF_X:
	case BPF_ALU64 | BPF_ADD | BPF_X:
		emit_insn(ctx, addd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst + imm */
	case BPF_ALU | BPF_ADD | BPF_K:
	case BPF_ALU64 | BPF_ADD | BPF_K:
		if (is_signed_imm12(imm)) {
			emit_insn(ctx, addid, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, addd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - src */
	case BPF_ALU | BPF_SUB | BPF_X:
	case BPF_ALU64 | BPF_SUB | BPF_X:
		emit_insn(ctx, subd, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst - imm */
	case BPF_ALU | BPF_SUB | BPF_K:
	case BPF_ALU64 | BPF_SUB | BPF_K:
		if (is_signed_imm12(-imm)) {
			emit_insn(ctx, addid, dst, dst, -imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, subd, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * src */
	case BPF_ALU | BPF_MUL | BPF_X:
	case BPF_ALU64 | BPF_MUL | BPF_X:
		emit_insn(ctx, muld, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst * imm */
	case BPF_ALU | BPF_MUL | BPF_K:
	case BPF_ALU64 | BPF_MUL | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, muld, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / src */
	case BPF_ALU | BPF_DIV | BPF_X:
	case BPF_ALU64 | BPF_DIV | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst / imm */
	case BPF_ALU | BPF_DIV | BPF_K:
	case BPF_ALU64 | BPF_DIV | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, divdu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % src */
	case BPF_ALU | BPF_MOD | BPF_X:
	case BPF_ALU64 | BPF_MOD | BPF_X:
		emit_zext_32(ctx, dst, is32);
		move_reg(ctx, t1, src);
		emit_zext_32(ctx, t1, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst % imm */
	case BPF_ALU | BPF_MOD | BPF_K:
	case BPF_ALU64 | BPF_MOD | BPF_K:
		move_imm(ctx, t1, imm, is32);
		emit_zext_32(ctx, dst, is32);
		emit_insn(ctx, moddu, dst, dst, t1);
		emit_zext_32(ctx, dst, is32);
		break;
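
	/*
	 * Note: BPF division and modulo are unsigned, hence the divdu/moddu
	 * instructions above, with both operands zero-extended first for the
	 * 32-bit forms.
	 */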

	/* dst = -dst */
	case BPF_ALU | BPF_NEG:
	case BPF_ALU64 | BPF_NEG:
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, subd, dst, LOONGARCH_GPR_ZERO, dst);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & src */
	case BPF_ALU | BPF_AND | BPF_X:
	case BPF_ALU64 | BPF_AND | BPF_X:
		emit_insn(ctx, and, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst & imm */
	case BPF_ALU | BPF_AND | BPF_K:
	case BPF_ALU64 | BPF_AND | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, andi, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, and, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | src */
	case BPF_ALU | BPF_OR | BPF_X:
	case BPF_ALU64 | BPF_OR | BPF_X:
		emit_insn(ctx, or, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst | imm */
	case BPF_ALU | BPF_OR | BPF_K:
	case BPF_ALU64 | BPF_OR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, ori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, or, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ src */
	case BPF_ALU | BPF_XOR | BPF_X:
	case BPF_ALU64 | BPF_XOR | BPF_X:
		emit_insn(ctx, xor, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst ^ imm */
	case BPF_ALU | BPF_XOR | BPF_K:
	case BPF_ALU64 | BPF_XOR | BPF_K:
		if (is_unsigned_imm12(imm)) {
			emit_insn(ctx, xori, dst, dst, imm);
		} else {
			move_imm(ctx, t1, imm, is32);
			emit_insn(ctx, xor, dst, dst, t1);
		}
		emit_zext_32(ctx, dst, is32);
		break;

	/* dst = dst << src (logical) */
	case BPF_ALU | BPF_LSH | BPF_X:
		emit_insn(ctx, sllw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_X:
		emit_insn(ctx, slld, dst, dst, src);
		break;

	/* dst = dst << imm (logical) */
	case BPF_ALU | BPF_LSH | BPF_K:
		emit_insn(ctx, slliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_LSH | BPF_K:
		emit_insn(ctx, sllid, dst, dst, imm);
		break;

	/* dst = dst >> src (logical) */
	case BPF_ALU | BPF_RSH | BPF_X:
		emit_insn(ctx, srlw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_X:
		emit_insn(ctx, srld, dst, dst, src);
		break;

	/* dst = dst >> imm (logical) */
	case BPF_ALU | BPF_RSH | BPF_K:
		emit_insn(ctx, srliw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_RSH | BPF_K:
		emit_insn(ctx, srlid, dst, dst, imm);
		break;

	/* dst = dst >> src (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_X:
		emit_insn(ctx, sraw, dst, dst, src);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_X:
		emit_insn(ctx, srad, dst, dst, src);
		break;

	/* dst = dst >> imm (arithmetic) */
	case BPF_ALU | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraiw, dst, dst, imm);
		emit_zext_32(ctx, dst, is32);
		break;

	case BPF_ALU64 | BPF_ARSH | BPF_K:
		emit_insn(ctx, sraid, dst, dst, imm);
		break;

	/* dst = BSWAP##imm(dst) */
	case BPF_ALU | BPF_END | BPF_FROM_LE:
		switch (imm) {
		case 16:
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			/* do nothing */
			break;
		}
		break;

	case BPF_ALU | BPF_END | BPF_FROM_BE:
		switch (imm) {
		case 16:
			emit_insn(ctx, revb2h, dst, dst);
			/* zero-extend 16 bits into 64 bits */
			emit_insn(ctx, bstrpickd, dst, dst, 15, 0);
			break;
		case 32:
			emit_insn(ctx, revb2w, dst, dst);
			/* zero-extend 32 bits into 64 bits */
			emit_zext_32(ctx, dst, is32);
			break;
		case 64:
			emit_insn(ctx, revbd, dst, dst);
			break;
		}
		break;

	/* PC += off if dst cond src */
	case BPF_JMP | BPF_JEQ | BPF_X:
	case BPF_JMP | BPF_JNE | BPF_X:
	case BPF_JMP | BPF_JGT | BPF_X:
	case BPF_JMP | BPF_JGE | BPF_X:
	case BPF_JMP | BPF_JLT | BPF_X:
	case BPF_JMP | BPF_JLE | BPF_X:
	case BPF_JMP | BPF_JSGT | BPF_X:
	case BPF_JMP | BPF_JSGE | BPF_X:
	case BPF_JMP | BPF_JSLT | BPF_X:
	case BPF_JMP | BPF_JSLE | BPF_X:
	case BPF_JMP32 | BPF_JEQ | BPF_X:
	case BPF_JMP32 | BPF_JNE | BPF_X:
	case BPF_JMP32 | BPF_JGT | BPF_X:
	case BPF_JMP32 | BPF_JGE | BPF_X:
	case BPF_JMP32 | BPF_JLT | BPF_X:
	case BPF_JMP32 | BPF_JLE | BPF_X:
	case BPF_JMP32 | BPF_JSGT | BPF_X:
	case BPF_JMP32 | BPF_JSGE | BPF_X:
	case BPF_JMP32 | BPF_JSLT | BPF_X:
	case BPF_JMP32 | BPF_JSLE | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_reg(ctx, t1, dst);
		move_reg(ctx, t2, src);
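		/*
		 * Copy both operands into temporaries and sign- or
		 * zero-extend them so the 64-bit compare honours JMP32
		 * (32-bit) semantics.
		 */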
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, t1, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, t1, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t1, t2, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst cond imm */
	case BPF_JMP | BPF_JEQ | BPF_K:
	case BPF_JMP | BPF_JNE | BPF_K:
	case BPF_JMP | BPF_JGT | BPF_K:
	case BPF_JMP | BPF_JGE | BPF_K:
	case BPF_JMP | BPF_JLT | BPF_K:
	case BPF_JMP | BPF_JLE | BPF_K:
	case BPF_JMP | BPF_JSGT | BPF_K:
	case BPF_JMP | BPF_JSGE | BPF_K:
	case BPF_JMP | BPF_JSLT | BPF_K:
	case BPF_JMP | BPF_JSLE | BPF_K:
	case BPF_JMP32 | BPF_JEQ | BPF_K:
	case BPF_JMP32 | BPF_JNE | BPF_K:
	case BPF_JMP32 | BPF_JGT | BPF_K:
	case BPF_JMP32 | BPF_JGE | BPF_K:
	case BPF_JMP32 | BPF_JLT | BPF_K:
	case BPF_JMP32 | BPF_JLE | BPF_K:
	case BPF_JMP32 | BPF_JSGT | BPF_K:
	case BPF_JMP32 | BPF_JSGE | BPF_K:
	case BPF_JMP32 | BPF_JSLT | BPF_K:
	case BPF_JMP32 | BPF_JSLE | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (imm) {
			move_imm(ctx, t1, imm, false);
			tm = t1;
		} else {
			/* If imm is 0, simply use the zero register. */
			tm = LOONGARCH_GPR_ZERO;
		}
		move_reg(ctx, t2, dst);
		if (is_signed_bpf_cond(BPF_OP(code))) {
			emit_sext_32(ctx, tm, is32);
			emit_sext_32(ctx, t2, is32);
		} else {
			emit_zext_32(ctx, tm, is32);
			emit_zext_32(ctx, t2, is32);
		}
		if (emit_cond_jmp(ctx, cond, t2, tm, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & src */
	case BPF_JMP | BPF_JSET | BPF_X:
	case BPF_JMP32 | BPF_JSET | BPF_X:
		jmp_offset = bpf2la_offset(i, off, ctx);
		emit_insn(ctx, and, t1, dst, src);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off if dst & imm */
	case BPF_JMP | BPF_JSET | BPF_K:
	case BPF_JMP32 | BPF_JSET | BPF_K:
		jmp_offset = bpf2la_offset(i, off, ctx);
		move_imm(ctx, t1, imm, is32);
		emit_insn(ctx, and, t1, dst, t1);
		emit_zext_32(ctx, t1, is32);
		if (emit_cond_jmp(ctx, cond, t1, LOONGARCH_GPR_ZERO, jmp_offset) < 0)
			goto toofar;
		break;

	/* PC += off */
	case BPF_JMP | BPF_JA:
		jmp_offset = bpf2la_offset(i, off, ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* function call */
	case BPF_JMP | BPF_CALL:
		mark_call(ctx);
		ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
					    &func_addr, &func_addr_fixed);
		if (ret < 0)
			return ret;

		move_addr(ctx, t1, func_addr);
		emit_insn(ctx, jirl, t1, LOONGARCH_GPR_RA, 0);
		move_reg(ctx, regmap[BPF_REG_0], LOONGARCH_GPR_A0);
		break;

	/* tail call */
	case BPF_JMP | BPF_TAIL_CALL:
		mark_tail_call(ctx);
		if (emit_bpf_tail_call(ctx) < 0)
			return -EINVAL;
		break;

	/* function return */
	case BPF_JMP | BPF_EXIT:
		emit_sext_32(ctx, regmap[BPF_REG_0], true);

		if (i == ctx->prog->len - 1)
			break;

		jmp_offset = epilogue_offset(ctx);
		if (emit_uncond_jmp(ctx, jmp_offset) < 0)
			goto toofar;
		break;

	/* dst = imm64 */
	case BPF_LD | BPF_IMM | BPF_DW:
		move_imm(ctx, dst, imm64, is32);
		return 1;

	/* dst = *(size *)(src + off) */
	case BPF_LDX | BPF_MEM | BPF_B:
	case BPF_LDX | BPF_MEM | BPF_H:
	case BPF_LDX | BPF_MEM | BPF_W:
	case BPF_LDX | BPF_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
	case BPF_LDX | BPF_PROBE_MEM | BPF_W:
	case BPF_LDX | BPF_PROBE_MEM | BPF_H:
	case BPF_LDX | BPF_PROBE_MEM | BPF_B:
		switch (BPF_SIZE(code)) {
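		/*
		 * Prefer the 12-bit immediate load when the offset fits
		 * (plus the 14-bit ldptr.w form for words); otherwise
		 * materialize the offset in t1 and use an indexed load.
		 */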
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldbu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxbu, dst, src, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldhu, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxhu, dst, src, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, ldwu, dst, src, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, ldptrw, dst, src, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, ldxwu, dst, src, t1);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, off, is32);
			emit_insn(ctx, ldxd, dst, src, t1);
			break;
		}

		ret = add_exception_handler(insn, ctx, dst);
		if (ret)
			return ret;
		break;

	/* *(size *)(dst + off) = imm */
	case BPF_ST | BPF_MEM | BPF_B:
	case BPF_ST | BPF_MEM | BPF_H:
	case BPF_ST | BPF_MEM | BPF_W:
	case BPF_ST | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxb, t1, dst, t2);
			}
			break;
		case BPF_H:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxh, t1, dst, t2);
			}
			break;
		case BPF_W:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxw, t1, dst, t2);
			}
			break;
		case BPF_DW:
			move_imm(ctx, t1, imm, is32);
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, t1, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, t1, dst, off);
			} else {
				move_imm(ctx, t2, off, is32);
				emit_insn(ctx, stxd, t1, dst, t2);
			}
			break;
		}
		break;

	/* *(size *)(dst + off) = src */
	case BPF_STX | BPF_MEM | BPF_B:
	case BPF_STX | BPF_MEM | BPF_H:
	case BPF_STX | BPF_MEM | BPF_W:
	case BPF_STX | BPF_MEM | BPF_DW:
		switch (BPF_SIZE(code)) {
		case BPF_B:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stb, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxb, src, dst, t1);
			}
			break;
		case BPF_H:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, sth, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxh, src, dst, t1);
			}
			break;
		case BPF_W:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, stw, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrw, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxw, src, dst, t1);
			}
			break;
		case BPF_DW:
			if (is_signed_imm12(off)) {
				emit_insn(ctx, std, src, dst, off);
			} else if (is_signed_imm14(off)) {
				emit_insn(ctx, stptrd, src, dst, off);
			} else {
				move_imm(ctx, t1, off, is32);
				emit_insn(ctx, stxd, src, dst, t1);
			}
			break;
		}
		break;

	case BPF_STX | BPF_ATOMIC | BPF_W:
	case BPF_STX | BPF_ATOMIC | BPF_DW:
		emit_atomic(insn, ctx);
		break;

	/* Speculation barrier */
	case BPF_ST | BPF_NOSPEC:
		break;

	default:
		pr_err("bpf_jit: unknown opcode %02x\n", code);
		return -EINVAL;
	}

	return 0;

toofar:
	pr_info_once("bpf_jit: opcode %02x, jump too far\n", code);
	return -E2BIG;
}

static int build_body(struct jit_ctx *ctx, bool extra_pass)
{
	int i;
	const struct bpf_prog *prog = ctx->prog;
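
	/*
	 * On the first (sizing) pass ctx->image is still NULL; record the
	 * current instruction index for every BPF instruction in
	 * ctx->offset[] so later passes can resolve branch targets.
	 */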
	for (i = 0; i < prog->len; i++) {
		const struct bpf_insn *insn = &prog->insnsi[i];
		int ret;

		if (ctx->image == NULL)
			ctx->offset[i] = ctx->idx;

		ret = build_insn(insn, ctx, extra_pass);
		if (ret > 0) {
			i++;
			if (ctx->image == NULL)
				ctx->offset[i] = ctx->idx;
			continue;
		}
		if (ret)
			return ret;
	}

	if (ctx->image == NULL)
		ctx->offset[i] = ctx->idx;

	return 0;
}

/* Fill space with break instructions */
static void jit_fill_hole(void *area, unsigned int size)
{
	u32 *ptr;

	/* We are guaranteed to have aligned memory */
	for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
		*ptr++ = INSN_BREAK;
}

static int validate_code(struct jit_ctx *ctx)
{
	int i;
	union loongarch_instruction insn;

	for (i = 0; i < ctx->idx; i++) {
		insn = ctx->image[i];
		/* Check INSN_BREAK */
		if (insn.word == INSN_BREAK)
			return -1;
	}

	if (WARN_ON_ONCE(ctx->num_exentries != ctx->prog->aux->num_exentries))
		return -1;

	return 0;
}

struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
{
	bool tmp_blinded = false, extra_pass = false;
	u8 *image_ptr;
	int image_size, prog_size, extable_size;
	struct jit_ctx ctx;
	struct jit_data *jit_data;
	struct bpf_binary_header *header;
	struct bpf_prog *tmp, *orig_prog = prog;

	/*
	 * If BPF JIT was not enabled then we must fall back to
	 * the interpreter.
	 */
	if (!prog->jit_requested)
		return orig_prog;

	tmp = bpf_jit_blind_constants(prog);
	/*
	 * If blinding was requested and we failed during blinding,
	 * we must fall back to the interpreter. Otherwise, we save
	 * the new JITed code.
	 */
	if (IS_ERR(tmp))
		return orig_prog;

	if (tmp != prog) {
		tmp_blinded = true;
		prog = tmp;
	}

	jit_data = prog->aux->jit_data;
	if (!jit_data) {
		jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
		if (!jit_data) {
			prog = orig_prog;
			goto out;
		}
		prog->aux->jit_data = jit_data;
	}
	if (jit_data->ctx.offset) {
		ctx = jit_data->ctx;
		image_ptr = jit_data->image;
		header = jit_data->header;
		extra_pass = true;
		prog_size = sizeof(u32) * ctx.idx;
		goto skip_init_ctx;
	}

	memset(&ctx, 0, sizeof(ctx));
	ctx.prog = prog;

	ctx.offset = kvcalloc(prog->len + 1, sizeof(u32), GFP_KERNEL);
	if (ctx.offset == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 1. Initial fake pass to compute ctx->idx and set ctx->flags */
	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		prog = orig_prog;
		goto out_offset;
	}
	ctx.epilogue_offset = ctx.idx;
	build_epilogue(&ctx);

	extable_size = prog->aux->num_exentries * sizeof(struct exception_table_entry);

	/*
	 * Now we know the actual image size.
	 * As each LoongArch instruction is 32 bits long, we translate the
	 * number of JITed instructions into the size required to store the
	 * JITed code.
	 */
	prog_size = sizeof(u32) * ctx.idx;
	image_size = prog_size + extable_size;
	/* Now we know the size of the structure to make */
	header = bpf_jit_binary_alloc(image_size, &image_ptr,
				      sizeof(u32), jit_fill_hole);
	if (header == NULL) {
		prog = orig_prog;
		goto out_offset;
	}

	/* 2. Now, the actual pass to generate final JIT code */
	ctx.image = (union loongarch_instruction *)image_ptr;
	if (extable_size)
		prog->aux->extable = (void *)image_ptr + prog_size;

skip_init_ctx:
	ctx.idx = 0;
	ctx.num_exentries = 0;

	build_prologue(&ctx);
	if (build_body(&ctx, extra_pass)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}
	build_epilogue(&ctx);

	/* 3. Extra pass to validate JITed code */
	if (validate_code(&ctx)) {
		bpf_jit_binary_free(header);
		prog = orig_prog;
		goto out_offset;
	}

	/* And we're done */
	if (bpf_jit_enable > 1)
		bpf_jit_dump(prog->len, prog_size, 2, ctx.image);

	/* Update the icache */
	flush_icache_range((unsigned long)header, (unsigned long)(ctx.image + ctx.idx));

	if (!prog->is_func || extra_pass) {
		if (extra_pass && ctx.idx != jit_data->ctx.idx) {
			pr_err_once("multi-func JIT bug %d != %d\n",
				    ctx.idx, jit_data->ctx.idx);
			bpf_jit_binary_free(header);
			prog->bpf_func = NULL;
			prog->jited = 0;
			prog->jited_len = 0;
			goto out_offset;
		}
		bpf_jit_binary_lock_ro(header);
	} else {
		jit_data->ctx = ctx;
		jit_data->image = image_ptr;
		jit_data->header = header;
	}
	prog->jited = 1;
	prog->jited_len = prog_size;
	prog->bpf_func = (void *)ctx.image;

	if (!prog->is_func || extra_pass) {
		int i;

		/* offset[prog->len] is the size of the program */
		for (i = 0; i <= prog->len; i++)
			ctx.offset[i] *= LOONGARCH_INSN_SIZE;
		bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);

out_offset:
		kvfree(ctx.offset);
		kfree(jit_data);
		prog->aux->jit_data = NULL;
	}

out:
	if (tmp_blinded)
		bpf_jit_prog_release_other(prog, prog == orig_prog ? tmp : orig_prog);

	out_offset = -1;

	return prog;
}

/* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
bool bpf_jit_supports_subprog_tailcalls(void)
{
	return true;
}