1 // SPDX-License-Identifier: GPL-2.0-only
3 * BPF JIT compiler for ARM64
5 * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
8 #define pr_fmt(fmt) "bpf_jit: " fmt
10 #include <linux/bitfield.h>
11 #include <linux/bpf.h>
12 #include <linux/filter.h>
13 #include <linux/memory.h>
14 #include <linux/printk.h>
15 #include <linux/slab.h>
17 #include <asm/asm-extable.h>
18 #include <asm/byteorder.h>
19 #include <asm/cacheflush.h>
20 #include <asm/debug-monitors.h>
22 #include <asm/patching.h>
23 #include <asm/set_memory.h>
27 #define TMP_REG_1 (MAX_BPF_JIT_REG + 0)
28 #define TMP_REG_2 (MAX_BPF_JIT_REG + 1)
29 #define TCALL_CNT (MAX_BPF_JIT_REG + 2)
30 #define TMP_REG_3 (MAX_BPF_JIT_REG + 3)
31 #define FP_BOTTOM (MAX_BPF_JIT_REG + 4)
33 #define check_imm(bits, imm) do { \
34 if ((((imm) > 0) && ((imm) >> (bits))) || \
35 (((imm) < 0) && (~(imm) >> (bits)))) { \
36 pr_info("[%2d] imm=%d(0x%x) out of range\n", \
41 #define check_imm19(imm) check_imm(19, imm)
42 #define check_imm26(imm) check_imm(26, imm)
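/*
 * Example: a conditional branch (B.cond) or CBZ/CBNZ encodes a signed
 * 19-bit instruction offset (roughly +/-1MB of code), checked with
 * check_imm19(); an unconditional B/BL gets a 26-bit offset (roughly
 * +/-128MB), checked with check_imm26().
 */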
44 /* Map BPF registers to A64 registers */
45 static const int bpf2a64[] = {
46 /* return value from in-kernel function, and exit value from eBPF */
47 [BPF_REG_0] = A64_R(7),
48 /* arguments from eBPF program to in-kernel function */
49 [BPF_REG_1] = A64_R(0),
50 [BPF_REG_2] = A64_R(1),
51 [BPF_REG_3] = A64_R(2),
52 [BPF_REG_4] = A64_R(3),
53 [BPF_REG_5] = A64_R(4),
54 /* callee saved registers that in-kernel function will preserve */
55 [BPF_REG_6] = A64_R(19),
56 [BPF_REG_7] = A64_R(20),
57 [BPF_REG_8] = A64_R(21),
58 [BPF_REG_9] = A64_R(22),
59 /* read-only frame pointer to access stack */
60 [BPF_REG_FP] = A64_R(25),
61 /* temporary registers for BPF JIT */
62 [TMP_REG_1] = A64_R(10),
63 [TMP_REG_2] = A64_R(11),
64 [TMP_REG_3] = A64_R(12),
66 [TCALL_CNT] = A64_R(26),
67 /* temporary register for blinding constants */
68 [BPF_REG_AX] = A64_R(9),
69 [FP_BOTTOM] = A64_R(27),
73 const struct bpf_prog *prog;
84 u32 insn_ldr; /* load target */
85 u32 insn_br; /* branch to target */
86 u64 target; /* target value */
89 #define PLT_TARGET_SIZE sizeof_field(struct bpf_plt, target)
90 #define PLT_TARGET_OFFSET offsetof(struct bpf_plt, target)
92 static inline void emit(const u32 insn, struct jit_ctx *ctx)
94 if (ctx->image != NULL)
95 ctx->image[ctx->idx] = cpu_to_le32(insn);
100 static inline void emit_a64_mov_i(const int is64, const int reg,
101 const s32 val, struct jit_ctx *ctx)
104 u16 lo = val & 0xffff;
108 emit(A64_MOVN(is64, reg, (u16)~lo, 0), ctx);
110 emit(A64_MOVN(is64, reg, (u16)~hi, 16), ctx);
112 emit(A64_MOVK(is64, reg, lo, 0), ctx);
115 emit(A64_MOVZ(is64, reg, lo, 0), ctx);
117 emit(A64_MOVK(is64, reg, hi, 16), ctx);
121 static int i64_i16_blocks(const u64 val, bool inverse)
123 return (((val >> 0) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
124 (((val >> 16) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
125 (((val >> 32) & 0xffff) != (inverse ? 0xffff : 0x0000)) +
126 (((val >> 48) & 0xffff) != (inverse ? 0xffff : 0x0000));
129 static inline void emit_a64_mov_i64(const int reg, const u64 val,
132 u64 nrm_tmp = val, rev_tmp = ~val;
136 if (!(nrm_tmp >> 32))
137 return emit_a64_mov_i(0, reg, (u32)val, ctx);
139 inverse = i64_i16_blocks(nrm_tmp, true) < i64_i16_blocks(nrm_tmp, false);
140 shift = max(round_down((inverse ? (fls64(rev_tmp) - 1) :
141 (fls64(nrm_tmp) - 1)), 16), 0);
143 emit(A64_MOVN(1, reg, (rev_tmp >> shift) & 0xffff, shift), ctx);
145 emit(A64_MOVZ(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
148 if (((nrm_tmp >> shift) & 0xffff) != (inverse ? 0xffff : 0x0000))
149 emit(A64_MOVK(1, reg, (nrm_tmp >> shift) & 0xffff, shift), ctx);
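/*
 * Example: 0xdead00000000beef is emitted as MOVZ #0xdead, lsl #48
 * (MOVZ clears all other bits) followed by a single MOVK #0xbeef,
 * lsl #0; the two all-zero middle 16-bit blocks need no instruction.
 */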
154 static inline void emit_bti(u32 insn, struct jit_ctx *ctx)
156 if (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL))
161 * Kernel addresses in the vmalloc space use at most 48 bits, and the
162 * remaining bits are guaranteed to be 1s. So we can compose the address

163 * with a fixed length movn/movk/movk sequence.
165 static inline void emit_addr_mov_i64(const int reg, const u64 val,
171 emit(A64_MOVN(1, reg, ~tmp & 0xffff, shift), ctx);
175 emit(A64_MOVK(1, reg, tmp & 0xffff, shift), ctx);
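/*
 * The MOVN with the inverted low 16 bits sets bits [63:16] to all ones
 * in one instruction; the two MOVKs then patch bits [31:16] and
 * [47:32], leaving bits [63:48] as ones, which matches any kernel
 * address in the vmalloc range.
 */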
179 static inline void emit_call(u64 target, struct jit_ctx *ctx)
181 u8 tmp = bpf2a64[TMP_REG_1];
183 emit_addr_mov_i64(tmp, target, ctx);
184 emit(A64_BLR(tmp), ctx);
187 static inline int bpf2a64_offset(int bpf_insn, int off,
188 const struct jit_ctx *ctx)
190 /* BPF JMP offset is relative to the next instruction */
193 * Whereas arm64 branch instructions encode the offset
194 * from the branch itself, so we must subtract 1 from the
195 * instruction offset.
197 return ctx->offset[bpf_insn + off] - (ctx->offset[bpf_insn] - 1);
200 static void jit_fill_hole(void *area, unsigned int size)
203 /* We are guaranteed to have aligned memory. */
204 for (ptr = area; size >= sizeof(u32); size -= sizeof(u32))
205 *ptr++ = cpu_to_le32(AARCH64_BREAK_FAULT);
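/* AARCH64_BREAK_FAULT is a BRK encoding, so padding and unreachable
 * gaps in the JIT image trap instead of executing stale bytes
 */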
208 static inline int epilogue_offset(const struct jit_ctx *ctx)
210 int to = ctx->epilogue_offset;
216 static bool is_addsub_imm(u32 imm)
218 /* Either imm12 or shifted imm12. */
219 return !(imm & ~0xfff) || !(imm & ~0xfff000);
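/*
 * Example: 0xabc and 0xabc000 are both encodable (imm12, optionally
 * shifted left by 12), whereas 0x1001 is not and must be moved into a
 * temporary register first.
 */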
223 * There are 3 types of AArch64 LDR/STR (immediate) instruction:
224 * Post-index, Pre-index, Unsigned offset.
226 * For BPF ldr/str, the "unsigned offset" type is sufficient.
228 * "Unsigned offset" type LDR(immediate) format:
231 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
232 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
233 * |x x|1 1 1 0 0 1 0 1| imm12 | Rn | Rt |
234 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
237 * "Unsigned offset" type STR(immediate) format:
239 * 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
240 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
241 * |x x|1 1 1 0 0 1 0 0| imm12 | Rn | Rt |
242 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
245 * The offset is calculated from imm12 and scale in the following way:
247 * offset = (u64)imm12 << scale
249 static bool is_lsi_offset(int offset, int scale)
254 if (offset > (0xFFF << scale))
257 if (offset & ((1 << scale) - 1))
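/*
 * Example: for a 64-bit access (scale 3) the unsigned-offset form
 * covers byte offsets 0..32760 in steps of 8; negative, unaligned or
 * larger offsets fall back to a mov into tmp plus a register-offset
 * ldr/str.
 */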
263 /* generated prologue:
264 * bti c // if CONFIG_ARM64_BTI_KERNEL
267 * paciasp // if CONFIG_ARM64_PTR_AUTH_KERNEL
268 * stp x29, lr, [sp, #-16]!
270 * stp x19, x20, [sp, #-16]!
271 * stp x21, x22, [sp, #-16]!
272 * stp x25, x26, [sp, #-16]!
273 * stp x27, x28, [sp, #-16]!
279 #define BTI_INSNS (IS_ENABLED(CONFIG_ARM64_BTI_KERNEL) ? 1 : 0)
280 #define PAC_INSNS (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL) ? 1 : 0)
282 /* Offset of nop instruction in bpf prog entry to be poked */
283 #define POKE_OFFSET (BTI_INSNS + 1)
285 /* Tail call offset to jump into */
286 #define PROLOGUE_OFFSET (BTI_INSNS + 2 + PAC_INSNS + 8)
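/* BTI_INSNS + 2 counts the optional "bti c" plus the mov-x9/nop pair
 * that forms the attach patch site; PAC_INSNS + 8 counts the optional
 * paciasp plus the eight stack and register setup instructions of the
 * prologue. A tail call enters the target at
 * bpf_func + sizeof(u32) * PROLOGUE_OFFSET, skipping all of them.
 */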
288 static int build_prologue(struct jit_ctx *ctx, bool ebpf_from_cbpf)
290 const struct bpf_prog *prog = ctx->prog;
291 const bool is_main_prog = prog->aux->func_idx == 0;
292 const u8 r6 = bpf2a64[BPF_REG_6];
293 const u8 r7 = bpf2a64[BPF_REG_7];
294 const u8 r8 = bpf2a64[BPF_REG_8];
295 const u8 r9 = bpf2a64[BPF_REG_9];
296 const u8 fp = bpf2a64[BPF_REG_FP];
297 const u8 tcc = bpf2a64[TCALL_CNT];
298 const u8 fpb = bpf2a64[FP_BOTTOM];
299 const int idx0 = ctx->idx;
303 * BPF prog stack layout
306 * original A64_SP => 0:+-----+ BPF prologue
308 * current A64_FP => -16:+-----+
309 * | ... | callee saved registers
310 * BPF fp register => -64:+-----+ <= (BPF_FP)
312 * | ... | BPF prog stack
314 * +-----+ <= (BPF_FP - prog->aux->stack_depth)
316 * current A64_SP => +-----+ <= (BPF_FP - ctx->stack_size)
318 * | ... | Function call stack
325 emit_bti(A64_BTI_C, ctx);
327 emit(A64_MOV(1, A64_R(9), A64_LR), ctx);
331 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
332 emit(A64_PACIASP, ctx);
334 /* Save FP and LR registers to stay aligned with the ARM64 AAPCS */
335 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
336 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
338 /* Save callee-saved registers */
339 emit(A64_PUSH(r6, r7, A64_SP), ctx);
340 emit(A64_PUSH(r8, r9, A64_SP), ctx);
341 emit(A64_PUSH(fp, tcc, A64_SP), ctx);
342 emit(A64_PUSH(fpb, A64_R(28), A64_SP), ctx);
344 /* Set up BPF prog stack base register */
345 emit(A64_MOV(1, fp, A64_SP), ctx);
347 if (!ebpf_from_cbpf && is_main_prog) {
348 /* Initialize tail_call_cnt */
349 emit(A64_MOVZ(1, tcc, 0, 0), ctx);
351 cur_offset = ctx->idx - idx0;
352 if (cur_offset != PROLOGUE_OFFSET) {
353 pr_err_once("PROLOGUE_OFFSET = %d, expected %d!\n",
354 cur_offset, PROLOGUE_OFFSET);
358 /* BTI landing pad for the tail call, done with a BR */
359 emit_bti(A64_BTI_J, ctx);
362 emit(A64_SUB_I(1, fpb, fp, ctx->fpb_offset), ctx);
364 /* Stack size must be a multiple of 16 bytes */
365 ctx->stack_size = round_up(prog->aux->stack_depth, 16);
367 /* Set up function call stack */
368 emit(A64_SUB_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
372 static int out_offset = -1; /* initialized on the first pass of build_body() */
373 static int emit_bpf_tail_call(struct jit_ctx *ctx)
375 /* bpf_tail_call(void *prog_ctx, struct bpf_array *array, u64 index) */
376 const u8 r2 = bpf2a64[BPF_REG_2];
377 const u8 r3 = bpf2a64[BPF_REG_3];
379 const u8 tmp = bpf2a64[TMP_REG_1];
380 const u8 prg = bpf2a64[TMP_REG_2];
381 const u8 tcc = bpf2a64[TCALL_CNT];
382 const int idx0 = ctx->idx;
383 #define cur_offset (ctx->idx - idx0)
384 #define jmp_offset (out_offset - (cur_offset))
387 /* if (index >= array->map.max_entries)
390 off = offsetof(struct bpf_array, map.max_entries);
391 emit_a64_mov_i64(tmp, off, ctx);
392 emit(A64_LDR32(tmp, r2, tmp), ctx);
393 emit(A64_MOV(0, r3, r3), ctx);
394 emit(A64_CMP(0, r3, tmp), ctx);
395 emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
398 * if (tail_call_cnt >= MAX_TAIL_CALL_CNT)
402 emit_a64_mov_i64(tmp, MAX_TAIL_CALL_CNT, ctx);
403 emit(A64_CMP(1, tcc, tmp), ctx);
404 emit(A64_B_(A64_COND_CS, jmp_offset), ctx);
405 emit(A64_ADD_I(1, tcc, tcc, 1), ctx);
407 /* prog = array->ptrs[index];
411 off = offsetof(struct bpf_array, ptrs);
412 emit_a64_mov_i64(tmp, off, ctx);
413 emit(A64_ADD(1, tmp, r2, tmp), ctx);
414 emit(A64_LSL(1, prg, r3, 3), ctx);
415 emit(A64_LDR64(prg, tmp, prg), ctx);
416 emit(A64_CBZ(1, prg, jmp_offset), ctx);
418 /* goto *(prog->bpf_func + prologue_offset); */
419 off = offsetof(struct bpf_prog, bpf_func);
420 emit_a64_mov_i64(tmp, off, ctx);
421 emit(A64_LDR64(tmp, prg, tmp), ctx);
422 emit(A64_ADD_I(1, tmp, tmp, sizeof(u32) * PROLOGUE_OFFSET), ctx);
423 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
424 emit(A64_BR(tmp), ctx);
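/* br, not blr: the tail-called program reuses the current frame; the
 * sp adjustment above drops this program's BPF stack area, and the
 * target is entered past its prologue so the callee-saved registers
 * and tail_call_cnt are kept as they are
 */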
427 if (out_offset == -1)
428 out_offset = cur_offset;
429 if (cur_offset != out_offset) {
430 pr_err_once("tail_call out_offset = %d, expected %d!\n",
431 cur_offset, out_offset);
439 #ifdef CONFIG_ARM64_LSE_ATOMICS
440 static int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
442 const u8 code = insn->code;
443 const u8 dst = bpf2a64[insn->dst_reg];
444 const u8 src = bpf2a64[insn->src_reg];
445 const u8 tmp = bpf2a64[TMP_REG_1];
446 const u8 tmp2 = bpf2a64[TMP_REG_2];
447 const bool isdw = BPF_SIZE(code) == BPF_DW;
448 const s16 off = insn->off;
454 emit_a64_mov_i(1, tmp, off, ctx);
455 emit(A64_ADD(1, tmp, tmp, dst), ctx);
460 /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
462 emit(A64_STADD(isdw, reg, src), ctx);
465 emit(A64_MVN(isdw, tmp2, src), ctx);
466 emit(A64_STCLR(isdw, reg, tmp2), ctx);
469 emit(A64_STSET(isdw, reg, src), ctx);
472 emit(A64_STEOR(isdw, reg, src), ctx);
474 /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
475 case BPF_ADD | BPF_FETCH:
476 emit(A64_LDADDAL(isdw, src, reg, src), ctx);
478 case BPF_AND | BPF_FETCH:
479 emit(A64_MVN(isdw, tmp2, src), ctx);
480 emit(A64_LDCLRAL(isdw, src, reg, tmp2), ctx);
482 case BPF_OR | BPF_FETCH:
483 emit(A64_LDSETAL(isdw, src, reg, src), ctx);
485 case BPF_XOR | BPF_FETCH:
486 emit(A64_LDEORAL(isdw, src, reg, src), ctx);
488 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
490 emit(A64_SWPAL(isdw, src, reg, src), ctx);
492 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
494 emit(A64_CASAL(isdw, src, reg, bpf2a64[BPF_REG_0]), ctx);
497 pr_err_once("unknown atomic op code %02x\n", insn->imm);
504 static inline int emit_lse_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
510 static int emit_ll_sc_atomic(const struct bpf_insn *insn, struct jit_ctx *ctx)
512 const u8 code = insn->code;
513 const u8 dst = bpf2a64[insn->dst_reg];
514 const u8 src = bpf2a64[insn->src_reg];
515 const u8 tmp = bpf2a64[TMP_REG_1];
516 const u8 tmp2 = bpf2a64[TMP_REG_2];
517 const u8 tmp3 = bpf2a64[TMP_REG_3];
518 const int i = insn - ctx->prog->insnsi;
519 const s32 imm = insn->imm;
520 const s16 off = insn->off;
521 const bool isdw = BPF_SIZE(code) == BPF_DW;
528 emit_a64_mov_i(1, tmp, off, ctx);
529 emit(A64_ADD(1, tmp, tmp, dst), ctx);
533 if (imm == BPF_ADD || imm == BPF_AND ||
534 imm == BPF_OR || imm == BPF_XOR) {
535 /* lock *(u32/u64 *)(dst_reg + off) <op>= src_reg */
536 emit(A64_LDXR(isdw, tmp2, reg), ctx);
538 emit(A64_ADD(isdw, tmp2, tmp2, src), ctx);
539 else if (imm == BPF_AND)
540 emit(A64_AND(isdw, tmp2, tmp2, src), ctx);
541 else if (imm == BPF_OR)
542 emit(A64_ORR(isdw, tmp2, tmp2, src), ctx);
544 emit(A64_EOR(isdw, tmp2, tmp2, src), ctx);
545 emit(A64_STXR(isdw, tmp2, reg, tmp3), ctx);
547 check_imm19(jmp_offset);
548 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
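/* a non-zero status from stxr means the exclusive store failed
 * (the location was touched by another CPU), so branch back to the
 * ldxr and retry
 */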
549 } else if (imm == (BPF_ADD | BPF_FETCH) ||
550 imm == (BPF_AND | BPF_FETCH) ||
551 imm == (BPF_OR | BPF_FETCH) ||
552 imm == (BPF_XOR | BPF_FETCH)) {
553 /* src_reg = atomic_fetch_<op>(dst_reg + off, src_reg) */
554 const u8 ax = bpf2a64[BPF_REG_AX];
556 emit(A64_MOV(isdw, ax, src), ctx);
557 emit(A64_LDXR(isdw, src, reg), ctx);
558 if (imm == (BPF_ADD | BPF_FETCH))
559 emit(A64_ADD(isdw, tmp2, src, ax), ctx);
560 else if (imm == (BPF_AND | BPF_FETCH))
561 emit(A64_AND(isdw, tmp2, src, ax), ctx);
562 else if (imm == (BPF_OR | BPF_FETCH))
563 emit(A64_ORR(isdw, tmp2, src, ax), ctx);
565 emit(A64_EOR(isdw, tmp2, src, ax), ctx);
566 emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
568 check_imm19(jmp_offset);
569 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
570 emit(A64_DMB_ISH, ctx);
571 } else if (imm == BPF_XCHG) {
572 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
573 emit(A64_MOV(isdw, tmp2, src), ctx);
574 emit(A64_LDXR(isdw, src, reg), ctx);
575 emit(A64_STLXR(isdw, tmp2, reg, tmp3), ctx);
577 check_imm19(jmp_offset);
578 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
579 emit(A64_DMB_ISH, ctx);
580 } else if (imm == BPF_CMPXCHG) {
581 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
582 const u8 r0 = bpf2a64[BPF_REG_0];
584 emit(A64_MOV(isdw, tmp2, r0), ctx);
585 emit(A64_LDXR(isdw, r0, reg), ctx);
586 emit(A64_EOR(isdw, tmp3, r0, tmp2), ctx);
588 check_imm19(jmp_offset);
589 emit(A64_CBNZ(isdw, tmp3, jmp_offset), ctx);
590 emit(A64_STLXR(isdw, src, reg, tmp3), ctx);
592 check_imm19(jmp_offset);
593 emit(A64_CBNZ(0, tmp3, jmp_offset), ctx);
594 emit(A64_DMB_ISH, ctx);
596 pr_err_once("unknown atomic op code %02x\n", imm);
603 void dummy_tramp(void);
606 " .pushsection .text, \"ax\", @progbits\n"
607 " .global dummy_tramp\n"
608 " .type dummy_tramp, %function\n"
610 #if IS_ENABLED(CONFIG_ARM64_BTI_KERNEL)
611 " bti j\n" /* dummy_tramp is called via "br x10" */
616 " .size dummy_tramp, .-dummy_tramp\n"
620 /* build a plt initialized like this:
628 * when a long jump trampoline is attached, target is filled with the
629 * trampoline address, and when the trampoline is removed, target is
630 * restored to dummy_tramp address.
632 static void build_plt(struct jit_ctx *ctx)
634 const u8 tmp = bpf2a64[TMP_REG_1];
635 struct bpf_plt *plt = NULL;
637 /* make sure target is 64-bit aligned */
638 if ((ctx->idx + PLT_TARGET_OFFSET / AARCH64_INSN_SIZE) % 2)
641 plt = (struct bpf_plt *)(ctx->image + ctx->idx);
642 /* plt is called via bl, no BTI needed here */
643 emit(A64_LDR64LIT(tmp, 2 * AARCH64_INSN_SIZE), ctx);
644 emit(A64_BR(tmp), ctx);
647 plt->target = (u64)&dummy_tramp;
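/* keeping target 64-bit aligned lets bpf_arch_text_poke() update it
 * with a single WRITE_ONCE() store that concurrent readers of the
 * ldr-literal above observe atomically
 */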
650 static void build_epilogue(struct jit_ctx *ctx)
652 const u8 r0 = bpf2a64[BPF_REG_0];
653 const u8 r6 = bpf2a64[BPF_REG_6];
654 const u8 r7 = bpf2a64[BPF_REG_7];
655 const u8 r8 = bpf2a64[BPF_REG_8];
656 const u8 r9 = bpf2a64[BPF_REG_9];
657 const u8 fp = bpf2a64[BPF_REG_FP];
658 const u8 fpb = bpf2a64[FP_BOTTOM];
660 /* We're done with BPF stack */
661 emit(A64_ADD_I(1, A64_SP, A64_SP, ctx->stack_size), ctx);
663 /* Restore x27 and x28 */
664 emit(A64_POP(fpb, A64_R(28), A64_SP), ctx);
665 /* Restore fp (x25) and x26 */
666 emit(A64_POP(fp, A64_R(26), A64_SP), ctx);
668 /* Restore callee-saved registers */
669 emit(A64_POP(r8, r9, A64_SP), ctx);
670 emit(A64_POP(r6, r7, A64_SP), ctx);
672 /* Restore FP/LR registers */
673 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
675 /* Set return value */
676 emit(A64_MOV(1, A64_R(0), r0), ctx);
678 /* Authenticate lr */
679 if (IS_ENABLED(CONFIG_ARM64_PTR_AUTH_KERNEL))
680 emit(A64_AUTIASP, ctx);
682 emit(A64_RET(A64_LR), ctx);
685 #define BPF_FIXUP_OFFSET_MASK GENMASK(26, 0)
686 #define BPF_FIXUP_REG_MASK GENMASK(31, 27)
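/* ex->fixup packs two fields: bits [26:0] hold how far the resume
 * address lies before &ex->fixup, and bits [31:27] hold the A64
 * destination register that is zeroed when the access faults
 */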
688 bool ex_handler_bpf(const struct exception_table_entry *ex,
689 struct pt_regs *regs)
691 off_t offset = FIELD_GET(BPF_FIXUP_OFFSET_MASK, ex->fixup);
692 int dst_reg = FIELD_GET(BPF_FIXUP_REG_MASK, ex->fixup);
694 regs->regs[dst_reg] = 0;
695 regs->pc = (unsigned long)&ex->fixup - offset;
699 /* For accesses to BTF pointers, add an entry to the exception table */
700 static int add_exception_handler(const struct bpf_insn *insn,
706 struct exception_table_entry *ex;
712 if (BPF_MODE(insn->code) != BPF_PROBE_MEM)
715 if (!ctx->prog->aux->extable ||
716 WARN_ON_ONCE(ctx->exentry_idx >= ctx->prog->aux->num_exentries))
719 ex = &ctx->prog->aux->extable[ctx->exentry_idx];
720 pc = (unsigned long)&ctx->image[ctx->idx - 1];
722 offset = pc - (long)&ex->insn;
723 if (WARN_ON_ONCE(offset >= 0 || offset < INT_MIN))
728 * Since the extable follows the program, the fixup offset is always
729 * negative and limited to BPF_JIT_REGION_SIZE. Store a positive value
730 * to keep things simple, and put the destination register in the upper
731 * bits. We don't need to worry about buildtime or runtime sort
732 * modifying the upper bits because the table is already sorted, and
733 * isn't part of the main exception table.
735 offset = (long)&ex->fixup - (pc + AARCH64_INSN_SIZE);
736 if (!FIELD_FIT(BPF_FIXUP_OFFSET_MASK, offset))
739 ex->fixup = FIELD_PREP(BPF_FIXUP_OFFSET_MASK, offset) |
740 FIELD_PREP(BPF_FIXUP_REG_MASK, dst_reg);
742 ex->type = EX_TYPE_BPF;
748 /* JITs an eBPF instruction.
750 * 0 - successfully JITed an 8-byte eBPF instruction.
751 * >0 - successfully JITed a 16-byte eBPF instruction.
752 * <0 - failed to JIT.
754 static int build_insn(const struct bpf_insn *insn, struct jit_ctx *ctx,
757 const u8 code = insn->code;
758 const u8 dst = bpf2a64[insn->dst_reg];
759 const u8 src = bpf2a64[insn->src_reg];
760 const u8 tmp = bpf2a64[TMP_REG_1];
761 const u8 tmp2 = bpf2a64[TMP_REG_2];
762 const u8 fp = bpf2a64[BPF_REG_FP];
763 const u8 fpb = bpf2a64[FP_BOTTOM];
764 const s16 off = insn->off;
765 const s32 imm = insn->imm;
766 const int i = insn - ctx->prog->insnsi;
767 const bool is64 = BPF_CLASS(code) == BPF_ALU64 ||
768 BPF_CLASS(code) == BPF_JMP;
779 case BPF_ALU | BPF_MOV | BPF_X:
780 case BPF_ALU64 | BPF_MOV | BPF_X:
781 emit(A64_MOV(is64, dst, src), ctx);
783 /* dst = dst OP src */
784 case BPF_ALU | BPF_ADD | BPF_X:
785 case BPF_ALU64 | BPF_ADD | BPF_X:
786 emit(A64_ADD(is64, dst, dst, src), ctx);
788 case BPF_ALU | BPF_SUB | BPF_X:
789 case BPF_ALU64 | BPF_SUB | BPF_X:
790 emit(A64_SUB(is64, dst, dst, src), ctx);
792 case BPF_ALU | BPF_AND | BPF_X:
793 case BPF_ALU64 | BPF_AND | BPF_X:
794 emit(A64_AND(is64, dst, dst, src), ctx);
796 case BPF_ALU | BPF_OR | BPF_X:
797 case BPF_ALU64 | BPF_OR | BPF_X:
798 emit(A64_ORR(is64, dst, dst, src), ctx);
800 case BPF_ALU | BPF_XOR | BPF_X:
801 case BPF_ALU64 | BPF_XOR | BPF_X:
802 emit(A64_EOR(is64, dst, dst, src), ctx);
804 case BPF_ALU | BPF_MUL | BPF_X:
805 case BPF_ALU64 | BPF_MUL | BPF_X:
806 emit(A64_MUL(is64, dst, dst, src), ctx);
808 case BPF_ALU | BPF_DIV | BPF_X:
809 case BPF_ALU64 | BPF_DIV | BPF_X:
810 emit(A64_UDIV(is64, dst, dst, src), ctx);
812 case BPF_ALU | BPF_MOD | BPF_X:
813 case BPF_ALU64 | BPF_MOD | BPF_X:
814 emit(A64_UDIV(is64, tmp, dst, src), ctx);
815 emit(A64_MSUB(is64, dst, dst, tmp, src), ctx);
817 case BPF_ALU | BPF_LSH | BPF_X:
818 case BPF_ALU64 | BPF_LSH | BPF_X:
819 emit(A64_LSLV(is64, dst, dst, src), ctx);
821 case BPF_ALU | BPF_RSH | BPF_X:
822 case BPF_ALU64 | BPF_RSH | BPF_X:
823 emit(A64_LSRV(is64, dst, dst, src), ctx);
825 case BPF_ALU | BPF_ARSH | BPF_X:
826 case BPF_ALU64 | BPF_ARSH | BPF_X:
827 emit(A64_ASRV(is64, dst, dst, src), ctx);
830 case BPF_ALU | BPF_NEG:
831 case BPF_ALU64 | BPF_NEG:
832 emit(A64_NEG(is64, dst, dst), ctx);
834 /* dst = BSWAP##imm(dst) */
835 case BPF_ALU | BPF_END | BPF_FROM_LE:
836 case BPF_ALU | BPF_END | BPF_FROM_BE:
837 #ifdef CONFIG_CPU_BIG_ENDIAN
838 if (BPF_SRC(code) == BPF_FROM_BE)
840 #else /* !CONFIG_CPU_BIG_ENDIAN */
841 if (BPF_SRC(code) == BPF_FROM_LE)
846 emit(A64_REV16(is64, dst, dst), ctx);
847 /* zero-extend 16 bits into 64 bits */
848 emit(A64_UXTH(is64, dst, dst), ctx);
851 emit(A64_REV32(is64, dst, dst), ctx);
852 /* upper 32 bits already cleared */
855 emit(A64_REV64(dst, dst), ctx);
862 /* zero-extend 16 bits into 64 bits */
863 emit(A64_UXTH(is64, dst, dst), ctx);
866 /* zero-extend 32 bits into 64 bits */
867 emit(A64_UXTW(is64, dst, dst), ctx);
875 case BPF_ALU | BPF_MOV | BPF_K:
876 case BPF_ALU64 | BPF_MOV | BPF_K:
877 emit_a64_mov_i(is64, dst, imm, ctx);
879 /* dst = dst OP imm */
880 case BPF_ALU | BPF_ADD | BPF_K:
881 case BPF_ALU64 | BPF_ADD | BPF_K:
882 if (is_addsub_imm(imm)) {
883 emit(A64_ADD_I(is64, dst, dst, imm), ctx);
884 } else if (is_addsub_imm(-imm)) {
885 emit(A64_SUB_I(is64, dst, dst, -imm), ctx);
887 emit_a64_mov_i(is64, tmp, imm, ctx);
888 emit(A64_ADD(is64, dst, dst, tmp), ctx);
891 case BPF_ALU | BPF_SUB | BPF_K:
892 case BPF_ALU64 | BPF_SUB | BPF_K:
893 if (is_addsub_imm(imm)) {
894 emit(A64_SUB_I(is64, dst, dst, imm), ctx);
895 } else if (is_addsub_imm(-imm)) {
896 emit(A64_ADD_I(is64, dst, dst, -imm), ctx);
898 emit_a64_mov_i(is64, tmp, imm, ctx);
899 emit(A64_SUB(is64, dst, dst, tmp), ctx);
902 case BPF_ALU | BPF_AND | BPF_K:
903 case BPF_ALU64 | BPF_AND | BPF_K:
904 a64_insn = A64_AND_I(is64, dst, dst, imm);
905 if (a64_insn != AARCH64_BREAK_FAULT) {
908 emit_a64_mov_i(is64, tmp, imm, ctx);
909 emit(A64_AND(is64, dst, dst, tmp), ctx);
912 case BPF_ALU | BPF_OR | BPF_K:
913 case BPF_ALU64 | BPF_OR | BPF_K:
914 a64_insn = A64_ORR_I(is64, dst, dst, imm);
915 if (a64_insn != AARCH64_BREAK_FAULT) {
918 emit_a64_mov_i(is64, tmp, imm, ctx);
919 emit(A64_ORR(is64, dst, dst, tmp), ctx);
922 case BPF_ALU | BPF_XOR | BPF_K:
923 case BPF_ALU64 | BPF_XOR | BPF_K:
924 a64_insn = A64_EOR_I(is64, dst, dst, imm);
925 if (a64_insn != AARCH64_BREAK_FAULT) {
928 emit_a64_mov_i(is64, tmp, imm, ctx);
929 emit(A64_EOR(is64, dst, dst, tmp), ctx);
932 case BPF_ALU | BPF_MUL | BPF_K:
933 case BPF_ALU64 | BPF_MUL | BPF_K:
934 emit_a64_mov_i(is64, tmp, imm, ctx);
935 emit(A64_MUL(is64, dst, dst, tmp), ctx);
937 case BPF_ALU | BPF_DIV | BPF_K:
938 case BPF_ALU64 | BPF_DIV | BPF_K:
939 emit_a64_mov_i(is64, tmp, imm, ctx);
940 emit(A64_UDIV(is64, dst, dst, tmp), ctx);
942 case BPF_ALU | BPF_MOD | BPF_K:
943 case BPF_ALU64 | BPF_MOD | BPF_K:
944 emit_a64_mov_i(is64, tmp2, imm, ctx);
945 emit(A64_UDIV(is64, tmp, dst, tmp2), ctx);
946 emit(A64_MSUB(is64, dst, dst, tmp, tmp2), ctx);
948 case BPF_ALU | BPF_LSH | BPF_K:
949 case BPF_ALU64 | BPF_LSH | BPF_K:
950 emit(A64_LSL(is64, dst, dst, imm), ctx);
952 case BPF_ALU | BPF_RSH | BPF_K:
953 case BPF_ALU64 | BPF_RSH | BPF_K:
954 emit(A64_LSR(is64, dst, dst, imm), ctx);
956 case BPF_ALU | BPF_ARSH | BPF_K:
957 case BPF_ALU64 | BPF_ARSH | BPF_K:
958 emit(A64_ASR(is64, dst, dst, imm), ctx);
962 case BPF_JMP | BPF_JA:
963 jmp_offset = bpf2a64_offset(i, off, ctx);
964 check_imm26(jmp_offset);
965 emit(A64_B(jmp_offset), ctx);
967 /* IF (dst COND src) JUMP off */
968 case BPF_JMP | BPF_JEQ | BPF_X:
969 case BPF_JMP | BPF_JGT | BPF_X:
970 case BPF_JMP | BPF_JLT | BPF_X:
971 case BPF_JMP | BPF_JGE | BPF_X:
972 case BPF_JMP | BPF_JLE | BPF_X:
973 case BPF_JMP | BPF_JNE | BPF_X:
974 case BPF_JMP | BPF_JSGT | BPF_X:
975 case BPF_JMP | BPF_JSLT | BPF_X:
976 case BPF_JMP | BPF_JSGE | BPF_X:
977 case BPF_JMP | BPF_JSLE | BPF_X:
978 case BPF_JMP32 | BPF_JEQ | BPF_X:
979 case BPF_JMP32 | BPF_JGT | BPF_X:
980 case BPF_JMP32 | BPF_JLT | BPF_X:
981 case BPF_JMP32 | BPF_JGE | BPF_X:
982 case BPF_JMP32 | BPF_JLE | BPF_X:
983 case BPF_JMP32 | BPF_JNE | BPF_X:
984 case BPF_JMP32 | BPF_JSGT | BPF_X:
985 case BPF_JMP32 | BPF_JSLT | BPF_X:
986 case BPF_JMP32 | BPF_JSGE | BPF_X:
987 case BPF_JMP32 | BPF_JSLE | BPF_X:
988 emit(A64_CMP(is64, dst, src), ctx);
990 jmp_offset = bpf2a64_offset(i, off, ctx);
991 check_imm19(jmp_offset);
992 switch (BPF_OP(code)) {
994 jmp_cond = A64_COND_EQ;
997 jmp_cond = A64_COND_HI;
1000 jmp_cond = A64_COND_CC;
1003 jmp_cond = A64_COND_CS;
1006 jmp_cond = A64_COND_LS;
1010 jmp_cond = A64_COND_NE;
1013 jmp_cond = A64_COND_GT;
1016 jmp_cond = A64_COND_LT;
1019 jmp_cond = A64_COND_GE;
1022 jmp_cond = A64_COND_LE;
1027 emit(A64_B_(jmp_cond, jmp_offset), ctx);
1029 case BPF_JMP | BPF_JSET | BPF_X:
1030 case BPF_JMP32 | BPF_JSET | BPF_X:
1031 emit(A64_TST(is64, dst, src), ctx);
1033 /* IF (dst COND imm) JUMP off */
1034 case BPF_JMP | BPF_JEQ | BPF_K:
1035 case BPF_JMP | BPF_JGT | BPF_K:
1036 case BPF_JMP | BPF_JLT | BPF_K:
1037 case BPF_JMP | BPF_JGE | BPF_K:
1038 case BPF_JMP | BPF_JLE | BPF_K:
1039 case BPF_JMP | BPF_JNE | BPF_K:
1040 case BPF_JMP | BPF_JSGT | BPF_K:
1041 case BPF_JMP | BPF_JSLT | BPF_K:
1042 case BPF_JMP | BPF_JSGE | BPF_K:
1043 case BPF_JMP | BPF_JSLE | BPF_K:
1044 case BPF_JMP32 | BPF_JEQ | BPF_K:
1045 case BPF_JMP32 | BPF_JGT | BPF_K:
1046 case BPF_JMP32 | BPF_JLT | BPF_K:
1047 case BPF_JMP32 | BPF_JGE | BPF_K:
1048 case BPF_JMP32 | BPF_JLE | BPF_K:
1049 case BPF_JMP32 | BPF_JNE | BPF_K:
1050 case BPF_JMP32 | BPF_JSGT | BPF_K:
1051 case BPF_JMP32 | BPF_JSLT | BPF_K:
1052 case BPF_JMP32 | BPF_JSGE | BPF_K:
1053 case BPF_JMP32 | BPF_JSLE | BPF_K:
1054 if (is_addsub_imm(imm)) {
1055 emit(A64_CMP_I(is64, dst, imm), ctx);
1056 } else if (is_addsub_imm(-imm)) {
1057 emit(A64_CMN_I(is64, dst, -imm), ctx);
1059 emit_a64_mov_i(is64, tmp, imm, ctx);
1060 emit(A64_CMP(is64, dst, tmp), ctx);
1063 case BPF_JMP | BPF_JSET | BPF_K:
1064 case BPF_JMP32 | BPF_JSET | BPF_K:
1065 a64_insn = A64_TST_I(is64, dst, imm);
1066 if (a64_insn != AARCH64_BREAK_FAULT) {
1067 emit(a64_insn, ctx);
1069 emit_a64_mov_i(is64, tmp, imm, ctx);
1070 emit(A64_TST(is64, dst, tmp), ctx);
1074 case BPF_JMP | BPF_CALL:
1076 const u8 r0 = bpf2a64[BPF_REG_0];
1077 bool func_addr_fixed;
1080 ret = bpf_jit_get_func_addr(ctx->prog, insn, extra_pass,
1081 &func_addr, &func_addr_fixed);
1084 emit_call(func_addr, ctx);
1085 emit(A64_MOV(1, r0, A64_R(0)), ctx);
1089 case BPF_JMP | BPF_TAIL_CALL:
1090 if (emit_bpf_tail_call(ctx))
1093 /* function return */
1094 case BPF_JMP | BPF_EXIT:
1095 /* Optimization: when the last instruction is EXIT,
1096 simply fall through to the epilogue. */
1097 if (i == ctx->prog->len - 1)
1099 jmp_offset = epilogue_offset(ctx);
1100 check_imm26(jmp_offset);
1101 emit(A64_B(jmp_offset), ctx);
1105 case BPF_LD | BPF_IMM | BPF_DW:
1107 const struct bpf_insn insn1 = insn[1];
1110 imm64 = (u64)insn1.imm << 32 | (u32)imm;
1111 if (bpf_pseudo_func(insn))
1112 emit_addr_mov_i64(dst, imm64, ctx);
1114 emit_a64_mov_i64(dst, imm64, ctx);
1119 /* LDX: dst = *(size *)(src + off) */
1120 case BPF_LDX | BPF_MEM | BPF_W:
1121 case BPF_LDX | BPF_MEM | BPF_H:
1122 case BPF_LDX | BPF_MEM | BPF_B:
1123 case BPF_LDX | BPF_MEM | BPF_DW:
1124 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1125 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1126 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1127 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1128 if (ctx->fpb_offset > 0 && src == fp) {
1130 off_adj = off + ctx->fpb_offset;
1135 switch (BPF_SIZE(code)) {
1137 if (is_lsi_offset(off_adj, 2)) {
1138 emit(A64_LDR32I(dst, src_adj, off_adj), ctx);
1140 emit_a64_mov_i(1, tmp, off, ctx);
1141 emit(A64_LDR32(dst, src, tmp), ctx);
1145 if (is_lsi_offset(off_adj, 1)) {
1146 emit(A64_LDRHI(dst, src_adj, off_adj), ctx);
1148 emit_a64_mov_i(1, tmp, off, ctx);
1149 emit(A64_LDRH(dst, src, tmp), ctx);
1153 if (is_lsi_offset(off_adj, 0)) {
1154 emit(A64_LDRBI(dst, src_adj, off_adj), ctx);
1156 emit_a64_mov_i(1, tmp, off, ctx);
1157 emit(A64_LDRB(dst, src, tmp), ctx);
1161 if (is_lsi_offset(off_adj, 3)) {
1162 emit(A64_LDR64I(dst, src_adj, off_adj), ctx);
1164 emit_a64_mov_i(1, tmp, off, ctx);
1165 emit(A64_LDR64(dst, src, tmp), ctx);
1170 ret = add_exception_handler(insn, ctx, dst);
1175 /* speculation barrier */
1176 case BPF_ST | BPF_NOSPEC:
1178 * Nothing required here.
1180 * In case of arm64, we rely on the firmware mitigation of
1181 * Speculative Store Bypass as controlled via the ssbd kernel
1182 * parameter. Whenever the mitigation is enabled, it works
1183 * for all of the kernel code with no need to provide any
1184 * additional instructions.
1188 /* ST: *(size *)(dst + off) = imm */
1189 case BPF_ST | BPF_MEM | BPF_W:
1190 case BPF_ST | BPF_MEM | BPF_H:
1191 case BPF_ST | BPF_MEM | BPF_B:
1192 case BPF_ST | BPF_MEM | BPF_DW:
1193 if (ctx->fpb_offset > 0 && dst == fp) {
1195 off_adj = off + ctx->fpb_offset;
1200 /* Load imm to a register then store it */
1201 emit_a64_mov_i(1, tmp, imm, ctx);
1202 switch (BPF_SIZE(code)) {
1204 if (is_lsi_offset(off_adj, 2)) {
1205 emit(A64_STR32I(tmp, dst_adj, off_adj), ctx);
1207 emit_a64_mov_i(1, tmp2, off, ctx);
1208 emit(A64_STR32(tmp, dst, tmp2), ctx);
1212 if (is_lsi_offset(off_adj, 1)) {
1213 emit(A64_STRHI(tmp, dst_adj, off_adj), ctx);
1215 emit_a64_mov_i(1, tmp2, off, ctx);
1216 emit(A64_STRH(tmp, dst, tmp2), ctx);
1220 if (is_lsi_offset(off_adj, 0)) {
1221 emit(A64_STRBI(tmp, dst_adj, off_adj), ctx);
1223 emit_a64_mov_i(1, tmp2, off, ctx);
1224 emit(A64_STRB(tmp, dst, tmp2), ctx);
1228 if (is_lsi_offset(off_adj, 3)) {
1229 emit(A64_STR64I(tmp, dst_adj, off_adj), ctx);
1231 emit_a64_mov_i(1, tmp2, off, ctx);
1232 emit(A64_STR64(tmp, dst, tmp2), ctx);
1238 /* STX: *(size *)(dst + off) = src */
1239 case BPF_STX | BPF_MEM | BPF_W:
1240 case BPF_STX | BPF_MEM | BPF_H:
1241 case BPF_STX | BPF_MEM | BPF_B:
1242 case BPF_STX | BPF_MEM | BPF_DW:
1243 if (ctx->fpb_offset > 0 && dst == fp) {
1245 off_adj = off + ctx->fpb_offset;
1250 switch (BPF_SIZE(code)) {
1252 if (is_lsi_offset(off_adj, 2)) {
1253 emit(A64_STR32I(src, dst_adj, off_adj), ctx);
1255 emit_a64_mov_i(1, tmp, off, ctx);
1256 emit(A64_STR32(src, dst, tmp), ctx);
1260 if (is_lsi_offset(off_adj, 1)) {
1261 emit(A64_STRHI(src, dst_adj, off_adj), ctx);
1263 emit_a64_mov_i(1, tmp, off, ctx);
1264 emit(A64_STRH(src, dst, tmp), ctx);
1268 if (is_lsi_offset(off_adj, 0)) {
1269 emit(A64_STRBI(src, dst_adj, off_adj), ctx);
1271 emit_a64_mov_i(1, tmp, off, ctx);
1272 emit(A64_STRB(src, dst, tmp), ctx);
1276 if (is_lsi_offset(off_adj, 3)) {
1277 emit(A64_STR64I(src, dst_adj, off_adj), ctx);
1279 emit_a64_mov_i(1, tmp, off, ctx);
1280 emit(A64_STR64(src, dst, tmp), ctx);
1286 case BPF_STX | BPF_ATOMIC | BPF_W:
1287 case BPF_STX | BPF_ATOMIC | BPF_DW:
1288 if (cpus_have_cap(ARM64_HAS_LSE_ATOMICS))
1289 ret = emit_lse_atomic(insn, ctx);
1291 ret = emit_ll_sc_atomic(insn, ctx);
1297 pr_err_once("unknown opcode %02x\n", code);
1305 * Return 0 if FP may change at runtime, otherwise find the minimum negative
1306 * offset to FP, convert it to a positive number, and align it down to 8 bytes.
1308 static int find_fpb_offset(struct bpf_prog *prog)
1313 for (i = 0; i < prog->len; i++) {
1314 const struct bpf_insn *insn = &prog->insnsi[i];
1315 const u8 class = BPF_CLASS(insn->code);
1316 const u8 mode = BPF_MODE(insn->code);
1317 const u8 src = insn->src_reg;
1318 const u8 dst = insn->dst_reg;
1319 const s32 imm = insn->imm;
1320 const s16 off = insn->off;
1325 /* fp holds atomic operation result */
1326 if (class == BPF_STX && mode == BPF_ATOMIC &&
1327 ((imm == BPF_XCHG ||
1328 imm == (BPF_FETCH | BPF_ADD) ||
1329 imm == (BPF_FETCH | BPF_AND) ||
1330 imm == (BPF_FETCH | BPF_XOR) ||
1331 imm == (BPF_FETCH | BPF_OR)) &&
1335 if (mode == BPF_MEM && dst == BPF_REG_FP &&
1346 /* fp holds load result */
1347 if (dst == BPF_REG_FP)
1350 if (class == BPF_LDX && mode == BPF_MEM &&
1351 src == BPF_REG_FP && off < offset)
1358 /* fp holds ALU result */
1359 if (dst == BPF_REG_FP)
1366 * safely be converted to a positive 'int', since insn->off
1370 /* align down to 8 bytes */
1371 offset = ALIGN_DOWN(offset, 8);
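/* e.g. with a 512-byte stack and an access at [fp, #-512], fpb is
 * placed 512 bytes below fp and the access becomes a plain
 * unsigned-offset ldr/str at [fpb], instead of a mov plus a
 * register-offset load/store
 */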
1377 static int build_body(struct jit_ctx *ctx, bool extra_pass)
1379 const struct bpf_prog *prog = ctx->prog;
1383 * - offset[0] - offset of the end of prologue,
1384 * start of the 1st instruction.
1385 * - offset[1] - offset of the end of 1st instruction,
1386 * start of the 2nd instruction
1388 * - offset[3] - offset of the end of 3rd instruction,
1389 * start of 4th instruction
1391 for (i = 0; i < prog->len; i++) {
1392 const struct bpf_insn *insn = &prog->insnsi[i];
1395 if (ctx->image == NULL)
1396 ctx->offset[i] = ctx->idx;
1397 ret = build_insn(insn, ctx, extra_pass);
1400 if (ctx->image == NULL)
1401 ctx->offset[i] = ctx->idx;
1408 * offset is allocated with prog->len + 1 so fill in
1409 * the last element with the offset after the last
1410 * instruction (end of program)
1412 if (ctx->image == NULL)
1413 ctx->offset[i] = ctx->idx;
1418 static int validate_code(struct jit_ctx *ctx)
1422 for (i = 0; i < ctx->idx; i++) {
1423 u32 a64_insn = le32_to_cpu(ctx->image[i]);
1425 if (a64_insn == AARCH64_BREAK_FAULT)
1431 static int validate_ctx(struct jit_ctx *ctx)
1433 if (validate_code(ctx))
1436 if (WARN_ON_ONCE(ctx->exentry_idx != ctx->prog->aux->num_exentries))
1442 static inline void bpf_flush_icache(void *start, void *end)
1444 flush_icache_range((unsigned long)start, (unsigned long)end);
1447 struct arm64_jit_data {
1448 struct bpf_binary_header *header;
1453 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
1455 int image_size, prog_size, extable_size, extable_align, extable_offset;
1456 struct bpf_prog *tmp, *orig_prog = prog;
1457 struct bpf_binary_header *header;
1458 struct arm64_jit_data *jit_data;
1459 bool was_classic = bpf_prog_was_classic(prog);
1460 bool tmp_blinded = false;
1461 bool extra_pass = false;
1465 if (!prog->jit_requested)
1468 tmp = bpf_jit_blind_constants(prog);
1469 /* If blinding was requested and we failed during blinding,
1470 * we must fall back to the interpreter.
1479 jit_data = prog->aux->jit_data;
1481 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
1486 prog->aux->jit_data = jit_data;
1488 if (jit_data->ctx.offset) {
1489 ctx = jit_data->ctx;
1490 image_ptr = jit_data->image;
1491 header = jit_data->header;
1493 prog_size = sizeof(u32) * ctx.idx;
1496 memset(&ctx, 0, sizeof(ctx));
1499 ctx.offset = kvcalloc(prog->len + 1, sizeof(int), GFP_KERNEL);
1500 if (ctx.offset == NULL) {
1505 ctx.fpb_offset = find_fpb_offset(prog);
1508 * 1. Initial fake pass to compute ctx->idx and ctx->offset.
1510 * BPF line info needs ctx->offset[i] to be the offset of
1511 * instruction[i] in jited image, so build prologue first.
1513 if (build_prologue(&ctx, was_classic)) {
1518 if (build_body(&ctx, extra_pass)) {
1523 ctx.epilogue_offset = ctx.idx;
1524 build_epilogue(&ctx);
1527 extable_align = __alignof__(struct exception_table_entry);
1528 extable_size = prog->aux->num_exentries *
1529 sizeof(struct exception_table_entry);
1531 /* Now we know the actual image size. */
1532 prog_size = sizeof(u32) * ctx.idx;
1533 /* also allocate space for plt target */
1534 extable_offset = round_up(prog_size + PLT_TARGET_SIZE, extable_align);
1535 image_size = extable_offset + extable_size;
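/* resulting image layout:
 * [ JITed program | plt target (8 bytes) | pad | exception table ]
 */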
1536 header = bpf_jit_binary_alloc(image_size, &image_ptr,
1537 sizeof(u32), jit_fill_hole);
1538 if (header == NULL) {
1543 /* 2. Now, the actual pass. */
1545 ctx.image = (__le32 *)image_ptr;
1547 prog->aux->extable = (void *)image_ptr + extable_offset;
1550 ctx.exentry_idx = 0;
1552 build_prologue(&ctx, was_classic);
1554 if (build_body(&ctx, extra_pass)) {
1555 bpf_jit_binary_free(header);
1560 build_epilogue(&ctx);
1563 /* 3. Extra pass to validate JITed code. */
1564 if (validate_ctx(&ctx)) {
1565 bpf_jit_binary_free(header);
1570 /* And we're done. */
1571 if (bpf_jit_enable > 1)
1572 bpf_jit_dump(prog->len, prog_size, 2, ctx.image);
1574 bpf_flush_icache(header, ctx.image + ctx.idx);
1576 if (!prog->is_func || extra_pass) {
1577 if (extra_pass && ctx.idx != jit_data->ctx.idx) {
1578 pr_err_once("multi-func JIT bug %d != %d\n",
1579 ctx.idx, jit_data->ctx.idx);
1580 bpf_jit_binary_free(header);
1581 prog->bpf_func = NULL;
1583 prog->jited_len = 0;
1586 bpf_jit_binary_lock_ro(header);
1588 jit_data->ctx = ctx;
1589 jit_data->image = image_ptr;
1590 jit_data->header = header;
1592 prog->bpf_func = (void *)ctx.image;
1594 prog->jited_len = prog_size;
1596 if (!prog->is_func || extra_pass) {
1599 /* offset[prog->len] is the size of program */
1600 for (i = 0; i <= prog->len; i++)
1601 ctx.offset[i] *= AARCH64_INSN_SIZE;
1602 bpf_prog_fill_jited_linfo(prog, ctx.offset + 1);
1606 prog->aux->jit_data = NULL;
1610 bpf_jit_prog_release_other(prog, prog == orig_prog ?
1615 bool bpf_jit_supports_kfunc_call(void)
1620 u64 bpf_jit_alloc_exec_limit(void)
1622 return VMALLOC_END - VMALLOC_START;
1625 void *bpf_jit_alloc_exec(unsigned long size)
1627 /* Memory is intended to be executable, reset the pointer tag. */
1628 return kasan_reset_tag(vmalloc(size));
1631 void bpf_jit_free_exec(void *addr)
1636 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
1637 bool bpf_jit_supports_subprog_tailcalls(void)
1642 static void invoke_bpf_prog(struct jit_ctx *ctx, struct bpf_tramp_link *l,
1643 int args_off, int retval_off, int run_ctx_off,
1649 struct bpf_prog *p = l->link.prog;
1650 int cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1652 enter_prog = (u64)bpf_trampoline_enter(p);
1653 exit_prog = (u64)bpf_trampoline_exit(p);
1655 if (l->cookie == 0) {
1656 /* if cookie is zero, one instruction is enough to store it */
1657 emit(A64_STR64I(A64_ZR, A64_SP, run_ctx_off + cookie_off), ctx);
1659 emit_a64_mov_i64(A64_R(10), l->cookie, ctx);
1660 emit(A64_STR64I(A64_R(10), A64_SP, run_ctx_off + cookie_off),
1664 /* save p to callee saved register x19 to avoid loading p with mov_i64
1667 emit_addr_mov_i64(A64_R(19), (const u64)p, ctx);
1670 emit(A64_MOV(1, A64_R(0), A64_R(19)), ctx);
1671 /* arg2: &run_ctx */
1672 emit(A64_ADD_I(1, A64_R(1), A64_SP, run_ctx_off), ctx);
1674 emit_call(enter_prog, ctx);
1676 /* if (__bpf_prog_enter(prog) == 0)
1677 * goto skip_exec_of_prog;
1679 branch = ctx->image + ctx->idx;
1682 /* save return value to callee saved register x20 */
1683 emit(A64_MOV(1, A64_R(20), A64_R(0)), ctx);
1685 emit(A64_ADD_I(1, A64_R(0), A64_SP, args_off), ctx);
1687 emit_addr_mov_i64(A64_R(1), (const u64)p->insnsi, ctx);
1689 emit_call((const u64)p->bpf_func, ctx);
1692 emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
1695 int offset = &ctx->image[ctx->idx] - branch;
1696 *branch = cpu_to_le32(A64_CBZ(1, A64_R(0), offset));
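/* on the first (image == NULL) pass the skip offset is unknown, so a
 * placeholder was emitted at *branch; on the image pass it is
 * overwritten here with the cbz that jumps over the prog invocation
 * when __bpf_prog_enter() returned 0
 */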
1700 emit(A64_MOV(1, A64_R(0), A64_R(19)), ctx);
1701 /* arg2: start time */
1702 emit(A64_MOV(1, A64_R(1), A64_R(20)), ctx);
1703 /* arg3: &run_ctx */
1704 emit(A64_ADD_I(1, A64_R(2), A64_SP, run_ctx_off), ctx);
1706 emit_call(exit_prog, ctx);
1709 static void invoke_bpf_mod_ret(struct jit_ctx *ctx, struct bpf_tramp_links *tl,
1710 int args_off, int retval_off, int run_ctx_off,
1715 /* The first fmod_ret program will receive a garbage return value.
1716 * Set this to 0 to avoid confusing the program.
1718 emit(A64_STR64I(A64_ZR, A64_SP, retval_off), ctx);
1719 for (i = 0; i < tl->nr_links; i++) {
1720 invoke_bpf_prog(ctx, tl->links[i], args_off, retval_off,
1722 /* if (*(u64 *)(sp + retval_off) != 0)
1725 emit(A64_LDR64I(A64_R(10), A64_SP, retval_off), ctx);
1726 /* Save the location of branch, and generate a nop.
1727 * This nop will be replaced with a cbnz later.
1729 branches[i] = ctx->image + ctx->idx;
1734 static void save_args(struct jit_ctx *ctx, int args_off, int nregs)
1738 for (i = 0; i < nregs; i++) {
1739 emit(A64_STR64I(i, A64_SP, args_off), ctx);
1744 static void restore_args(struct jit_ctx *ctx, int args_off, int nregs)
1748 for (i = 0; i < nregs; i++) {
1749 emit(A64_LDR64I(i, A64_SP, args_off), ctx);
1754 /* Based on the x86 implementation of arch_prepare_bpf_trampoline().
1756 * bpf prog and function entry before bpf trampoline hooked:
1760 * bpf prog and function entry after bpf trampoline hooked:
1762 * bl <bpf_trampoline or plt>
1765 static int prepare_trampoline(struct jit_ctx *ctx, struct bpf_tramp_image *im,
1766 struct bpf_tramp_links *tlinks, void *orig_call,
1767 int nregs, u32 flags)
1778 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
1779 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
1780 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
1782 __le32 **branches = NULL;
1784 /* trampoline stack layout:
1787 * SP + retaddr_off [ self ip ]
1790 * [ padding ] align SP to multiples of 16
1792 * [ x20 ] callee saved reg x20
1793 * SP + regs_off [ x19 ] callee saved reg x19
1795 * SP + retval_off [ return value ] BPF_TRAMP_F_CALL_ORIG or
1796 * BPF_TRAMP_F_RET_FENTRY_RET
1800 * SP + args_off [ arg reg 1 ]
1802 * SP + nregs_off [ arg regs count ]
1804 * SP + ip_off [ traced function ] BPF_TRAMP_F_IP_ARG flag
1806 * SP + run_ctx_off [ bpf_tramp_run_ctx ]
1810 run_ctx_off = stack_size;
1811 /* room for bpf_tramp_run_ctx */
1812 stack_size += round_up(sizeof(struct bpf_tramp_run_ctx), 8);
1814 ip_off = stack_size;
1815 /* room for IP address argument */
1816 if (flags & BPF_TRAMP_F_IP_ARG)
1819 nregs_off = stack_size;
1820 /* room for args count */
1823 args_off = stack_size;
1825 stack_size += nregs * 8;
1827 /* room for return value */
1828 retval_off = stack_size;
1829 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
1833 /* room for callee saved registers, currently x19 and x20 are used */
1834 regs_off = stack_size;
1837 /* round up to multiples of 16 to avoid SPAlignmentFault */
1838 stack_size = round_up(stack_size, 16);
1840 /* the return address sits just above FP */
1841 retaddr_off = stack_size + 8;
1843 /* bpf trampoline may be invoked by 3 instruction types:
1844 * 1. bl, attached to bpf prog or kernel function via short jump
1845 * 2. br, attached to bpf prog or kernel function via long jump
1846 * 3. blr, working as a function pointer, used by struct_ops.
1847 * So BTI_JC should be used here to support both br and blr.
1849 emit_bti(A64_BTI_JC, ctx);
1851 /* frame for parent function */
1852 emit(A64_PUSH(A64_FP, A64_R(9), A64_SP), ctx);
1853 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
1855 /* frame for patched function */
1856 emit(A64_PUSH(A64_FP, A64_LR, A64_SP), ctx);
1857 emit(A64_MOV(1, A64_FP, A64_SP), ctx);
1859 /* allocate stack space */
1860 emit(A64_SUB_I(1, A64_SP, A64_SP, stack_size), ctx);
1862 if (flags & BPF_TRAMP_F_IP_ARG) {
1863 /* save ip address of the traced function */
1864 emit_addr_mov_i64(A64_R(10), (const u64)orig_call, ctx);
1865 emit(A64_STR64I(A64_R(10), A64_SP, ip_off), ctx);
1868 /* save arg regs count */
1869 emit(A64_MOVZ(1, A64_R(10), nregs, 0), ctx);
1870 emit(A64_STR64I(A64_R(10), A64_SP, nregs_off), ctx);
1873 save_args(ctx, args_off, nregs);
1875 /* save callee saved registers */
1876 emit(A64_STR64I(A64_R(19), A64_SP, regs_off), ctx);
1877 emit(A64_STR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
1879 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1880 emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
1881 emit_call((const u64)__bpf_tramp_enter, ctx);
1884 for (i = 0; i < fentry->nr_links; i++)
1885 invoke_bpf_prog(ctx, fentry->links[i], args_off,
1886 retval_off, run_ctx_off,
1887 flags & BPF_TRAMP_F_RET_FENTRY_RET);
1889 if (fmod_ret->nr_links) {
1890 branches = kcalloc(fmod_ret->nr_links, sizeof(__le32 *),
1895 invoke_bpf_mod_ret(ctx, fmod_ret, args_off, retval_off,
1896 run_ctx_off, branches);
1899 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1900 restore_args(ctx, args_off, nregs);
1901 /* call original func */
1902 emit(A64_LDR64I(A64_R(10), A64_SP, retaddr_off), ctx);
1903 emit(A64_ADR(A64_LR, AARCH64_INSN_SIZE * 2), ctx);
1904 emit(A64_RET(A64_R(10)), ctx);
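/* the adr above primed lr with the address of the instruction right
 * after this ret, so "ret x10" acts as a call into the traced
 * function's body (just past its patched entry) and that body's own
 * return lands back here
 */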
1905 /* store return value */
1906 emit(A64_STR64I(A64_R(0), A64_SP, retval_off), ctx);
1907 /* reserve a nop for bpf_tramp_image_put */
1908 im->ip_after_call = ctx->image + ctx->idx;
1912 /* update the branches saved in invoke_bpf_mod_ret with cbnz */
1913 for (i = 0; i < fmod_ret->nr_links && ctx->image != NULL; i++) {
1914 int offset = &ctx->image[ctx->idx] - branches[i];
1915 *branches[i] = cpu_to_le32(A64_CBNZ(1, A64_R(10), offset));
1918 for (i = 0; i < fexit->nr_links; i++)
1919 invoke_bpf_prog(ctx, fexit->links[i], args_off, retval_off,
1920 run_ctx_off, false);
1922 if (flags & BPF_TRAMP_F_CALL_ORIG) {
1923 im->ip_epilogue = ctx->image + ctx->idx;
1924 emit_addr_mov_i64(A64_R(0), (const u64)im, ctx);
1925 emit_call((const u64)__bpf_tramp_exit, ctx);
1928 if (flags & BPF_TRAMP_F_RESTORE_REGS)
1929 restore_args(ctx, args_off, nregs);
1931 /* restore callee saved register x19 and x20 */
1932 emit(A64_LDR64I(A64_R(19), A64_SP, regs_off), ctx);
1933 emit(A64_LDR64I(A64_R(20), A64_SP, regs_off + 8), ctx);
1936 emit(A64_LDR64I(A64_R(0), A64_SP, retval_off), ctx);
1939 emit(A64_MOV(1, A64_SP, A64_FP), ctx);
1942 emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
1943 emit(A64_POP(A64_FP, A64_R(9), A64_SP), ctx);
1945 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
1946 /* skip patched function, return to parent */
1947 emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
1948 emit(A64_RET(A64_R(9)), ctx);
1950 /* return to patched function */
1951 emit(A64_MOV(1, A64_R(10), A64_LR), ctx);
1952 emit(A64_MOV(1, A64_LR, A64_R(9)), ctx);
1953 emit(A64_RET(A64_R(10)), ctx);
1957 bpf_flush_icache(ctx->image, ctx->image + ctx->idx);
1964 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image,
1965 void *image_end, const struct btf_func_model *m,
1966 u32 flags, struct bpf_tramp_links *tlinks,
1970 int nregs = m->nr_args;
1971 int max_insns = ((long)image_end - (long)image) / AARCH64_INSN_SIZE;
1972 struct jit_ctx ctx = {
1977 /* extra registers needed for struct argument */
1978 for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
1979 /* The arg_size is at most 16 bytes, enforced by the verifier. */
1980 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
1981 nregs += (m->arg_size[i] + 7) / 8 - 1;
1984 /* the first 8 registers are used for arguments */
1988 ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nregs, flags);
1992 if (ret > max_insns)
1998 jit_fill_hole(image, (unsigned int)(image_end - image));
1999 ret = prepare_trampoline(&ctx, im, tlinks, orig_call, nregs, flags);
2001 if (ret > 0 && validate_code(&ctx) < 0)
2005 ret *= AARCH64_INSN_SIZE;
2010 static bool is_long_jump(void *ip, void *target)
2014 /* NULL target means this is a NOP */
2018 offset = (long)target - (long)ip;
2019 return offset < -SZ_128M || offset >= SZ_128M;
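/* +/-128MB is the reach of an A64 b/bl immediate (signed 26-bit
 * instruction offset); anything further must go through the plt
 */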
2022 static int gen_branch_or_nop(enum aarch64_insn_branch_type type, void *ip,
2023 void *addr, void *plt, u32 *insn)
2028 *insn = aarch64_insn_gen_nop();
2032 if (is_long_jump(ip, addr))
2037 *insn = aarch64_insn_gen_branch_imm((unsigned long)ip,
2038 (unsigned long)target,
2041 return *insn != AARCH64_BREAK_FAULT ? 0 : -EFAULT;
2044 /* Replace the branch instruction from @ip to @old_addr in a bpf prog or a bpf
2045 * trampoline with the branch instruction from @ip to @new_addr. If @old_addr
2046 * or @new_addr is NULL, the old or new instruction is NOP.
2048 * When @ip is the bpf prog entry, a bpf trampoline is being attached or
2049 * detached. Since bpf trampoline and bpf prog are allocated separately with
2050 * vmalloc, the address distance may exceed 128MB, the maximum branch range.
2051 * So long jump should be handled.
2053 * When a bpf prog is constructed, a plt pointing to empty trampoline
2054 * dummy_tramp is placed at the end:
2066 * .quad dummy_tramp // plt target
2068 * This is also the state when no trampoline is attached.
2070 * When a short-jump bpf trampoline is attached, the patchsite is patched
2071 * to a bl instruction to the trampoline directly:
2075 * bl <short-jump bpf trampoline address> // patchsite
2083 * .quad dummy_tramp // plt target
2085 * When a long-jump bpf trampoline is attached, the plt target is filled with
2086 * the trampoline address and the patchsite is patched to a bl instruction to
2091 * bl plt // patchsite
2099 * .quad <long-jump bpf trampoline address> // plt target
2101 * The dummy_tramp is used to prevent another CPU from jumping to unknown
2102 * locations during the patching process, making the patching process easier.
2104 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type poke_type,
2105 void *old_addr, void *new_addr)
2111 struct bpf_plt *plt = NULL;
2112 unsigned long size = 0UL;
2113 unsigned long offset = ~0UL;
2114 enum aarch64_insn_branch_type branch_type;
2115 char namebuf[KSYM_NAME_LEN];
2117 u64 plt_target = 0ULL;
2118 bool poking_bpf_entry;
2120 if (!__bpf_address_lookup((unsigned long)ip, &size, &offset, namebuf))
2121 /* Only poking bpf text is supported. Since kernel function
2122 * entry is set up by ftrace, we rely on ftrace to poke kernel
2127 image = ip - offset;
2128 /* zero offset means we're poking bpf prog entry */
2129 poking_bpf_entry = (offset == 0UL);
2131 /* bpf prog entry, find plt and the real patchsite */
2132 if (poking_bpf_entry) {
2133 /* the plt sits at the end of the bpf prog */
2134 plt = image + size - PLT_TARGET_OFFSET;
2136 /* skip to the nop instruction in bpf prog entry:
2137 * bti c // if BTI enabled
2141 ip = image + POKE_OFFSET * AARCH64_INSN_SIZE;
2144 /* long jump is only possible at bpf prog entry */
2145 if (WARN_ON((is_long_jump(ip, new_addr) || is_long_jump(ip, old_addr)) &&
2149 if (poke_type == BPF_MOD_CALL)
2150 branch_type = AARCH64_INSN_BRANCH_LINK;
2152 branch_type = AARCH64_INSN_BRANCH_NOLINK;
2154 if (gen_branch_or_nop(branch_type, ip, old_addr, plt, &old_insn) < 0)
2157 if (gen_branch_or_nop(branch_type, ip, new_addr, plt, &new_insn) < 0)
2160 if (is_long_jump(ip, new_addr))
2161 plt_target = (u64)new_addr;
2162 else if (is_long_jump(ip, old_addr))
2163 /* if the old target is a long jump and the new target is not,
2164 * restore the plt target to dummy_tramp, so there is always a
2165 * legal and harmless address stored in plt target, and we'll
2166 * never jump from plt to an unknown place.
2168 plt_target = (u64)&dummy_tramp;
2171 /* non-zero plt_target indicates we're patching a bpf prog,
2172 * which is read only.
2174 if (set_memory_rw(PAGE_MASK & ((uintptr_t)&plt->target), 1))
2176 WRITE_ONCE(plt->target, plt_target);
2177 set_memory_ro(PAGE_MASK & ((uintptr_t)&plt->target), 1);
2178 /* since plt target points to either the new trampoline
2179 * or dummy_tramp, even if another CPU reads the old plt
2180 * target value before fetching the bl instruction to plt,
2181 * it will be brought back by dummy_tramp, so no barrier is
2186 /* if the old target and the new target are both long jumps, no
2187 * patching is required
2189 if (old_insn == new_insn)
2192 mutex_lock(&text_mutex);
2193 if (aarch64_insn_read(ip, &replaced)) {
2198 if (replaced != old_insn) {
2203 /* We call aarch64_insn_patch_text_nosync() to replace instruction
2204 * atomically, so no other CPUs will fetch a half-new and half-old
2205 * instruction. But there is a chance that another CPU executes the
2206 * old instruction after the patching operation finishes (e.g.,
2207 * pipeline not flushed, or icache not synchronized yet).
2209 * 1. when a new trampoline is attached, it is not a problem for
2210 * different CPUs to jump to different trampolines temporarily.
2212 * 2. when an old trampoline is freed, we should wait for all other
2213 * CPUs to exit the trampoline and make sure the trampoline is no
2214 * longer reachable, since the bpf_tramp_image_put() function already
2215 * uses percpu_ref and task-based rcu to do the sync, no need to call
2216 * the sync version here, see bpf_tramp_image_put() for details.
2218 ret = aarch64_insn_patch_text_nosync(ip, new_insn);
2220 mutex_unlock(&text_mutex);