1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * BPF JIT compiler
4 *
5 * Copyright (C) 2011-2013 Eric Dumazet (eric.dumazet@gmail.com)
6 * Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
7 */
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/if_vlan.h>
11 #include <linux/bpf.h>
12 #include <linux/memory.h>
13 #include <linux/sort.h>
14 #include <asm/extable.h>
15 #include <asm/set_memory.h>
16 #include <asm/nospec-branch.h>
17 #include <asm/text-patching.h>
19 static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
32 #define EMIT(bytes, len) \
33 do { prog = emit_code(prog, bytes, len); } while (0)
35 #define EMIT1(b1) EMIT(b1, 1)
36 #define EMIT2(b1, b2) EMIT((b1) + ((b2) << 8), 2)
37 #define EMIT3(b1, b2, b3) EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
38 #define EMIT4(b1, b2, b3, b4) EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
40 #define EMIT1_off32(b1, off) \
41 do { EMIT1(b1); EMIT(off, 4); } while (0)
42 #define EMIT2_off32(b1, b2, off) \
43 do { EMIT2(b1, b2); EMIT(off, 4); } while (0)
44 #define EMIT3_off32(b1, b2, b3, off) \
45 do { EMIT3(b1, b2, b3); EMIT(off, 4); } while (0)
46 #define EMIT4_off32(b1, b2, b3, b4, off) \
47 do { EMIT4(b1, b2, b3, b4); EMIT(off, 4); } while (0)
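/*
 * Worked example (editor's note, not part of the original source): the
 * EMIT() helpers above pack instruction bytes little-endian into a u32,
 * so EMIT3(0x48, 0x89, 0xC7) emits the byte sequence 48 89 c7, i.e.
 * "mov rdi, rax", and EMIT3_off32(0x48, 0x81, 0xEC, 128) appends a
 * 4-byte immediate to produce 48 81 ec 80 00 00 00, i.e. "sub rsp, 128".
 */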
49 #ifdef CONFIG_X86_KERNEL_IBT
50 #define EMIT_ENDBR() EMIT(gen_endbr(), 4)
51 #else
52 #define EMIT_ENDBR()
53 #endif
55 static bool is_imm8(int value)
56 {
57 return value <= 127 && value >= -128;
58 }
60 static bool is_simm32(s64 value)
61 {
62 return value == (s64)(s32)value;
63 }
65 static bool is_uimm32(u64 value)
66 {
67 return value == (u64)(u32)value;
68 }
71 #define EMIT_mov(DST, SRC) \
72 do { \
73 if (DST != SRC) \
74 EMIT3(add_2mod(0x48, DST, SRC), 0x89, add_2reg(0xC0, DST, SRC)); \
75 } while (0)
77 static int bpf_size_to_x86_bytes(int bpf_size)
78 {
79 if (bpf_size == BPF_W)
80 return 4;
81 else if (bpf_size == BPF_H)
82 return 2;
83 else if (bpf_size == BPF_B)
84 return 1;
85 else if (bpf_size == BPF_DW)
86 return 4; /* imm32 */
87 else
88 return 0;
89 }
91 /*
92 * List of x86 conditional jump opcodes (. + s8)
93 * Add 0x10 (and an extra 0x0f) to generate far jumps (. + s32)
94 */
95 #define X86_JB  0x72
96 #define X86_JAE 0x73
97 #define X86_JE  0x74
98 #define X86_JNE 0x75
99 #define X86_JBE 0x76
100 #define X86_JA  0x77
101 #define X86_JL  0x7C
102 #define X86_JGE 0x7D
103 #define X86_JLE 0x7E
104 #define X86_JG  0x7F
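/*
 * Example (editor's note): X86_JE is 0x74, so "je ." with an 8-bit
 * displacement encodes as 74 xx; adding 0x10 plus the 0x0f escape gives
 * the far form 0f 84 xx xx xx xx with a 32-bit displacement, which is
 * exactly what the emit_cond_jmp path below does via
 * EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset).
 */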
106 /* Pick a register outside of BPF range for JIT internal work */
107 #define AUX_REG (MAX_BPF_JIT_REG + 1)
108 #define X86_REG_R9 (MAX_BPF_JIT_REG + 2)
110 /*
111 * The following table maps BPF registers to x86-64 registers.
112 *
113 * x86-64 register R12 is unused, since if it were used as the base address
114 * register in load/store instructions, it would always need an
115 * extra byte of encoding, and it is callee saved.
116 *
117 * x86-64 register R9 is not used by BPF programs, but can be used by the BPF
118 * trampoline. x86-64 register R10 is used for blinding (if enabled).
119 */
120 static const int reg2hex[] = {
121 [BPF_REG_0] = 0, /* RAX */
122 [BPF_REG_1] = 7, /* RDI */
123 [BPF_REG_2] = 6, /* RSI */
124 [BPF_REG_3] = 2, /* RDX */
125 [BPF_REG_4] = 1, /* RCX */
126 [BPF_REG_5] = 0, /* R8 */
127 [BPF_REG_6] = 3, /* RBX callee saved */
128 [BPF_REG_7] = 5, /* R13 callee saved */
129 [BPF_REG_8] = 6, /* R14 callee saved */
130 [BPF_REG_9] = 7, /* R15 callee saved */
131 [BPF_REG_FP] = 5, /* RBP readonly */
132 [BPF_REG_AX] = 2, /* R10 temp register */
133 [AUX_REG] = 3, /* R11 temp register */
134 [X86_REG_R9] = 1, /* R9 register, 6th function argument */
135 };
137 static const int reg2pt_regs[] = {
138 [BPF_REG_0] = offsetof(struct pt_regs, ax),
139 [BPF_REG_1] = offsetof(struct pt_regs, di),
140 [BPF_REG_2] = offsetof(struct pt_regs, si),
141 [BPF_REG_3] = offsetof(struct pt_regs, dx),
142 [BPF_REG_4] = offsetof(struct pt_regs, cx),
143 [BPF_REG_5] = offsetof(struct pt_regs, r8),
144 [BPF_REG_6] = offsetof(struct pt_regs, bx),
145 [BPF_REG_7] = offsetof(struct pt_regs, r13),
146 [BPF_REG_8] = offsetof(struct pt_regs, r14),
147 [BPF_REG_9] = offsetof(struct pt_regs, r15),
148 };
150 /*
151 * is_ereg() == true if BPF register 'reg' maps to x86-64 r8..r15,
152 * which need an extra byte of encoding;
153 * rax,rcx,...,rbp have simpler encodings.
154 */
155 static bool is_ereg(u32 reg)
156 {
157 return (1 << reg) & (BIT(BPF_REG_5) |
158 BIT(AUX_REG) |
159 BIT(BPF_REG_7) |
160 BIT(BPF_REG_8) |
161 BIT(BPF_REG_9) |
162 BIT(X86_REG_R9) |
163 BIT(BPF_REG_AX));
164 }
166 /*
167 * is_ereg_8l() == true if BPF register 'reg' is mapped to access x86-64
168 * lower 8-bit registers dil,sil,bpl,spl,r8b..r15b, which need an extra byte
169 * of encoding. al,cl,dl,bl have simpler encodings.
170 */
171 static bool is_ereg_8l(u32 reg)
172 {
173 return is_ereg(reg) ||
174 (1 << reg) & (BIT(BPF_REG_1) |
175 BIT(BPF_REG_2) |
176 BIT(BPF_REG_FP));
177 }
179 static bool is_axreg(u32 reg)
180 {
181 return reg == BPF_REG_0;
182 }
184 /* Add modifiers if 'reg' maps to x86-64 registers R8..R15 */
185 static u8 add_1mod(u8 byte, u32 reg)
186 {
187 if (is_ereg(reg))
188 byte |= 1;
189 return byte;
190 }
191
192 static u8 add_2mod(u8 byte, u32 r1, u32 r2)
193 {
194 if (is_ereg(r1))
195 byte |= 1;
196 if (is_ereg(r2))
197 byte |= 4;
198 return byte;
199 }
201 /* Encode 'dst_reg' register into x86-64 opcode 'byte' */
202 static u8 add_1reg(u8 byte, u32 dst_reg)
203 {
204 return byte + reg2hex[dst_reg];
205 }
207 /* Encode 'dst_reg' and 'src_reg' registers into x86-64 opcode 'byte' */
208 static u8 add_2reg(u8 byte, u32 dst_reg, u32 src_reg)
209 {
210 return byte + reg2hex[dst_reg] + (reg2hex[src_reg] << 3);
211 }
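/*
 * Worked example (editor's note): for "mov rdi, rsi" (BPF_REG_1 =
 * BPF_REG_2), neither register is extended, so add_2mod(0x48, ...)
 * leaves the REX prefix at 0x48, and add_2reg(0xC0, BPF_REG_1,
 * BPF_REG_2) = 0xC0 + reg2hex[BPF_REG_1] + (reg2hex[BPF_REG_2] << 3) =
 * 0xC0 + 7 + (6 << 3) = 0xF7, giving the byte sequence 48 89 f7.
 */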
213 /* Some 1-byte opcodes for binary ALU operations */
214 static u8 simple_alu_opcodes[] = {
215 [BPF_ADD] = 0x01,
216 [BPF_SUB] = 0x29,
217 [BPF_AND] = 0x21,
218 [BPF_OR] = 0x09,
219 [BPF_XOR] = 0x31,
220 [BPF_LSH] = 0xE0,
221 [BPF_RSH] = 0xE8,
222 [BPF_ARSH] = 0xF8,
223 };
225 static void jit_fill_hole(void *area, unsigned int size)
226 {
227 /* Fill whole space with INT3 instructions */
228 memset(area, 0xcc, size);
229 }
231 int bpf_arch_text_invalidate(void *dst, size_t len)
232 {
233 return IS_ERR_OR_NULL(text_poke_set(dst, 0xcc, len));
234 }
236 struct jit_context {
237 int cleanup_addr; /* Epilogue code offset */
238
239 /*
240 * Program specific offsets of labels in the code; these rely on the
241 * JIT doing at least 2 passes, recording the position on the first
242 * pass, only to generate the correct offset on the second pass.
243 */
244 int tail_call_direct_label;
245 int tail_call_indirect_label;
246 };
248 /* Maximum number of bytes emitted while JITing one eBPF insn */
249 #define BPF_MAX_INSN_SIZE 128
250 #define BPF_INSN_SAFETY 64
252 /* Number of bytes emit_patch() needs to generate instructions */
253 #define X86_PATCH_SIZE 5
254 /* Number of bytes that will be skipped on tailcall */
255 #define X86_TAIL_CALL_OFFSET (11 + ENDBR_INSN_SIZE)
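/*
 * Editor's note: the 11 bytes skipped by a tail call are the start of
 * the prologue emitted below: 5 bytes of nops (X86_PATCH_SIZE) + 2 bytes
 * ("xor eax, eax" or "nop2") + 1 byte ("push rbp") + 3 bytes
 * ("mov rbp, rsp"), plus the ENDBR instruction on IBT builds.
 */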
257 static void push_callee_regs(u8 **pprog, bool *callee_regs_used)
258 {
259 u8 *prog = *pprog;
260
261 if (callee_regs_used[0])
262 EMIT1(0x53); /* push rbx */
263 if (callee_regs_used[1])
264 EMIT2(0x41, 0x55); /* push r13 */
265 if (callee_regs_used[2])
266 EMIT2(0x41, 0x56); /* push r14 */
267 if (callee_regs_used[3])
268 EMIT2(0x41, 0x57); /* push r15 */
269 *pprog = prog;
270 }
272 static void pop_callee_regs(u8 **pprog, bool *callee_regs_used)
273 {
274 u8 *prog = *pprog;
275
276 if (callee_regs_used[3])
277 EMIT2(0x41, 0x5F); /* pop r15 */
278 if (callee_regs_used[2])
279 EMIT2(0x41, 0x5E); /* pop r14 */
280 if (callee_regs_used[1])
281 EMIT2(0x41, 0x5D); /* pop r13 */
282 if (callee_regs_used[0])
283 EMIT1(0x5B); /* pop rbx */
284 *pprog = prog;
285 }
287 /*
288 * Emit x86-64 prologue code for the BPF program.
289 * The bpf_tail_call helper will skip the first X86_TAIL_CALL_OFFSET bytes
290 * while jumping to another program.
291 */
292 static void emit_prologue(u8 **pprog, u32 stack_depth, bool ebpf_from_cbpf,
293 bool tail_call_reachable, bool is_subprog)
297 /* BPF trampoline can be made to work without these nops,
298 * but let's waste 5 bytes for now and optimize later.
299 */
301 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
302 prog += X86_PATCH_SIZE;
303 if (!ebpf_from_cbpf) {
304 if (tail_call_reachable && !is_subprog)
305 EMIT2(0x31, 0xC0); /* xor eax, eax */
306 else
307 EMIT2(0x66, 0x90); /* nop2 */
308 }
309 EMIT1(0x55); /* push rbp */
310 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
312 /* X86_TAIL_CALL_OFFSET is here */
313 EMIT_ENDBR();
314
315 /* sub rsp, rounded_stack_depth */
316 if (stack_depth)
317 EMIT3_off32(0x48, 0x81, 0xEC, round_up(stack_depth, 8));
318 if (tail_call_reachable)
319 EMIT1(0x50); /* push rax */
320 *pprog = prog;
321 }
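/*
 * Editor's sketch of the emitted prologue for a main program with
 * stack_depth = 16 that can reach a tail call:
 *
 *	nop5			// patch site for trampolines
 *	xor eax, eax		// zero-initialize tail_call_cnt
 *	push rbp
 *	mov rbp, rsp		// <- tail calls land right after this insn
 *	sub rsp, 16
 *	push rax		// stack slot holding tail_call_cnt
 */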
323 static int emit_patch(u8 **pprog, void *func, void *ip, u8 opcode)
324 {
325 u8 *prog = *pprog;
326 s64 offset;
327
328 offset = func - (ip + X86_PATCH_SIZE);
329 if (!is_simm32(offset)) {
330 pr_err("Target call %p is out of range\n", func);
331 return -ERANGE;
332 }
333 EMIT1_off32(opcode, offset);
334 *pprog = prog;
335 return 0;
336 }
338 static int emit_call(u8 **pprog, void *func, void *ip)
339 {
340 return emit_patch(pprog, func, ip, 0xE8);
341 }
343 static int emit_jump(u8 **pprog, void *func, void *ip)
344 {
345 return emit_patch(pprog, func, ip, 0xE9);
346 }
348 static int __bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
349 void *old_addr, void *new_addr)
351 const u8 *nop_insn = x86_nops[5];
352 u8 old_insn[X86_PATCH_SIZE];
353 u8 new_insn[X86_PATCH_SIZE];
357 memcpy(old_insn, nop_insn, X86_PATCH_SIZE);
360 ret = t == BPF_MOD_CALL ?
361 emit_call(&prog, old_addr, ip) :
362 emit_jump(&prog, old_addr, ip);
367 memcpy(new_insn, nop_insn, X86_PATCH_SIZE);
370 ret = t == BPF_MOD_CALL ?
371 emit_call(&prog, new_addr, ip) :
372 emit_jump(&prog, new_addr, ip);
378 mutex_lock(&text_mutex);
379 if (memcmp(ip, old_insn, X86_PATCH_SIZE))
382 if (memcmp(ip, new_insn, X86_PATCH_SIZE)) {
383 text_poke_bp(ip, new_insn, X86_PATCH_SIZE, NULL);
387 mutex_unlock(&text_mutex);
391 int bpf_arch_text_poke(void *ip, enum bpf_text_poke_type t,
392 void *old_addr, void *new_addr)
393 {
394 if (!is_kernel_text((long)ip) &&
395 !is_bpf_text_address((long)ip))
396 /* BPF poking in modules is not supported */
397 return -EINVAL;
398
399 /*
400 * See emit_prologue(), for IBT builds the trampoline hook is preceded
401 * with an ENDBR instruction.
402 */
403 if (is_endbr(*(u32 *)ip))
404 ip += ENDBR_INSN_SIZE;
405
406 return __bpf_arch_text_poke(ip, t, old_addr, new_addr);
407 }
409 #define EMIT_LFENCE() EMIT3(0x0F, 0xAE, 0xE8)
411 static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
412 {
413 u8 *prog = *pprog;
414
415 if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
416 EMIT_LFENCE();
417 EMIT2(0xFF, 0xE0 + reg);
418 } else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
419 OPTIMIZER_HIDE_VAR(reg);
420 emit_jump(&prog, &__x86_indirect_thunk_array[reg], ip);
421 } else {
422 EMIT2(0xFF, 0xE0 + reg); /* jmp *%\reg */
423 if (IS_ENABLED(CONFIG_RETPOLINE) || IS_ENABLED(CONFIG_SLS))
424 EMIT1(0xCC); /* int3 */
425 }
426
427 *pprog = prog;
428 }
430 static void emit_return(u8 **pprog, u8 *ip)
431 {
432 u8 *prog = *pprog;
433
434 if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
435 emit_jump(&prog, &__x86_return_thunk, ip);
436 } else {
437 EMIT1(0xC3); /* ret */
438 if (IS_ENABLED(CONFIG_SLS))
439 EMIT1(0xCC); /* int3 */
440 }
441
442 *pprog = prog;
443 }
445 /*
446 * Generate the following code:
447 *
448 * ... bpf_tail_call(void *ctx, struct bpf_array *array, u64 index) ...
449 *   if (index >= array->map.max_entries)
450 *     goto out;
451 *   if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
452 *     goto out;
453 *   prog = array->ptrs[index];
454 *   if (prog == NULL)
455 *     goto out;
456 *   goto *(prog->bpf_func + prologue_size);
457 * out:
458 */
459 static void emit_bpf_tail_call_indirect(u8 **pprog, bool *callee_regs_used,
460 u32 stack_depth, u8 *ip,
461 struct jit_context *ctx)
463 int tcc_off = -4 - round_up(stack_depth, 8);
464 u8 *prog = *pprog, *start = *pprog;
467 /*
468 * rdi - pointer to ctx
469 * rsi - pointer to bpf_array
470 * rdx - index in bpf_array
471 */
473 /*
474 * if (index >= array->map.max_entries)
475 *	goto out;
476 */
477 EMIT2(0x89, 0xD2); /* mov edx, edx */
478 EMIT3(0x39, 0x56, /* cmp dword ptr [rsi + 16], edx */
479 offsetof(struct bpf_array, map.max_entries));
481 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
482 EMIT2(X86_JBE, offset); /* jbe out */
484 /*
485 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
486 *	goto out;
487 */
488 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp + tcc_off] */
489 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
491 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
492 EMIT2(X86_JAE, offset); /* jae out */
493 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
494 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp + tcc_off], eax */
496 /* prog = array->ptrs[index]; */
497 EMIT4_off32(0x48, 0x8B, 0x8C, 0xD6, /* mov rcx, [rsi + rdx * 8 + offsetof(...)] */
498 offsetof(struct bpf_array, ptrs));
500 /*
501 * if (prog == NULL)
502 *	goto out;
503 */
504 EMIT3(0x48, 0x85, 0xC9); /* test rcx,rcx */
506 offset = ctx->tail_call_indirect_label - (prog + 2 - start);
507 EMIT2(X86_JE, offset); /* je out */
509 pop_callee_regs(&prog, callee_regs_used);
511 EMIT1(0x58); /* pop rax */
512 if (stack_depth)
513 EMIT3_off32(0x48, 0x81, 0xC4, /* add rsp, sd */
514 round_up(stack_depth, 8));
516 /* goto *(prog->bpf_func + X86_TAIL_CALL_OFFSET); */
517 EMIT4(0x48, 0x8B, 0x49, /* mov rcx, qword ptr [rcx + 32] */
518 offsetof(struct bpf_prog, bpf_func));
519 EMIT4(0x48, 0x83, 0xC1, /* add rcx, X86_TAIL_CALL_OFFSET */
520 X86_TAIL_CALL_OFFSET);
521 /*
522 * Now we're ready to jump into the next BPF program
523 * rdi == ctx (1st arg)
524 * rcx == prog->bpf_func + X86_TAIL_CALL_OFFSET
525 */
526 emit_indirect_jump(&prog, 1 /* rcx */, ip + (prog - start));
528 /* out: */
529 ctx->tail_call_indirect_label = prog - start;
530 *pprog = prog;
531 }
533 static void emit_bpf_tail_call_direct(struct bpf_jit_poke_descriptor *poke,
534 u8 **pprog, u8 *ip,
535 bool *callee_regs_used, u32 stack_depth,
536 struct jit_context *ctx)
538 int tcc_off = -4 - round_up(stack_depth, 8);
539 u8 *prog = *pprog, *start = *pprog;
542 /*
543 * if (tail_call_cnt++ >= MAX_TAIL_CALL_CNT)
544 *	goto out;
545 */
546 EMIT2_off32(0x8B, 0x85, tcc_off); /* mov eax, dword ptr [rbp + tcc_off] */
547 EMIT3(0x83, 0xF8, MAX_TAIL_CALL_CNT); /* cmp eax, MAX_TAIL_CALL_CNT */
549 offset = ctx->tail_call_direct_label - (prog + 2 - start);
550 EMIT2(X86_JAE, offset); /* jae out */
551 EMIT3(0x83, 0xC0, 0x01); /* add eax, 1 */
552 EMIT2_off32(0x89, 0x85, tcc_off); /* mov dword ptr [rbp + tcc_off], eax */
554 poke->tailcall_bypass = ip + (prog - start);
555 poke->adj_off = X86_TAIL_CALL_OFFSET;
556 poke->tailcall_target = ip + ctx->tail_call_direct_label - X86_PATCH_SIZE;
557 poke->bypass_addr = (u8 *)poke->tailcall_target + X86_PATCH_SIZE;
559 emit_jump(&prog, (u8 *)poke->tailcall_target + X86_PATCH_SIZE,
560 poke->tailcall_bypass);
562 pop_callee_regs(&prog, callee_regs_used);
563 EMIT1(0x58); /* pop rax */
564 if (stack_depth)
565 EMIT3_off32(0x48, 0x81, 0xC4, round_up(stack_depth, 8));
567 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
568 prog += X86_PATCH_SIZE;
570 /* out: */
571 ctx->tail_call_direct_label = prog - start;
572 *pprog = prog;
573 }
576 static void bpf_tail_call_direct_fixup(struct bpf_prog *prog)
578 struct bpf_jit_poke_descriptor *poke;
579 struct bpf_array *array;
580 struct bpf_prog *target;
583 for (i = 0; i < prog->aux->size_poke_tab; i++) {
584 poke = &prog->aux->poke_tab[i];
585 if (poke->aux && poke->aux != prog->aux)
586 continue;
588 WARN_ON_ONCE(READ_ONCE(poke->tailcall_target_stable));
590 if (poke->reason != BPF_POKE_REASON_TAIL_CALL)
591 continue;
593 array = container_of(poke->tail_call.map, struct bpf_array, map);
594 mutex_lock(&array->aux->poke_mutex);
595 target = array->ptrs[poke->tail_call.key];
596 if (target) {
597 ret = __bpf_arch_text_poke(poke->tailcall_target,
598 BPF_MOD_JUMP, NULL,
599 (u8 *)target->bpf_func +
600 poke->adj_off);
601 BUG_ON(ret < 0);
602 ret = __bpf_arch_text_poke(poke->tailcall_bypass,
603 BPF_MOD_JUMP,
604 (u8 *)poke->tailcall_target +
605 X86_PATCH_SIZE, NULL);
606 BUG_ON(ret < 0);
607 }
608 WRITE_ONCE(poke->tailcall_target_stable, true);
609 mutex_unlock(&array->aux->poke_mutex);
610 }
611 }
613 static void emit_mov_imm32(u8 **pprog, bool sign_propagate,
614 u32 dst_reg, const u32 imm32)
615 {
616 u8 b1, b2, b3;
617 u8 *prog = *pprog;
618
619 /*
620 * Optimization: if imm32 is positive, use 'mov %eax, imm32'
621 * (which zero-extends imm32) to save 2 bytes.
622 */
623 if (sign_propagate && (s32)imm32 < 0) {
624 /* 'mov %rax, imm32' sign extends imm32 */
625 b1 = add_1mod(0x48, dst_reg);
626 b2 = 0xC7;
627 b3 = 0xC0;
628 EMIT3_off32(b1, b2, add_1reg(b3, dst_reg), imm32);
629 goto done;
630 }
631
632 /*
633 * Optimization: if imm32 is zero, use 'xor %eax, %eax'
634 * to save 3 bytes.
635 */
636 if (imm32 == 0) {
637 if (is_ereg(dst_reg))
638 EMIT1(add_2mod(0x40, dst_reg, dst_reg));
639 b2 = 0x31; /* opcode of 'xor' */
640 b3 = 0xC0;
641 EMIT2(b2, add_2reg(b3, dst_reg, dst_reg));
642 goto done;
643 }
644
645 /* mov %eax, imm32 */
646 if (is_ereg(dst_reg))
647 EMIT1(add_1mod(0x40, dst_reg));
648 EMIT1_off32(add_1reg(0xB8, dst_reg), imm32);
649 done:
650 *pprog = prog;
651 }
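/*
 * Editor's examples of the three cases above, with dst_reg = BPF_REG_0
 * (rax): imm32 = -1 with sign propagation emits 48 c7 c0 ff ff ff ff
 * ("mov rax, -1"); imm32 = 0 emits 31 c0 ("xor eax, eax"); and
 * imm32 = 5 emits b8 05 00 00 00 ("mov eax, 5").
 */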
653 static void emit_mov_imm64(u8 **pprog, u32 dst_reg,
654 const u32 imm32_hi, const u32 imm32_lo)
655 {
656 u8 *prog = *pprog;
657
658 if (is_uimm32(((u64)imm32_hi << 32) | (u32)imm32_lo)) {
659 /*
660 * For emitting a plain u32, where the sign bit must not be
661 * propagated, LLVM tends to load imm64 over mov32
662 * directly, so save a couple of bytes by just doing
663 * 'mov %eax, imm32' instead.
664 */
665 emit_mov_imm32(&prog, false, dst_reg, imm32_lo);
666 } else {
667 /* movabsq rax, imm64 */
668 EMIT2(add_1mod(0x48, dst_reg), add_1reg(0xB8, dst_reg));
669 EMIT(imm32_lo, 4);
670 EMIT(imm32_hi, 4);
671 }
672
673 *pprog = prog;
674 }
676 static void emit_mov_reg(u8 **pprog, bool is64, u32 dst_reg, u32 src_reg)
677 {
678 u8 *prog = *pprog;
679
680 if (is64) {
681 /* mov dst, src */
682 EMIT_mov(dst_reg, src_reg);
683 } else {
684 /* mov32 dst, src */
685 if (is_ereg(dst_reg) || is_ereg(src_reg))
686 EMIT1(add_2mod(0x40, dst_reg, src_reg));
687 EMIT2(0x89, add_2reg(0xC0, dst_reg, src_reg));
688 }
689
690 *pprog = prog;
691 }
693 /* Emit the suffix (ModR/M etc) for addressing *(ptr_reg + off) and val_reg */
694 static void emit_insn_suffix(u8 **pprog, u32 ptr_reg, u32 val_reg, int off)
695 {
696 u8 *prog = *pprog;
697
698 if (is_imm8(off)) {
699 /* 1-byte signed displacement.
700 *
701 * If off == 0 we could skip this and save one extra byte, but
702 * the special case of x86 R13, which always needs an offset,
703 * is not worth the hassle.
704 */
705 EMIT2(add_2reg(0x40, ptr_reg, val_reg), off);
706 } else {
707 /* 4-byte signed displacement */
708 EMIT1_off32(add_2reg(0x80, ptr_reg, val_reg), off);
709 }
710 *pprog = prog;
711 }
713 /*
714 * Emit a REX byte if it will be necessary to address these registers
715 */
716 static void maybe_emit_mod(u8 **pprog, u32 dst_reg, u32 src_reg, bool is64)
717 {
718 u8 *prog = *pprog;
719
720 if (is64)
721 EMIT1(add_2mod(0x48, dst_reg, src_reg));
722 else if (is_ereg(dst_reg) || is_ereg(src_reg))
723 EMIT1(add_2mod(0x40, dst_reg, src_reg));
724 *pprog = prog;
725 }
727 /*
728 * Similar version of maybe_emit_mod() for a single register
729 */
730 static void maybe_emit_1mod(u8 **pprog, u32 reg, bool is64)
731 {
732 u8 *prog = *pprog;
733
734 if (is64)
735 EMIT1(add_1mod(0x48, reg));
736 else if (is_ereg(reg))
737 EMIT1(add_1mod(0x40, reg));
738 *pprog = prog;
739 }
741 /* LDX: dst_reg = *(u8*)(src_reg + off) */
742 static void emit_ldx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
743 {
744 u8 *prog = *pprog;
745
746 switch (size) {
747 case BPF_B:
748 /* Emit 'movzx rax, byte ptr [rax + off]' */
749 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB6);
750 break;
751 case BPF_H:
752 /* Emit 'movzx rax, word ptr [rax + off]' */
753 EMIT3(add_2mod(0x48, src_reg, dst_reg), 0x0F, 0xB7);
754 break;
755 case BPF_W:
756 /* Emit 'mov eax, dword ptr [rax+0x14]' */
757 if (is_ereg(dst_reg) || is_ereg(src_reg))
758 EMIT2(add_2mod(0x40, src_reg, dst_reg), 0x8B);
759 else
760 EMIT1(0x8B);
761 break;
762 case BPF_DW:
763 /* Emit 'mov rax, qword ptr [rax+0x14]' */
764 EMIT2(add_2mod(0x48, src_reg, dst_reg), 0x8B);
765 break;
766 }
767 emit_insn_suffix(&prog, src_reg, dst_reg, off);
768 *pprog = prog;
769 }
771 /* STX: *(u8*)(dst_reg + off) = src_reg */
772 static void emit_stx(u8 **pprog, u32 size, u32 dst_reg, u32 src_reg, int off)
773 {
774 u8 *prog = *pprog;
775
776 switch (size) {
777 case BPF_B:
778 /* Emit 'mov byte ptr [rax + off], al' */
779 if (is_ereg(dst_reg) || is_ereg_8l(src_reg))
780 /* Add extra byte for eregs or SIL,DIL,BPL in src_reg */
781 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x88);
782 else
783 EMIT1(0x88);
784 break;
785 case BPF_H:
786 if (is_ereg(dst_reg) || is_ereg(src_reg))
787 EMIT3(0x66, add_2mod(0x40, dst_reg, src_reg), 0x89);
788 else
789 EMIT2(0x66, 0x89);
790 break;
791 case BPF_W:
792 if (is_ereg(dst_reg) || is_ereg(src_reg))
793 EMIT2(add_2mod(0x40, dst_reg, src_reg), 0x89);
794 else
795 EMIT1(0x89);
796 break;
797 case BPF_DW:
798 EMIT2(add_2mod(0x48, dst_reg, src_reg), 0x89);
799 break;
800 }
801 emit_insn_suffix(&prog, dst_reg, src_reg, off);
802 *pprog = prog;
803 }
805 static int emit_atomic(u8 **pprog, u8 atomic_op,
806 u32 dst_reg, u32 src_reg, s16 off, u8 bpf_size)
807 {
808 u8 *prog = *pprog;
809
810 EMIT1(0xF0); /* lock prefix */
811
812 maybe_emit_mod(&prog, dst_reg, src_reg, bpf_size == BPF_DW);
813
814 /* emit opcode */
815 switch (atomic_op) {
816 case BPF_ADD:
817 case BPF_AND:
818 case BPF_OR:
819 case BPF_XOR:
820 /* lock *(u32/u64*)(dst_reg + off) <op>= src_reg */
821 EMIT1(simple_alu_opcodes[atomic_op]);
822 break;
823 case BPF_ADD | BPF_FETCH:
824 /* src_reg = atomic_fetch_add(dst_reg + off, src_reg); */
825 EMIT2(0x0F, 0xC1);
826 break;
827 case BPF_XCHG:
828 /* src_reg = atomic_xchg(dst_reg + off, src_reg); */
829 EMIT1(0x87);
830 break;
831 case BPF_CMPXCHG:
832 /* r0 = atomic_cmpxchg(dst_reg + off, r0, src_reg); */
833 EMIT2(0x0F, 0xB1);
834 break;
835 default:
836 pr_err("bpf_jit: unknown atomic opcode %02x\n", atomic_op);
837 return -EFAULT;
838 }
839
840 emit_insn_suffix(&prog, dst_reg, src_reg, off);
841
842 *pprog = prog;
843 return 0;
844 }
846 bool ex_handler_bpf(const struct exception_table_entry *x, struct pt_regs *regs)
847 {
848 u32 reg = x->fixup >> 8;
849
850 /* jump over faulting load and clear dest register */
851 *(unsigned long *)((void *)regs + reg) = 0;
852 regs->ip += x->fixup & 0xff;
853 return true;
854 }
856 static void detect_reg_usage(struct bpf_insn *insn, int insn_cnt,
857 bool *regs_used, bool *tail_call_seen)
858 {
859 int i;
860
861 for (i = 1; i <= insn_cnt; i++, insn++) {
862 if (insn->code == (BPF_JMP | BPF_TAIL_CALL))
863 *tail_call_seen = true;
864 if (insn->dst_reg == BPF_REG_6 || insn->src_reg == BPF_REG_6)
865 regs_used[0] = true;
866 if (insn->dst_reg == BPF_REG_7 || insn->src_reg == BPF_REG_7)
867 regs_used[1] = true;
868 if (insn->dst_reg == BPF_REG_8 || insn->src_reg == BPF_REG_8)
869 regs_used[2] = true;
870 if (insn->dst_reg == BPF_REG_9 || insn->src_reg == BPF_REG_9)
871 regs_used[3] = true;
872 }
873 }
875 static void emit_nops(u8 **pprog, int len)
876 {
877 u8 *prog = *pprog;
878 int i, noplen;
879
880 while (len > 0) {
881 noplen = len;
882
883 if (noplen > ASM_NOP_MAX)
884 noplen = ASM_NOP_MAX;
885
886 for (i = 0; i < noplen; i++)
887 EMIT1(x86_nops[noplen][i]);
888 len -= noplen;
889 }
890 *pprog = prog;
891 }
894 /* emit the 3-byte VEX prefix
896 * r: same as rex.r, extra bit for ModRM reg field
897 * x: same as rex.x, extra bit for SIB index field
898 * b: same as rex.b, extra bit for ModRM r/m, or SIB base
899 * m: opcode map select, encoding escape bytes e.g. 0x0f38
900 * w: same as rex.w (32 bit or 64 bit) or opcode specific
901 * src_reg2: additional source reg (encoded as BPF reg)
902 * l: vector length (128 bit or 256 bit) or reserved
903 * pp: opcode prefix (none, 0x66, 0xf2 or 0xf3)
904 */
905 static void emit_3vex(u8 **pprog, bool r, bool x, bool b, u8 m,
906 bool w, u8 src_reg2, bool l, u8 pp)
907 {
908 u8 *prog = *pprog;
909 const u8 b0 = 0xc4; /* first byte of 3-byte VEX prefix */
910 u8 b1, b2;
911 u8 vvvv = reg2hex[src_reg2];
912
913 /* reg2hex gives only the lower 3 bit of vvvv */
914 if (is_ereg(src_reg2))
915 vvvv |= 1 << 3;
916
917 /*
918 * 2nd byte of 3-byte VEX prefix
919 * ~ means bit inverted encoding
920 *
921 *    7                           0
922 *  +---+---+---+---+---+---+---+---+
923 *  |~R |~X |~B |         m         |
924 *  +---+---+---+---+---+---+---+---+
925 */
926 b1 = (!r << 7) | (!x << 6) | (!b << 5) | (m & 0x1f);
927 /*
928 * 3rd byte of 3-byte VEX prefix
929 *
930 *    7                           0
931 *  +---+---+---+---+---+---+---+---+
932 *  | W |     ~vvvv     | L |  pp   |
933 *  +---+---+---+---+---+---+---+---+
934 */
935 b2 = (w << 7) | ((~vvvv & 0xf) << 3) | (l << 2) | (pp & 3);
936
937 EMIT3(b0, b1, b2);
938 *pprog = prog;
939 }
941 /* emit BMI2 shift instruction */
942 static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
943 {
944 u8 *prog = *pprog;
945 bool r = is_ereg(dst_reg);
946 u8 m = 2; /* escape code 0f38 */
947
948 emit_3vex(&prog, r, false, r, m, is64, src_reg, false, op);
949 EMIT2(0xf7, add_2reg(0xC0, dst_reg, dst_reg));
950 *pprog = prog;
951 }
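/*
 * Worked example (editor's note): for a 64-bit BPF_LSH with dst_reg in
 * rax (BPF_REG_0) and the shift count in rsi (BPF_REG_2), op = 1
 * selects the 0x66 prefix (SHLX), so the emitted bytes are
 * c4 e2 c9 f7 c0, i.e. "shlx rax, rax, rsi".
 */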
953 #define INSN_SZ_DIFF (((addrs[i] - addrs[i - 1]) - (prog - temp)))
955 static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
956 int oldproglen, struct jit_context *ctx, bool jmp_padding)
958 bool tail_call_reachable = bpf_prog->aux->tail_call_reachable;
959 struct bpf_insn *insn = bpf_prog->insnsi;
960 bool callee_regs_used[4] = {};
961 int insn_cnt = bpf_prog->len;
962 bool tail_call_seen = false;
963 bool seen_exit = false;
964 u8 temp[BPF_MAX_INSN_SIZE + BPF_INSN_SAFETY];
966 int ilen, proglen = 0;
970 detect_reg_usage(insn, insn_cnt, callee_regs_used,
973 /* tail call's presence in current prog implies it is reachable */
974 tail_call_reachable |= tail_call_seen;
976 emit_prologue(&prog, bpf_prog->aux->stack_depth,
977 bpf_prog_was_classic(bpf_prog), tail_call_reachable,
978 bpf_prog->aux->func_idx != 0);
979 push_callee_regs(&prog, callee_regs_used);
983 memcpy(rw_image + proglen, temp, ilen);
988 for (i = 1; i <= insn_cnt; i++, insn++) {
989 const s32 imm32 = insn->imm;
990 u32 dst_reg = insn->dst_reg;
991 u32 src_reg = insn->src_reg;
999 switch (insn->code) {
1001 case BPF_ALU | BPF_ADD | BPF_X:
1002 case BPF_ALU | BPF_SUB | BPF_X:
1003 case BPF_ALU | BPF_AND | BPF_X:
1004 case BPF_ALU | BPF_OR | BPF_X:
1005 case BPF_ALU | BPF_XOR | BPF_X:
1006 case BPF_ALU64 | BPF_ADD | BPF_X:
1007 case BPF_ALU64 | BPF_SUB | BPF_X:
1008 case BPF_ALU64 | BPF_AND | BPF_X:
1009 case BPF_ALU64 | BPF_OR | BPF_X:
1010 case BPF_ALU64 | BPF_XOR | BPF_X:
1011 maybe_emit_mod(&prog, dst_reg, src_reg,
1012 BPF_CLASS(insn->code) == BPF_ALU64);
1013 b2 = simple_alu_opcodes[BPF_OP(insn->code)];
1014 EMIT2(b2, add_2reg(0xC0, dst_reg, src_reg));
1017 case BPF_ALU64 | BPF_MOV | BPF_X:
1018 case BPF_ALU | BPF_MOV | BPF_X:
1020 BPF_CLASS(insn->code) == BPF_ALU64,
1025 case BPF_ALU | BPF_NEG:
1026 case BPF_ALU64 | BPF_NEG:
1027 maybe_emit_1mod(&prog, dst_reg,
1028 BPF_CLASS(insn->code) == BPF_ALU64);
1029 EMIT2(0xF7, add_1reg(0xD8, dst_reg));
1032 case BPF_ALU | BPF_ADD | BPF_K:
1033 case BPF_ALU | BPF_SUB | BPF_K:
1034 case BPF_ALU | BPF_AND | BPF_K:
1035 case BPF_ALU | BPF_OR | BPF_K:
1036 case BPF_ALU | BPF_XOR | BPF_K:
1037 case BPF_ALU64 | BPF_ADD | BPF_K:
1038 case BPF_ALU64 | BPF_SUB | BPF_K:
1039 case BPF_ALU64 | BPF_AND | BPF_K:
1040 case BPF_ALU64 | BPF_OR | BPF_K:
1041 case BPF_ALU64 | BPF_XOR | BPF_K:
1042 maybe_emit_1mod(&prog, dst_reg,
1043 BPF_CLASS(insn->code) == BPF_ALU64);
1045 /*
1046 * b3 holds the 'normal' opcode; the b2 short form is only valid
1047 * in case dst is eax/rax.
1048 */
1049 switch (BPF_OP(insn->code)) {
1073 EMIT3(0x83, add_1reg(b3, dst_reg), imm32);
1074 else if (is_axreg(dst_reg))
1075 EMIT1_off32(b2, imm32);
1077 EMIT2_off32(0x81, add_1reg(b3, dst_reg), imm32);
1080 case BPF_ALU64 | BPF_MOV | BPF_K:
1081 case BPF_ALU | BPF_MOV | BPF_K:
1082 emit_mov_imm32(&prog, BPF_CLASS(insn->code) == BPF_ALU64,
1086 case BPF_LD | BPF_IMM | BPF_DW:
1087 emit_mov_imm64(&prog, dst_reg, insn[1].imm, insn[0].imm);
1092 /* dst %= src, dst /= src, dst %= imm32, dst /= imm32 */
1093 case BPF_ALU | BPF_MOD | BPF_X:
1094 case BPF_ALU | BPF_DIV | BPF_X:
1095 case BPF_ALU | BPF_MOD | BPF_K:
1096 case BPF_ALU | BPF_DIV | BPF_K:
1097 case BPF_ALU64 | BPF_MOD | BPF_X:
1098 case BPF_ALU64 | BPF_DIV | BPF_X:
1099 case BPF_ALU64 | BPF_MOD | BPF_K:
1100 case BPF_ALU64 | BPF_DIV | BPF_K: {
1101 bool is64 = BPF_CLASS(insn->code) == BPF_ALU64;
1103 if (dst_reg != BPF_REG_0)
1104 EMIT1(0x50); /* push rax */
1105 if (dst_reg != BPF_REG_3)
1106 EMIT1(0x52); /* push rdx */
1108 if (BPF_SRC(insn->code) == BPF_X) {
1109 if (src_reg == BPF_REG_0 ||
1110 src_reg == BPF_REG_3) {
1111 /* mov r11, src_reg */
1112 EMIT_mov(AUX_REG, src_reg);
1116 /* mov r11, imm32 */
1117 EMIT3_off32(0x49, 0xC7, 0xC3, imm32);
1121 if (dst_reg != BPF_REG_0)
1122 /* mov rax, dst_reg */
1123 emit_mov_reg(&prog, is64, BPF_REG_0, dst_reg);
1127 * equivalent to 'xor rdx, rdx', but one byte less
1132 maybe_emit_1mod(&prog, src_reg, is64);
1133 EMIT2(0xF7, add_1reg(0xF0, src_reg));
1135 if (BPF_OP(insn->code) == BPF_MOD &&
1136 dst_reg != BPF_REG_3)
1137 /* mov dst_reg, rdx */
1138 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_3);
1139 else if (BPF_OP(insn->code) == BPF_DIV &&
1140 dst_reg != BPF_REG_0)
1141 /* mov dst_reg, rax */
1142 emit_mov_reg(&prog, is64, dst_reg, BPF_REG_0);
1144 if (dst_reg != BPF_REG_3)
1145 EMIT1(0x5A); /* pop rdx */
1146 if (dst_reg != BPF_REG_0)
1147 EMIT1(0x58); /* pop rax */
1151 case BPF_ALU | BPF_MUL | BPF_K:
1152 case BPF_ALU64 | BPF_MUL | BPF_K:
1153 maybe_emit_mod(&prog, dst_reg, dst_reg,
1154 BPF_CLASS(insn->code) == BPF_ALU64);
1157 /* imul dst_reg, dst_reg, imm8 */
1158 EMIT3(0x6B, add_2reg(0xC0, dst_reg, dst_reg),
1161 /* imul dst_reg, dst_reg, imm32 */
1163 add_2reg(0xC0, dst_reg, dst_reg),
1167 case BPF_ALU | BPF_MUL | BPF_X:
1168 case BPF_ALU64 | BPF_MUL | BPF_X:
1169 maybe_emit_mod(&prog, src_reg, dst_reg,
1170 BPF_CLASS(insn->code) == BPF_ALU64);
1172 /* imul dst_reg, src_reg */
1173 EMIT3(0x0F, 0xAF, add_2reg(0xC0, src_reg, dst_reg));
1177 case BPF_ALU | BPF_LSH | BPF_K:
1178 case BPF_ALU | BPF_RSH | BPF_K:
1179 case BPF_ALU | BPF_ARSH | BPF_K:
1180 case BPF_ALU64 | BPF_LSH | BPF_K:
1181 case BPF_ALU64 | BPF_RSH | BPF_K:
1182 case BPF_ALU64 | BPF_ARSH | BPF_K:
1183 maybe_emit_1mod(&prog, dst_reg,
1184 BPF_CLASS(insn->code) == BPF_ALU64);
1186 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1188 EMIT2(0xD1, add_1reg(b3, dst_reg));
1190 EMIT3(0xC1, add_1reg(b3, dst_reg), imm32);
1193 case BPF_ALU | BPF_LSH | BPF_X:
1194 case BPF_ALU | BPF_RSH | BPF_X:
1195 case BPF_ALU | BPF_ARSH | BPF_X:
1196 case BPF_ALU64 | BPF_LSH | BPF_X:
1197 case BPF_ALU64 | BPF_RSH | BPF_X:
1198 case BPF_ALU64 | BPF_ARSH | BPF_X:
1199 /* BMI2 shifts aren't better when shift count is already in rcx */
1200 if (boot_cpu_has(X86_FEATURE_BMI2) && src_reg != BPF_REG_4) {
1201 /* shrx/sarx/shlx dst_reg, dst_reg, src_reg */
1202 bool w = (BPF_CLASS(insn->code) == BPF_ALU64);
1205 switch (BPF_OP(insn->code)) {
1207 op = 1; /* prefix 0x66 */
1210 op = 3; /* prefix 0xf2 */
1213 op = 2; /* prefix 0xf3 */
1217 emit_shiftx(&prog, dst_reg, src_reg, w, op);
1222 if (src_reg != BPF_REG_4) { /* common case */
1223 /* Check for bad case when dst_reg == rcx */
1224 if (dst_reg == BPF_REG_4) {
1225 /* mov r11, dst_reg */
1226 EMIT_mov(AUX_REG, dst_reg);
1229 EMIT1(0x51); /* push rcx */
1231 /* mov rcx, src_reg */
1232 EMIT_mov(BPF_REG_4, src_reg);
1235 /* shl %rax, %cl | shr %rax, %cl | sar %rax, %cl */
1236 maybe_emit_1mod(&prog, dst_reg,
1237 BPF_CLASS(insn->code) == BPF_ALU64);
1239 b3 = simple_alu_opcodes[BPF_OP(insn->code)];
1240 EMIT2(0xD3, add_1reg(b3, dst_reg));
1242 if (src_reg != BPF_REG_4) {
1243 if (insn->dst_reg == BPF_REG_4)
1244 /* mov dst_reg, r11 */
1245 EMIT_mov(insn->dst_reg, AUX_REG);
1247 EMIT1(0x59); /* pop rcx */
1252 case BPF_ALU | BPF_END | BPF_FROM_BE:
1255 /* Emit 'ror %ax, 8' to swap lower 2 bytes */
1257 if (is_ereg(dst_reg))
1259 EMIT3(0xC1, add_1reg(0xC8, dst_reg), 8);
1261 /* Emit 'movzwl eax, ax' */
1262 if (is_ereg(dst_reg))
1263 EMIT3(0x45, 0x0F, 0xB7);
1266 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1269 /* Emit 'bswap eax' to swap lower 4 bytes */
1270 if (is_ereg(dst_reg))
1274 EMIT1(add_1reg(0xC8, dst_reg));
1277 /* Emit 'bswap rax' to swap 8 bytes */
1278 EMIT3(add_1mod(0x48, dst_reg), 0x0F,
1279 add_1reg(0xC8, dst_reg));
1284 case BPF_ALU | BPF_END | BPF_FROM_LE:
1288 * Emit 'movzwl eax, ax' to zero extend 16-bit
1291 if (is_ereg(dst_reg))
1292 EMIT3(0x45, 0x0F, 0xB7);
1295 EMIT1(add_2reg(0xC0, dst_reg, dst_reg));
1298 /* Emit 'mov eax, eax' to clear upper 32-bits */
1299 if (is_ereg(dst_reg))
1301 EMIT2(0x89, add_2reg(0xC0, dst_reg, dst_reg));
1309 /* speculation barrier */
1310 case BPF_ST | BPF_NOSPEC:
1314 /* ST: *(u8*)(dst_reg + off) = imm */
1315 case BPF_ST | BPF_MEM | BPF_B:
1316 if (is_ereg(dst_reg))
1321 case BPF_ST | BPF_MEM | BPF_H:
1322 if (is_ereg(dst_reg))
1323 EMIT3(0x66, 0x41, 0xC7);
1327 case BPF_ST | BPF_MEM | BPF_W:
1328 if (is_ereg(dst_reg))
1333 case BPF_ST | BPF_MEM | BPF_DW:
1334 EMIT2(add_1mod(0x48, dst_reg), 0xC7);
1336 st: if (is_imm8(insn->off))
1337 EMIT2(add_1reg(0x40, dst_reg), insn->off);
1339 EMIT1_off32(add_1reg(0x80, dst_reg), insn->off);
1341 EMIT(imm32, bpf_size_to_x86_bytes(BPF_SIZE(insn->code)));
1344 /* STX: *(u8*)(dst_reg + off) = src_reg */
1345 case BPF_STX | BPF_MEM | BPF_B:
1346 case BPF_STX | BPF_MEM | BPF_H:
1347 case BPF_STX | BPF_MEM | BPF_W:
1348 case BPF_STX | BPF_MEM | BPF_DW:
1349 emit_stx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1352 /* LDX: dst_reg = *(u8*)(src_reg + off) */
1353 case BPF_LDX | BPF_MEM | BPF_B:
1354 case BPF_LDX | BPF_PROBE_MEM | BPF_B:
1355 case BPF_LDX | BPF_MEM | BPF_H:
1356 case BPF_LDX | BPF_PROBE_MEM | BPF_H:
1357 case BPF_LDX | BPF_MEM | BPF_W:
1358 case BPF_LDX | BPF_PROBE_MEM | BPF_W:
1359 case BPF_LDX | BPF_MEM | BPF_DW:
1360 case BPF_LDX | BPF_PROBE_MEM | BPF_DW:
1361 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1362 /* Though the verifier prevents negative insn->off in BPF_PROBE_MEM
1363 * add abs(insn->off) to the limit to make sure that negative
1364 * offset won't be an issue.
1365 * insn->off is s16, so it won't affect valid pointers.
1366 */
1367 u64 limit = TASK_SIZE_MAX + PAGE_SIZE + abs(insn->off);
1368 u8 *end_of_jmp1, *end_of_jmp2;
1370 /* Conservatively check that src_reg + insn->off is a kernel address:
1371 * 1. src_reg + insn->off >= limit
1372 * 2. src_reg + insn->off doesn't become small positive.
1373 * Cannot do src_reg + insn->off >= limit in one branch,
1374 * since it needs two spare registers, but the JIT has only one.
1375 */
1377 /* movabsq r11, limit */
1378 EMIT2(add_1mod(0x48, AUX_REG), add_1reg(0xB8, AUX_REG));
1379 EMIT((u32)limit, 4);
1380 EMIT(limit >> 32, 4);
1381 /* cmp src_reg, r11 */
1382 maybe_emit_mod(&prog, src_reg, AUX_REG, true);
1383 EMIT2(0x39, add_2reg(0xC0, src_reg, AUX_REG));
1384 /* if unsigned '<' goto end_of_jmp2 */
1385 EMIT2(X86_JB, 0);
1386 end_of_jmp1 = prog;
1388 /* mov r11, src_reg */
1389 emit_mov_reg(&prog, true, AUX_REG, src_reg);
1390 /* add r11, insn->off */
1391 maybe_emit_1mod(&prog, AUX_REG, true);
1392 EMIT2_off32(0x81, add_1reg(0xC0, AUX_REG), insn->off);
1393 /* jmp if not carry to start_of_ldx
1394 * Otherwise ERR_PTR(-EINVAL) + 128 will be the user addr
1395 * that has to be rejected.
1396 */
1397 EMIT2(0x73 /* JNC */, 0);
1398 end_of_jmp2 = prog;
1400 /* xor dst_reg, dst_reg */
1401 emit_mov_imm32(&prog, false, dst_reg, 0);
1402 /* jmp byte_after_ldx */
1403 EMIT2(0xEB, 0);
1405 /* populate jmp_offset for JB above to jump to xor dst_reg */
1406 end_of_jmp1[-1] = end_of_jmp2 - end_of_jmp1;
1407 /* populate jmp_offset for JNC above to jump to start_of_ldx */
1408 start_of_ldx = prog;
1409 end_of_jmp2[-1] = start_of_ldx - end_of_jmp2;
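/*
 * Editor's note: both forward jumps above are emitted with a zero
 * placeholder displacement; once the target is known, the last byte of
 * each jump (its rel8 field) is patched with the distance from the end
 * of the jump instruction to the target.
 */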
1411 emit_ldx(&prog, BPF_SIZE(insn->code), dst_reg, src_reg, insn->off);
1412 if (BPF_MODE(insn->code) == BPF_PROBE_MEM) {
1413 struct exception_table_entry *ex;
1414 u8 *_insn = image + proglen + (start_of_ldx - temp);
1417 /* populate jmp_offset for JMP above */
1418 start_of_ldx[-1] = prog - start_of_ldx;
1420 if (!bpf_prog->aux->extable)
1421 break;
1423 if (excnt >= bpf_prog->aux->num_exentries) {
1424 pr_err("ex gen bug\n");
1427 ex = &bpf_prog->aux->extable[excnt++];
1429 delta = _insn - (u8 *)&ex->insn;
1430 if (!is_simm32(delta)) {
1431 pr_err("extable->insn doesn't fit into 32-bit\n");
1434 /* switch ex to rw buffer for writes */
1435 ex = (void *)rw_image + ((void *)ex - (void *)image);
1439 ex->data = EX_TYPE_BPF;
1441 if (dst_reg > BPF_REG_9) {
1442 pr_err("verifier error\n");
1445 /*
1446 * Compute size of x86 insn and its target dest x86 register.
1447 * ex_handler_bpf() will use lower 8 bits to adjust
1448 * pt_regs->ip to jump over this x86 instruction
1449 * and upper bits to figure out which pt_regs to zero out.
1450 * End result: x86 insn "mov rbx, qword ptr [rax+0x14]"
1451 * of 4 bytes will be ignored and rbx will be zero inited.
1452 */
1453 ex->fixup = (prog - start_of_ldx) | (reg2pt_regs[dst_reg] << 8);
1457 case BPF_STX | BPF_ATOMIC | BPF_W:
1458 case BPF_STX | BPF_ATOMIC | BPF_DW:
1459 if (insn->imm == (BPF_AND | BPF_FETCH) ||
1460 insn->imm == (BPF_OR | BPF_FETCH) ||
1461 insn->imm == (BPF_XOR | BPF_FETCH)) {
1462 bool is64 = BPF_SIZE(insn->code) == BPF_DW;
1463 u32 real_src_reg = src_reg;
1464 u32 real_dst_reg = dst_reg;
1467 /*
1468 * Can't be implemented with a single x86 insn.
1469 * Need to do a CMPXCHG loop.
1470 */
1472 /* Will need RAX as a CMPXCHG operand so save R0 */
1473 emit_mov_reg(&prog, true, BPF_REG_AX, BPF_REG_0);
1474 if (src_reg == BPF_REG_0)
1475 real_src_reg = BPF_REG_AX;
1476 if (dst_reg == BPF_REG_0)
1477 real_dst_reg = BPF_REG_AX;
1479 branch_target = prog;
1480 /* Load old value */
1481 emit_ldx(&prog, BPF_SIZE(insn->code),
1482 BPF_REG_0, real_dst_reg, insn->off);
1484 * Perform the (commutative) operation locally,
1485 * put the result in the AUX_REG.
1487 emit_mov_reg(&prog, is64, AUX_REG, BPF_REG_0);
1488 maybe_emit_mod(&prog, AUX_REG, real_src_reg, is64);
1489 EMIT2(simple_alu_opcodes[BPF_OP(insn->imm)],
1490 add_2reg(0xC0, AUX_REG, real_src_reg));
1491 /* Attempt to swap in new value */
1492 err = emit_atomic(&prog, BPF_CMPXCHG,
1493 real_dst_reg, AUX_REG,
1495 BPF_SIZE(insn->code));
1498 /*
1499 * ZF tells us whether we won the race. If it's
1500 * cleared we need to try again.
1501 */
1502 EMIT2(X86_JNE, -(prog - branch_target) - 2);
1503 /* Return the pre-modification value */
1504 emit_mov_reg(&prog, is64, real_src_reg, BPF_REG_0);
1505 /* Restore R0 after clobbering RAX */
1506 emit_mov_reg(&prog, true, BPF_REG_0, BPF_REG_AX);
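/*
 * Editor's sketch of the resulting loop for, e.g., a fetching atomic OR:
 *
 *   again:
 *	mov rax, [real_dst + off]	// load old value
 *	mov r11, rax
 *	or r11, real_src		// compute new value
 *	lock cmpxchg [real_dst + off], r11
 *	jne again			// lost the race, retry
 *	mov real_src, rax		// return the old value
 */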
1510 err = emit_atomic(&prog, insn->imm, dst_reg, src_reg,
1511 insn->off, BPF_SIZE(insn->code));
1517 case BPF_JMP | BPF_CALL:
1518 func = (u8 *) __bpf_call_base + imm32;
1519 if (tail_call_reachable) {
1520 /* mov rax, qword ptr [rbp - rounded_stack_depth - 8] */
1521 EMIT3_off32(0x48, 0x8B, 0x85,
1522 -round_up(bpf_prog->aux->stack_depth, 8) - 8);
1523 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1] + 7))
1524 return -EINVAL;
1525 } else {
1526 if (!imm32 || emit_call(&prog, func, image + addrs[i - 1]))
1527 return -EINVAL;
1528 }
1529 break;
1531 case BPF_JMP | BPF_TAIL_CALL:
1532 if (imm32)
1533 emit_bpf_tail_call_direct(&bpf_prog->aux->poke_tab[imm32 - 1],
1534 &prog, image + addrs[i - 1],
1535 callee_regs_used,
1536 bpf_prog->aux->stack_depth,
1537 ctx);
1538 else
1539 emit_bpf_tail_call_indirect(&prog,
1540 callee_regs_used,
1541 bpf_prog->aux->stack_depth,
1542 image + addrs[i - 1],
1543 ctx);
1544 break;
1547 case BPF_JMP | BPF_JEQ | BPF_X:
1548 case BPF_JMP | BPF_JNE | BPF_X:
1549 case BPF_JMP | BPF_JGT | BPF_X:
1550 case BPF_JMP | BPF_JLT | BPF_X:
1551 case BPF_JMP | BPF_JGE | BPF_X:
1552 case BPF_JMP | BPF_JLE | BPF_X:
1553 case BPF_JMP | BPF_JSGT | BPF_X:
1554 case BPF_JMP | BPF_JSLT | BPF_X:
1555 case BPF_JMP | BPF_JSGE | BPF_X:
1556 case BPF_JMP | BPF_JSLE | BPF_X:
1557 case BPF_JMP32 | BPF_JEQ | BPF_X:
1558 case BPF_JMP32 | BPF_JNE | BPF_X:
1559 case BPF_JMP32 | BPF_JGT | BPF_X:
1560 case BPF_JMP32 | BPF_JLT | BPF_X:
1561 case BPF_JMP32 | BPF_JGE | BPF_X:
1562 case BPF_JMP32 | BPF_JLE | BPF_X:
1563 case BPF_JMP32 | BPF_JSGT | BPF_X:
1564 case BPF_JMP32 | BPF_JSLT | BPF_X:
1565 case BPF_JMP32 | BPF_JSGE | BPF_X:
1566 case BPF_JMP32 | BPF_JSLE | BPF_X:
1567 /* cmp dst_reg, src_reg */
1568 maybe_emit_mod(&prog, dst_reg, src_reg,
1569 BPF_CLASS(insn->code) == BPF_JMP);
1570 EMIT2(0x39, add_2reg(0xC0, dst_reg, src_reg));
1573 case BPF_JMP | BPF_JSET | BPF_X:
1574 case BPF_JMP32 | BPF_JSET | BPF_X:
1575 /* test dst_reg, src_reg */
1576 maybe_emit_mod(&prog, dst_reg, src_reg,
1577 BPF_CLASS(insn->code) == BPF_JMP);
1578 EMIT2(0x85, add_2reg(0xC0, dst_reg, src_reg));
1581 case BPF_JMP | BPF_JSET | BPF_K:
1582 case BPF_JMP32 | BPF_JSET | BPF_K:
1583 /* test dst_reg, imm32 */
1584 maybe_emit_1mod(&prog, dst_reg,
1585 BPF_CLASS(insn->code) == BPF_JMP);
1586 EMIT2_off32(0xF7, add_1reg(0xC0, dst_reg), imm32);
1589 case BPF_JMP | BPF_JEQ | BPF_K:
1590 case BPF_JMP | BPF_JNE | BPF_K:
1591 case BPF_JMP | BPF_JGT | BPF_K:
1592 case BPF_JMP | BPF_JLT | BPF_K:
1593 case BPF_JMP | BPF_JGE | BPF_K:
1594 case BPF_JMP | BPF_JLE | BPF_K:
1595 case BPF_JMP | BPF_JSGT | BPF_K:
1596 case BPF_JMP | BPF_JSLT | BPF_K:
1597 case BPF_JMP | BPF_JSGE | BPF_K:
1598 case BPF_JMP | BPF_JSLE | BPF_K:
1599 case BPF_JMP32 | BPF_JEQ | BPF_K:
1600 case BPF_JMP32 | BPF_JNE | BPF_K:
1601 case BPF_JMP32 | BPF_JGT | BPF_K:
1602 case BPF_JMP32 | BPF_JLT | BPF_K:
1603 case BPF_JMP32 | BPF_JGE | BPF_K:
1604 case BPF_JMP32 | BPF_JLE | BPF_K:
1605 case BPF_JMP32 | BPF_JSGT | BPF_K:
1606 case BPF_JMP32 | BPF_JSLT | BPF_K:
1607 case BPF_JMP32 | BPF_JSGE | BPF_K:
1608 case BPF_JMP32 | BPF_JSLE | BPF_K:
1609 /* test dst_reg, dst_reg to save one extra byte */
1610 if (imm32 == 0) {
1611 maybe_emit_mod(&prog, dst_reg, dst_reg,
1612 BPF_CLASS(insn->code) == BPF_JMP);
1613 EMIT2(0x85, add_2reg(0xC0, dst_reg, dst_reg));
1614 goto emit_cond_jmp;
1615 }
1617 /* cmp dst_reg, imm8/32 */
1618 maybe_emit_1mod(&prog, dst_reg,
1619 BPF_CLASS(insn->code) == BPF_JMP);
1621 if (is_imm8(imm32))
1622 EMIT3(0x83, add_1reg(0xF8, dst_reg), imm32);
1623 else
1624 EMIT2_off32(0x81, add_1reg(0xF8, dst_reg), imm32);
1626 emit_cond_jmp: /* Convert BPF opcode to x86 */
1627 switch (BPF_OP(insn->code)) {
1628 case BPF_JEQ:
1629 jmp_cond = X86_JE;
1630 break;
1631 case BPF_JSET:
1632 case BPF_JNE:
1633 jmp_cond = X86_JNE;
1634 break;
1635 case BPF_JGT:
1636 /* GT is unsigned '>', JA in x86 */
1637 jmp_cond = X86_JA;
1638 break;
1639 case BPF_JLT:
1640 /* LT is unsigned '<', JB in x86 */
1641 jmp_cond = X86_JB;
1642 break;
1643 case BPF_JGE:
1644 /* GE is unsigned '>=', JAE in x86 */
1645 jmp_cond = X86_JAE;
1646 break;
1647 case BPF_JLE:
1648 /* LE is unsigned '<=', JBE in x86 */
1649 jmp_cond = X86_JBE;
1650 break;
1651 case BPF_JSGT:
1652 /* Signed '>', GT in x86 */
1653 jmp_cond = X86_JG;
1654 break;
1655 case BPF_JSLT:
1656 /* Signed '<', LT in x86 */
1657 jmp_cond = X86_JL;
1658 break;
1659 case BPF_JSGE:
1660 /* Signed '>=', GE in x86 */
1661 jmp_cond = X86_JGE;
1662 break;
1663 case BPF_JSLE:
1664 /* Signed '<=', LE in x86 */
1665 jmp_cond = X86_JLE;
1666 break;
1667 default: /* to silence GCC warning */
1668 return -EFAULT;
1669 }
1670 jmp_offset = addrs[i + insn->off] - addrs[i];
1671 if (is_imm8(jmp_offset)) {
1672 if (jmp_padding) {
1673 /* To keep the jmp_offset valid, the extra bytes are
1674 * padded before the jump insn, so we subtract the
1675 * 2 bytes of jmp_cond insn from INSN_SZ_DIFF.
1676 *
1677 * If the previous pass already emits an imm8
1678 * jmp_cond, then this BPF insn won't shrink, so
1679 * "nops" is 0.
1680 *
1681 * On the other hand, if the previous pass emits an
1682 * imm32 jmp_cond, the extra 4 bytes(*) is padded to
1683 * keep the image from shrinking further.
1684 *
1685 * (*) imm32 jmp_cond is 6 bytes, and imm8 jmp_cond
1686 *     is 2 bytes, so the size difference is 4 bytes.
1687 */
1688 nops = INSN_SZ_DIFF - 2;
1689 if (nops != 0 && nops != 4) {
1690 pr_err("unexpected jmp_cond padding: %d bytes\n",
1691 nops);
1692 return -EFAULT;
1693 }
1694 emit_nops(&prog, nops);
1695 }
1696 EMIT2(jmp_cond, jmp_offset);
1697 } else if (is_simm32(jmp_offset)) {
1698 EMIT2_off32(0x0F, jmp_cond + 0x10, jmp_offset);
1700 pr_err("cond_jmp gen bug %llx\n", jmp_offset);
1706 case BPF_JMP | BPF_JA:
1707 if (insn->off == -1)
1708 /* -1 jmp instructions will always jump
1709 * backwards two bytes. Explicitly handling
1710 * this case avoids wasting too many passes
1711 * when there are long sequences of replaced
1712 * dead code.
1713 */
1714 jmp_offset = -2;
1715 else
1716 jmp_offset = addrs[i + insn->off] - addrs[i];
1718 if (!jmp_offset) {
1719 /*
1720 * If jmp_padding is enabled, the extra nops will
1721 * be inserted. Otherwise, optimize out nop jumps.
1722 */
1723 if (jmp_padding) {
1724 /* There are 3 possible conditions.
1725 * (1) This BPF_JA is already optimized out in
1726 *     the previous run, so there is no need
1727 *     to pad any extra byte (0 byte).
1728 * (2) The previous pass emits an imm8 jmp,
1729 *     so we pad 2 bytes to match the previous
1730 *     insn size.
1731 * (3) Similarly, the previous pass emits an
1732 *     imm32 jmp, and 5 bytes is padded.
1733 */
1734 nops = INSN_SZ_DIFF;
1735 if (nops != 0 && nops != 2 && nops != 5) {
1736 pr_err("unexpected nop jump padding: %d bytes\n",
1737 nops);
1738 return -EFAULT;
1739 }
1740 emit_nops(&prog, nops);
1741 }
1742 break;
1743 }
1744 emit_jmp:
1745 if (is_imm8(jmp_offset)) {
1746 if (jmp_padding) {
1747 /* To avoid breaking jmp_offset, the extra bytes
1748 * are padded before the actual jmp insn, so
1749 * 2 bytes is subtracted from INSN_SZ_DIFF.
1750 *
1751 * If the previous pass already emits an imm8
1752 * jmp, there is nothing to pad (0 byte).
1753 *
1754 * If it emits an imm32 jmp (5 bytes) previously
1755 * and now an imm8 jmp (2 bytes), then we pad
1756 * (5 - 2 = 3) bytes to stop the image from
1757 * shrinking further.
1758 */
1759 nops = INSN_SZ_DIFF - 2;
1760 if (nops != 0 && nops != 3) {
1761 pr_err("unexpected jump padding: %d bytes\n",
1762 nops);
1763 return -EFAULT;
1764 }
1765 emit_nops(&prog, INSN_SZ_DIFF - 2);
1766 }
1767 EMIT2(0xEB, jmp_offset);
1768 } else if (is_simm32(jmp_offset)) {
1769 EMIT1_off32(0xE9, jmp_offset);
1770 } else {
1771 pr_err("jmp gen bug %llx\n", jmp_offset);
1772 return -EFAULT;
1773 }
1774 break;
1776 case BPF_JMP | BPF_EXIT:
1777 if (seen_exit) {
1778 jmp_offset = ctx->cleanup_addr - addrs[i];
1779 goto emit_jmp;
1780 }
1781 seen_exit = true;
1782 /* Update cleanup_addr */
1783 ctx->cleanup_addr = proglen;
1784 pop_callee_regs(&prog, callee_regs_used);
1785 EMIT1(0xC9); /* leave */
1786 emit_return(&prog, image + addrs[i - 1] + (prog - temp));
1787 break;
1789 default:
1790 /*
1791 * By design the x86-64 JIT should support all BPF instructions.
1792 * This error will be seen if a new instruction was added
1793 * to the interpreter, but not to the JIT, or if there is
1794 * junk in bpf_prog.
1795 */
1796 pr_err("bpf_jit: unknown opcode %02x\n", insn->code);
1797 return -EINVAL;
1798 }
1801 if (ilen > BPF_MAX_INSN_SIZE) {
1802 pr_err("bpf_jit: fatal insn size error\n");
1803 return -EFAULT;
1804 }
1806 if (image) {
1807 /*
1808 * When populating the image, assert that:
1809 *
1810 *  i) We do not write beyond the allocated space, and
1811 *  ii) addrs[i] did not change from the prior run, in order
1812 *      to validate assumptions made for computing branch
1813 *      displacements.
1814 */
1815 if (unlikely(proglen + ilen > oldproglen ||
1816 proglen + ilen != addrs[i])) {
1817 pr_err("bpf_jit: fatal error\n");
1818 return -EFAULT;
1819 }
1820 memcpy(rw_image + proglen, temp, ilen);
1821 }
1822 proglen += ilen;
1823 addrs[i] = proglen;
1824 prog = temp;
1825 }
1827 if (image && excnt != bpf_prog->aux->num_exentries) {
1828 pr_err("extable is not populated\n");
1829 return -EFAULT;
1830 }
1831 return proglen;
1832 }
1834 static void save_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1837 int i, j, arg_size, nr_regs;
1838 /* Store function arguments to stack.
1839 * For a function that accepts two pointers the sequence will be:
1840 * mov QWORD PTR [rbp-0x10],rdi
1841 * mov QWORD PTR [rbp-0x8],rsi
1842 */
1843 for (i = 0, j = 0; i < min(nr_args, 6); i++) {
1844 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
1845 nr_regs = (m->arg_size[i] + 7) / 8;
1849 arg_size = m->arg_size[i];
1853 emit_stx(prog, bytes_to_bpf_size(arg_size),
1854 BPF_REG_FP,
1855 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
1856 -(stack_size - j * 8));
1863 static void restore_regs(const struct btf_func_model *m, u8 **prog, int nr_args,
1866 int i, j, arg_size, nr_regs;
1868 /* Restore function arguments from stack.
1869 * For a function that accepts two pointers the sequence will be:
1870 * EMIT4(0x48, 0x8B, 0x7D, 0xF0); mov rdi,QWORD PTR [rbp-0x10]
1871 * EMIT4(0x48, 0x8B, 0x75, 0xF8); mov rsi,QWORD PTR [rbp-0x8]
1872 */
1873 for (i = 0, j = 0; i < min(nr_args, 6); i++) {
1874 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG) {
1875 nr_regs = (m->arg_size[i] + 7) / 8;
1879 arg_size = m->arg_size[i];
1883 emit_ldx(prog, bytes_to_bpf_size(arg_size),
1884 j == 5 ? X86_REG_R9 : BPF_REG_1 + j,
1885 BPF_REG_FP,
1886 -(stack_size - j * 8));
1893 static int invoke_bpf_prog(const struct btf_func_model *m, u8 **pprog,
1894 struct bpf_tramp_link *l, int stack_size,
1895 int run_ctx_off, bool save_ret)
1897 void (*exit)(struct bpf_prog *prog, u64 start,
1898 struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_exit;
1899 u64 (*enter)(struct bpf_prog *prog,
1900 struct bpf_tramp_run_ctx *run_ctx) = __bpf_prog_enter;
1903 int ctx_cookie_off = offsetof(struct bpf_tramp_run_ctx, bpf_cookie);
1904 struct bpf_prog *p = l->link.prog;
1905 u64 cookie = l->cookie;
1907 /* mov rdi, cookie */
1908 emit_mov_imm64(&prog, BPF_REG_1, (long) cookie >> 32, (u32) (long) cookie);
1910 /* Prepare struct bpf_tramp_run_ctx.
1912 * bpf_tramp_run_ctx is already preserved by
1913 * arch_prepare_bpf_trampoline().
1915 * mov QWORD PTR [rbp - run_ctx_off + ctx_cookie_off], rdi
1916 */
1917 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_1, -run_ctx_off + ctx_cookie_off);
1919 if (p->aux->sleepable) {
1920 enter = __bpf_prog_enter_sleepable;
1921 exit = __bpf_prog_exit_sleepable;
1922 } else if (p->type == BPF_PROG_TYPE_STRUCT_OPS) {
1923 enter = __bpf_prog_enter_struct_ops;
1924 exit = __bpf_prog_exit_struct_ops;
1925 } else if (p->expected_attach_type == BPF_LSM_CGROUP) {
1926 enter = __bpf_prog_enter_lsm_cgroup;
1927 exit = __bpf_prog_exit_lsm_cgroup;
1930 /* arg1: mov rdi, progs[i] */
1931 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1932 /* arg2: lea rsi, [rbp - run_ctx_off] */
1933 EMIT4(0x48, 0x8D, 0x75, -run_ctx_off);
1935 if (emit_call(&prog, enter, prog))
1936 return -EINVAL;
1937 /* remember prog start time returned by __bpf_prog_enter */
1938 emit_mov_reg(&prog, true, BPF_REG_6, BPF_REG_0);
1940 /* if (__bpf_prog_enter*(prog) == 0)
1941 *	goto skip_exec_of_prog;
1942 */
1943 EMIT3(0x48, 0x85, 0xC0); /* test rax,rax */
1944 /* emit 2 nops that will be replaced with JE insn */
1945 jmp_insn = prog;
1946 emit_nops(&prog, 2);
1948 /* arg1: lea rdi, [rbp - stack_size] */
1949 EMIT4(0x48, 0x8D, 0x7D, -stack_size);
1950 /* arg2: progs[i]->insnsi for interpreter */
1951 if (!p->jited)
1952 emit_mov_imm64(&prog, BPF_REG_2,
1953 (long) p->insnsi >> 32,
1954 (u32) (long) p->insnsi);
1955 /* call JITed bpf program or interpreter */
1956 if (emit_call(&prog, p->bpf_func, prog))
1957 return -EINVAL;
1959 /*
1960 * BPF_TRAMP_MODIFY_RETURN trampolines can modify the return
1961 * of the previous call which is then passed on the stack to
1962 * the next BPF program.
1963 *
1964 * BPF_TRAMP_FENTRY trampoline may need to return the return
1965 * value of BPF_PROG_TYPE_STRUCT_OPS prog.
1966 */
1967 if (save_ret)
1968 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
1970 /* replace 2 nops with JE insn, since jmp target is known */
1971 jmp_insn[0] = X86_JE;
1972 jmp_insn[1] = prog - jmp_insn - 2;
1974 /* arg1: mov rdi, progs[i] */
1975 emit_mov_imm64(&prog, BPF_REG_1, (long) p >> 32, (u32) (long) p);
1976 /* arg2: mov rsi, rbx <- start time in nsec */
1977 emit_mov_reg(&prog, true, BPF_REG_2, BPF_REG_6);
1978 /* arg3: lea rdx, [rbp - run_ctx_off] */
1979 EMIT4(0x48, 0x8D, 0x55, -run_ctx_off);
1980 if (emit_call(&prog, exit, prog))
1981 return -EINVAL;
1982
1983 *pprog = prog;
1984 return 0;
1985 }
1987 static void emit_align(u8 **pprog, u32 align)
1989 u8 *target, *prog = *pprog;
1991 target = PTR_ALIGN(prog, align);
1992 if (target != prog)
1993 emit_nops(&prog, target - prog);
1994 *pprog = prog;
1995 }
1998 static int emit_cond_near_jump(u8 **pprog, void *func, void *ip, u8 jmp_cond)
1999 {
2000 u8 *prog = *pprog;
2001 s64 offset;
2002
2003 offset = func - (ip + 2 + 4);
2004 if (!is_simm32(offset)) {
2005 pr_err("Target %p is out of range\n", func);
2006 return -EINVAL;
2007 }
2008 EMIT2_off32(0x0F, jmp_cond + 0x10, offset);
2009 *pprog = prog;
2010 return 0;
2011 }
2013 static int invoke_bpf(const struct btf_func_model *m, u8 **pprog,
2014 struct bpf_tramp_links *tl, int stack_size,
2015 int run_ctx_off, bool save_ret)
2016 {
2017 int i;
2018 u8 *prog = *pprog;
2019
2020 for (i = 0; i < tl->nr_links; i++) {
2021 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size,
2022 run_ctx_off, save_ret))
2023 return -EINVAL;
2024 }
2025 *pprog = prog;
2026 return 0;
2027 }
2029 static int invoke_bpf_mod_ret(const struct btf_func_model *m, u8 **pprog,
2030 struct bpf_tramp_links *tl, int stack_size,
2031 int run_ctx_off, u8 **branches)
2036 /* The first fmod_ret program will receive a garbage return value.
2037 * Set this to 0 to avoid confusing the program.
2038 */
2039 emit_mov_imm32(&prog, false, BPF_REG_0, 0);
2040 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2041 for (i = 0; i < tl->nr_links; i++) {
2042 if (invoke_bpf_prog(m, &prog, tl->links[i], stack_size, run_ctx_off, true))
2045 /* mod_ret prog stored return value into [rbp - 8]. Emit:
2046 * if (*(u64 *)(rbp - 8) != 0)
2047 *	goto do_fexit;
2048 */
2049 /* cmp QWORD PTR [rbp - 0x8], 0x0 */
2050 EMIT4(0x48, 0x83, 0x7d, 0xf8); EMIT1(0x00);
2052 /* Save the location of the branch, and generate 6 nops
2053 * (4 bytes for an offset and 2 bytes for the jump). These nops
2054 * are replaced with a conditional jump once do_fexit (i.e. the
2055 * start of the fexit invocation) is finalized.
2056 */
2057 branches[i] = prog;
2058 emit_nops(&prog, 4 + 2);
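/*
 * Editor's sketch of the emitted check once the nops are patched:
 *	cmp QWORD PTR [rbp - 8], 0
 *	jne do_fexit		// 6-byte near jcc replaces the nops
 */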
2064 /*
2065 * Example:
2066 * __be16 eth_type_trans(struct sk_buff *skb, struct net_device *dev);
2067 * its 'struct btf_func_model' will be nr_args=2
2068 * The assembly code when eth_type_trans is executing after trampoline:
2072 * sub rsp, 16 // space for skb and dev
2073 * push rbx // temp regs to pass start time
2074 * mov qword ptr [rbp - 16], rdi // save skb pointer to stack
2075 * mov qword ptr [rbp - 8], rsi // save dev pointer to stack
2076 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2077 * mov rbx, rax // remember start time if bpf stats are enabled
2078 * lea rdi, [rbp - 16] // R1==ctx of bpf prog
2079 * call addr_of_jited_FENTRY_prog
2080 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2081 * mov rsi, rbx // prog start time
2082 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2083 * mov rdi, qword ptr [rbp - 16] // restore skb pointer from stack
2084 * mov rsi, qword ptr [rbp - 8] // restore dev pointer from stack
2089 * eth_type_trans has 5 byte nop at the beginning. These 5 bytes will be
2090 * replaced with 'call generated_bpf_trampoline'. When it returns
2091 * eth_type_trans will continue executing with original skb and dev pointers.
2093 * The assembly code when eth_type_trans is called from trampoline:
2097 * sub rsp, 24 // space for skb, dev, return value
2098 * push rbx // temp regs to pass start time
2099 * mov qword ptr [rbp - 24], rdi // save skb pointer to stack
2100 * mov qword ptr [rbp - 16], rsi // save dev pointer to stack
2101 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2102 * mov rbx, rax // remember start time if bpf stats are enabled
2103 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2104 * call addr_of_jited_FENTRY_prog // bpf prog can access skb and dev
2105 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2106 * mov rsi, rbx // prog start time
2107 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2108 * mov rdi, qword ptr [rbp - 24] // restore skb pointer from stack
2109 * mov rsi, qword ptr [rbp - 16] // restore dev pointer from stack
2110 * call eth_type_trans+5 // execute body of eth_type_trans
2111 * mov qword ptr [rbp - 8], rax // save return value
2112 * call __bpf_prog_enter // rcu_read_lock and preempt_disable
2113 * mov rbx, rax // remember start time if bpf stats are enabled
2114 * lea rdi, [rbp - 24] // R1==ctx of bpf prog
2115 * call addr_of_jited_FEXIT_prog // bpf prog can access skb, dev, return value
2116 * movabsq rdi, 64bit_addr_of_struct_bpf_prog // unused if bpf stats are off
2117 * mov rsi, rbx // prog start time
2118 * call __bpf_prog_exit // rcu_read_unlock, preempt_enable and stats math
2119 * mov rax, qword ptr [rbp - 8] // restore eth_type_trans's return value
2122 * add rsp, 8 // skip eth_type_trans's frame
2123 * ret // return to its caller
2124 */
2125 int arch_prepare_bpf_trampoline(struct bpf_tramp_image *im, void *image, void *image_end,
2126 const struct btf_func_model *m, u32 flags,
2127 struct bpf_tramp_links *tlinks,
2130 int ret, i, nr_args = m->nr_args, extra_nregs = 0;
2131 int regs_off, ip_off, args_off, stack_size = nr_args * 8, run_ctx_off;
2132 struct bpf_tramp_links *fentry = &tlinks[BPF_TRAMP_FENTRY];
2133 struct bpf_tramp_links *fexit = &tlinks[BPF_TRAMP_FEXIT];
2134 struct bpf_tramp_links *fmod_ret = &tlinks[BPF_TRAMP_MODIFY_RETURN];
2135 void *orig_call = func_addr;
2136 u8 **branches = NULL;
2140 /* x86-64 supports up to 6 arguments. 7+ can be added in the future */
2144 for (i = 0; i < MAX_BPF_FUNC_ARGS; i++) {
2145 if (m->arg_flags[i] & BTF_FMODEL_STRUCT_ARG)
2146 extra_nregs += (m->arg_size[i] + 7) / 8 - 1;
2148 if (nr_args + extra_nregs > 6)
2150 stack_size += extra_nregs * 8;
2152 /* Generated trampoline stack layout:
2153 *
2154 * RBP + 8         [ return address  ]
2155 * RBP + 0         [ RBP             ]
2156 *
2157 * RBP - 8         [ return value    ]  BPF_TRAMP_F_CALL_ORIG or
2158 *                                      BPF_TRAMP_F_RET_FENTRY_RET flags
2159 *
2160 *                 [ reg_argN        ]  always
2161 *                 [ ...             ]
2162 * RBP - regs_off  [ reg_arg1        ]  program's ctx pointer
2163 *
2164 * RBP - args_off  [ arg regs count  ]  always
2165 *
2166 * RBP - ip_off    [ traced function ]  BPF_TRAMP_F_IP_ARG flag
2167 *
2168 * RBP - run_ctx_off [ bpf_tramp_run_ctx ]
2169 */
2171 /* room for return value of orig_call or fentry prog */
2172 save_ret = flags & (BPF_TRAMP_F_CALL_ORIG | BPF_TRAMP_F_RET_FENTRY_RET);
2176 regs_off = stack_size;
2180 args_off = stack_size;
2182 if (flags & BPF_TRAMP_F_IP_ARG)
2183 stack_size += 8; /* room for IP address argument */
2185 ip_off = stack_size;
2187 stack_size += (sizeof(struct bpf_tramp_run_ctx) + 7) & ~0x7;
2188 run_ctx_off = stack_size;
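/*
 * Editor's example: for eth_type_trans (nr_args = 2, no struct args, no
 * BPF_TRAMP_F_IP_ARG) with BPF_TRAMP_F_CALL_ORIG set, stack_size starts
 * at 16, grows to 24 for the return value (regs_off = 24), then to 32
 * for the argument count (args_off = 32), and finally gains
 * sizeof(struct bpf_tramp_run_ctx) rounded up to 8 bytes (run_ctx_off).
 */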
2190 if (flags & BPF_TRAMP_F_SKIP_FRAME) {
2191 /* skip patched call instruction and point orig_call to actual
2192 * body of the kernel function.
2194 if (is_endbr(*(u32 *)orig_call))
2195 orig_call += ENDBR_INSN_SIZE;
2196 orig_call += X86_PATCH_SIZE;
2202 EMIT1(0x55); /* push rbp */
2203 EMIT3(0x48, 0x89, 0xE5); /* mov rbp, rsp */
2204 EMIT4(0x48, 0x83, 0xEC, stack_size); /* sub rsp, stack_size */
2205 EMIT1(0x53); /* push rbx */
2207 /* Store number of argument registers of the traced function:
2208 * mov rax, nr_args + extra_nregs
2209 * mov QWORD PTR [rbp - args_off], rax
2211 emit_mov_imm64(&prog, BPF_REG_0, 0, (u32) nr_args + extra_nregs);
2212 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -args_off);
2214 if (flags & BPF_TRAMP_F_IP_ARG) {
2215 /* Store IP address of the traced function:
2216 * movabsq rax, func_addr
2217 * mov QWORD PTR [rbp - ip_off], rax
2219 emit_mov_imm64(&prog, BPF_REG_0, (long) func_addr >> 32, (u32) (long) func_addr);
2220 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -ip_off);
2223 save_regs(m, &prog, nr_args, regs_off);
2225 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2226 /* arg1: mov rdi, im */
2227 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2228 if (emit_call(&prog, __bpf_tramp_enter, prog)) {
2234 if (fentry->nr_links)
2235 if (invoke_bpf(m, &prog, fentry, regs_off, run_ctx_off,
2236 flags & BPF_TRAMP_F_RET_FENTRY_RET))
2239 if (fmod_ret->nr_links) {
2240 branches = kcalloc(fmod_ret->nr_links, sizeof(u8 *),
2245 if (invoke_bpf_mod_ret(m, &prog, fmod_ret, regs_off,
2246 run_ctx_off, branches)) {
2252 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2253 restore_regs(m, &prog, nr_args, regs_off);
2255 if (flags & BPF_TRAMP_F_ORIG_STACK) {
2256 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, 8);
2257 EMIT2(0xff, 0xd0); /* call *rax */
2259 /* call original function */
2260 if (emit_call(&prog, orig_call, prog)) {
2265 /* remember return value in a stack for bpf prog to access */
2266 emit_stx(&prog, BPF_DW, BPF_REG_FP, BPF_REG_0, -8);
2267 im->ip_after_call = prog;
2268 memcpy(prog, x86_nops[5], X86_PATCH_SIZE);
2269 prog += X86_PATCH_SIZE;
2272 if (fmod_ret->nr_links) {
2273 /* From Intel 64 and IA-32 Architectures Optimization
2274 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2275 * Coding Rule 11: All branch targets should be 16-byte
2276 * aligned.
2277 */
2278 emit_align(&prog, 16);
2279 /* Update the branches saved in invoke_bpf_mod_ret with the
2280 * aligned address of do_fexit.
2282 for (i = 0; i < fmod_ret->nr_links; i++)
2283 emit_cond_near_jump(&branches[i], prog, branches[i],
2287 if (fexit->nr_links)
2288 if (invoke_bpf(m, &prog, fexit, regs_off, run_ctx_off, false)) {
2293 if (flags & BPF_TRAMP_F_RESTORE_REGS)
2294 restore_regs(m, &prog, nr_args, regs_off);
2296 /* This needs to be done regardless. If there were fmod_ret programs,
2297 * the return value is only updated on the stack and still needs to be
2298 * restored to R0.
2299 */
2300 if (flags & BPF_TRAMP_F_CALL_ORIG) {
2301 im->ip_epilogue = prog;
2302 /* arg1: mov rdi, im */
2303 emit_mov_imm64(&prog, BPF_REG_1, (long) im >> 32, (u32) (long) im);
2304 if (emit_call(&prog, __bpf_tramp_exit, prog)) {
2309 /* restore return value of orig_call or fentry prog back into RAX */
2310 if (save_ret)
2311 emit_ldx(&prog, BPF_DW, BPF_REG_0, BPF_REG_FP, -8);
2313 EMIT1(0x5B); /* pop rbx */
2314 EMIT1(0xC9); /* leave */
2315 if (flags & BPF_TRAMP_F_SKIP_FRAME)
2316 /* skip our return address and return to parent */
2317 EMIT4(0x48, 0x83, 0xC4, 8); /* add rsp, 8 */
2318 emit_return(&prog, prog);
2319 /* Make sure the trampoline generation logic doesn't overflow */
2320 if (WARN_ON_ONCE(prog > (u8 *)image_end - BPF_INSN_SAFETY)) {
2324 ret = prog - (u8 *)image;
2331 static int emit_bpf_dispatcher(u8 **pprog, int a, int b, s64 *progs, u8 *image, u8 *buf)
2333 u8 *jg_reloc, *prog = *pprog;
2334 int pivot, err, jg_bytes = 1;
2337 if (a == b) {
2338 /* Leaf node of recursion, i.e. not a range of indices
2339 * anymore.
2340 */
2341 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2342 if (!is_simm32(progs[a]))
2344 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3),
2346 err = emit_cond_near_jump(&prog, /* je func */
2347 (void *)progs[a], image + (prog - buf),
2352 emit_indirect_jump(&prog, 2 /* rdx */, image + (prog - buf));
2358 /* Not a leaf node, so we pivot, and recursively descend into
2359 * the lower and upper ranges.
2360 */
2361 pivot = (b - a) / 2;
2362 EMIT1(add_1mod(0x48, BPF_REG_3)); /* cmp rdx,func */
2363 if (!is_simm32(progs[a + pivot]))
2365 EMIT2_off32(0x81, add_1reg(0xF8, BPF_REG_3), progs[a + pivot]);
2367 if (pivot > 2) { /* jg upper_part */
2368 /* Require near jump. */
2370 EMIT2_off32(0x0F, X86_JG + 0x10, 0);
2376 err = emit_bpf_dispatcher(&prog, a, a + pivot, /* emit lower_part */
2381 /* From Intel 64 and IA-32 Architectures Optimization
2382 * Reference Manual, 3.4.1.4 Code Alignment, Assembly/Compiler
2383 * Coding Rule 11: All branch targets should be 16-byte
2384 * aligned.
2385 */
2386 emit_align(&prog, 16);
2387 jg_offset = prog - jg_reloc;
2388 emit_code(jg_reloc - jg_bytes, jg_offset, jg_bytes);
2390 err = emit_bpf_dispatcher(&prog, a + pivot + 1, /* emit upper_part */
2391 b, progs, image, buf);
2399 static int cmp_ips(const void *a, const void *b)
2400 {
2401 const s64 *ipa = a;
2402 const s64 *ipb = b;
2403
2404 if (*ipa > *ipb)
2405 return 1;
2406 if (*ipa < *ipb)
2407 return -1;
2408 return 0;
2409 }
2411 int arch_prepare_bpf_dispatcher(void *image, void *buf, s64 *funcs, int num_funcs)
2412 {
2413 u8 *prog = buf;
2414
2415 sort(funcs, num_funcs, sizeof(funcs[0]), cmp_ips, NULL);
2416 return emit_bpf_dispatcher(&prog, 0, num_funcs - 1, funcs, image, buf);
2417 }
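/*
 * Editor's sketch: for four sorted target addresses the dispatcher emits
 * a balanced binary search over "cmp rdx, imm32" tests: compare against
 * the pivot, "jg" into the upper half, then cmp/je against each
 * remaining candidate, falling back to a retpoline-safe indirect jmp
 * through rdx.
 */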
2419 struct x64_jit_data {
2420 struct bpf_binary_header *rw_header;
2421 struct bpf_binary_header *header;
2422 int *addrs;
2423 u8 *image;
2424 int proglen;
2425 struct jit_context ctx;
2426 };
2428 #define MAX_PASSES 20
2429 #define PADDING_PASSES (MAX_PASSES - 5)
2431 struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
2433 struct bpf_binary_header *rw_header = NULL;
2434 struct bpf_binary_header *header = NULL;
2435 struct bpf_prog *tmp, *orig_prog = prog;
2436 struct x64_jit_data *jit_data;
2437 int proglen, oldproglen = 0;
2438 struct jit_context ctx = {};
2439 bool tmp_blinded = false;
2440 bool extra_pass = false;
2441 bool padding = false;
2442 u8 *rw_image = NULL;
2448 if (!prog->jit_requested)
2451 tmp = bpf_jit_blind_constants(prog);
2452 /*
2453 * If blinding was requested and we failed during blinding,
2454 * we must fall back to the interpreter.
2455 */
2456 if (IS_ERR(tmp))
2457 return orig_prog;
2458 if (tmp != prog) {
2459 tmp_blinded = true;
2460 prog = tmp;
2461 }
2463 jit_data = prog->aux->jit_data;
2465 jit_data = kzalloc(sizeof(*jit_data), GFP_KERNEL);
2470 prog->aux->jit_data = jit_data;
2472 addrs = jit_data->addrs;
2474 ctx = jit_data->ctx;
2475 oldproglen = jit_data->proglen;
2476 image = jit_data->image;
2477 header = jit_data->header;
2478 rw_header = jit_data->rw_header;
2479 rw_image = (void *)rw_header + ((void *)image - (void *)header);
2482 goto skip_init_addrs;
2484 addrs = kvmalloc_array(prog->len + 1, sizeof(*addrs), GFP_KERNEL);
2490 /*
2491 * Before the first pass, make a rough estimate of addrs[]:
2492 * each BPF instruction is translated to less than 64 bytes.
2493 */
2494 for (proglen = 0, i = 0; i <= prog->len; i++) {
2495 proglen += 64;
2496 addrs[i] = proglen;
2497 }
2498 ctx.cleanup_addr = proglen;
2499 skip_init_addrs:
2501 /*
2502 * JITed image shrinks with every pass and the loop iterates
2503 * until the image stops shrinking. Very large BPF programs
2504 * may converge on the last pass. In such case do one more
2505 * pass to emit the final image.
2506 */
2507 for (pass = 0; pass < MAX_PASSES || image; pass++) {
2508 if (!padding && pass >= PADDING_PASSES)
2510 proglen = do_jit(prog, addrs, image, rw_image, oldproglen, &ctx, padding);
2515 bpf_arch_text_copy(&header->size, &rw_header->size,
2516 sizeof(rw_header->size));
2517 bpf_jit_binary_pack_free(header, rw_header);
2519 /* Fall back to interpreter mode */
2522 prog->bpf_func = NULL;
2524 prog->jited_len = 0;
2529 if (proglen != oldproglen) {
2530 pr_err("bpf_jit: proglen=%d != oldproglen=%d\n",
2531 proglen, oldproglen);
2536 if (proglen == oldproglen) {
2537 /*
2538 * The number of entries in extable is the number of BPF_LDX
2539 * insns that access kernel memory via "pointer to BTF type".
2540 * The verifier changed their opcode from LDX|MEM|size
2541 * to LDX|PROBE_MEM|size to make JITing easier.
2542 */
2543 u32 align = __alignof__(struct exception_table_entry);
2544 u32 extable_size = prog->aux->num_exentries *
2545 sizeof(struct exception_table_entry);
2547 /* allocate module memory for x86 insns and extable */
2548 header = bpf_jit_binary_pack_alloc(roundup(proglen, align) + extable_size,
2549 &image, align, &rw_header, &rw_image,
2555 prog->aux->extable = (void *) image + roundup(proglen, align);
2557 oldproglen = proglen;
2561 if (bpf_jit_enable > 1)
2562 bpf_jit_dump(prog->len, proglen, pass + 1, image);
2565 if (!prog->is_func || extra_pass) {
2566 /*
2567 * bpf_jit_binary_pack_finalize fails in two scenarios:
2568 *   1) header is not pointing to proper module memory;
2569 *   2) the arch doesn't support bpf_arch_text_copy().
2570 *
2571 * Both cases are serious bugs and justify WARN_ON.
2572 */
2573 if (WARN_ON(bpf_jit_binary_pack_finalize(prog, header, rw_header))) {
2574 /* header has been freed */
2579 bpf_tail_call_direct_fixup(prog);
2581 jit_data->addrs = addrs;
2582 jit_data->ctx = ctx;
2583 jit_data->proglen = proglen;
2584 jit_data->image = image;
2585 jit_data->header = header;
2586 jit_data->rw_header = rw_header;
2588 prog->bpf_func = (void *)image;
2590 prog->jited_len = proglen;
2595 if (!image || !prog->is_func || extra_pass) {
2597 bpf_prog_fill_jited_linfo(prog, addrs + 1);
2601 prog->aux->jit_data = NULL;
2603 out:
2604 if (tmp_blinded)
2605 bpf_jit_prog_release_other(prog, prog == orig_prog ?
2606 tmp : orig_prog);
2607 return prog;
2608 }
2610 bool bpf_jit_supports_kfunc_call(void)
2611 {
2612 return true;
2613 }
2615 void *bpf_arch_text_copy(void *dst, void *src, size_t len)
2616 {
2617 if (text_poke_copy(dst, src, len) == NULL)
2618 return ERR_PTR(-EINVAL);
2619 return dst;
2620 }
2622 /* Indicate the JIT backend supports mixing bpf2bpf and tailcalls. */
2623 bool bpf_jit_supports_subprog_tailcalls(void)
2624 {
2625 return true;
2626 }
2628 void bpf_jit_free(struct bpf_prog *prog)
2631 struct x64_jit_data *jit_data = prog->aux->jit_data;
2632 struct bpf_binary_header *hdr;
2634 /*
2635 * If we fail the final pass of JIT (from jit_subprogs),
2636 * the program may not be finalized yet. Call finalize here
2637 * before freeing it.
2638 */
2639 if (jit_data) {
2640 bpf_jit_binary_pack_finalize(prog, jit_data->header,
2641 jit_data->rw_header);
2642 kvfree(jit_data->addrs);
2643 kfree(jit_data);
2644 }
2645 hdr = bpf_jit_binary_pack_hdr(prog);
2646 bpf_jit_binary_pack_free(hdr, NULL);
2647 WARN_ON_ONCE(!bpf_prog_kallsyms_verify_off(prog));
2650 bpf_prog_unlock_free(prog);
2651 }